This is an automated email from the ASF dual-hosted git repository.

driazati pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new d31a1fb0db [ci] Dis-allow any non-S3 URLs in CI (#13283)
d31a1fb0db is described below

commit d31a1fb0dbea484dec045c22ce2a756aa1071b38
Author: driazati <[email protected]>
AuthorDate: Fri Dec 2 17:39:15 2022 -0800

    [ci] Dis-allow any non-S3 URLs in CI (#13283)
    
    * [ci] Dis-allow any non-S3 URLs in CI
    
    This PR requires that any URL accessed by tests in CI be hosted in
    S3. This improves reliability: we've seen even files on GitHub
    sometimes serve 503s when everything else is working fine. This change
    raises an error if any disallowed URL is detected, and adds the
    remaining few URLs to the allowed-URL map.
---
 tests/python/frontend/darknet/test_forward.py | 200 ++++++++++++++------------
 tests/scripts/request_hook/request_hook.py    | 161 ++++++++++++++++++++-
 2 files changed, 263 insertions(+), 98 deletions(-)

diff --git a/tests/python/frontend/darknet/test_forward.py 
b/tests/python/frontend/darknet/test_forward.py
index 5e6af51f32..58695e1fd6 100644
--- a/tests/python/frontend/darknet/test_forward.py
+++ b/tests/python/frontend/darknet/test_forward.py
@@ -34,15 +34,29 @@ from tvm.relay.frontend.darknet import ACTIVATION
 from tvm import relay
 
 REPO_URL = "https://github.com/dmlc/web-data/blob/main/darknet/"
-DARKNET_LIB = "libdarknet2.0.so"
-DARKNETLIB_URL = REPO_URL + "lib/" + DARKNET_LIB + "?raw=true"
-LIB = __darknetffi__.dlopen(download_testdata(DARKNETLIB_URL, DARKNET_LIB, 
module="darknet"))
 
-DARKNET_TEST_IMAGE_NAME = "dog.jpg"
-DARKNET_TEST_IMAGE_URL = REPO_URL + "data/" + DARKNET_TEST_IMAGE_NAME + 
"?raw=true"
-DARKNET_TEST_IMAGE_PATH = download_testdata(
-    DARKNET_TEST_IMAGE_URL, DARKNET_TEST_IMAGE_NAME, module="data"
-)
+# Lazily initialized
+DARKNET_TEST_IMAGE_PATH = None
+LIB = None
+
+
+def _lib():
+    global LIB
+    lib = "libdarknet2.0.so"
+    url = REPO_URL + "lib/" + lib + "?raw=true"
+    if LIB is None:
+        LIB = __darknetffi__.dlopen(download_testdata(url, lib, 
module="darknet"))
+
+    return LIB
+
+
+def _darknet_test_image_path():
+    global DARKNET_TEST_IMAGE_PATH
+    if DARKNET_TEST_IMAGE_PATH is None:
+        name = "dog.jpg"
+        url = REPO_URL + "data/" + name + "?raw=true"
+        DARKNET_TEST_IMAGE_PATH = download_testdata(url, name, module="data")
+    return DARKNET_TEST_IMAGE_PATH
 
 
 def astext(program, unify_free_vars=False):
@@ -96,7 +110,7 @@ def _get_tvm_output(net, data, build_dtype="float32", 
states=None):
 def _load_net(cfg_url, cfg_name, weights_url, weights_name):
     cfg_path = download_testdata(cfg_url, cfg_name, module="darknet")
     weights_path = download_testdata(weights_url, weights_name, 
module="darknet")
-    net = LIB.load_network(cfg_path.encode("utf-8"), 
weights_path.encode("utf-8"), 0)
+    net = _lib().load_network(cfg_path.encode("utf-8"), 
weights_path.encode("utf-8"), 0)
     return net
 
 
@@ -104,7 +118,7 @@ def verify_darknet_frontend(net, build_dtype="float32"):
     """Test network with given input image on both darknet and tvm"""
 
     def get_darknet_output(net, img):
-        LIB.network_predict_image(net, img)
+        _lib().network_predict_image(net, img)
         out = []
         for i in range(net.n):
             layer = net.layers[i]
@@ -147,8 +161,8 @@ def verify_darknet_frontend(net, build_dtype="float32"):
 
     dtype = "float32"
 
-    img = LIB.letterbox_image(
-        LIB.load_image_color(DARKNET_TEST_IMAGE_PATH.encode("utf-8"), 0, 0), 
net.w, net.h
+    img = _lib().letterbox_image(
+        _lib().load_image_color(_darknet_test_image_path().encode("utf-8"), 0, 
0), net.w, net.h
     )
     darknet_output = get_darknet_output(net, img)
     batch_size = 1
@@ -169,7 +183,7 @@ def _test_rnn_network(net, states):
     """Test network with given input data on both darknet and tvm"""
 
     def get_darknet_network_predict(net, data):
-        return LIB.network_predict(net, data)
+        return _lib().network_predict(net, data)
 
     ffi = FFI()
     np_arr = np.zeros([1, net.inputs], dtype="float32")
@@ -195,7 +209,7 @@ def test_forward_extraction():
     weights_url = "http://pjreddie.com/media/files/" + weights_name + 
"?raw=true"
     net = _load_net(cfg_url, cfg_name, weights_url, weights_name)
     verify_darknet_frontend(net)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_alexnet():
@@ -207,7 +221,7 @@ def test_forward_alexnet():
     weights_url = "http://pjreddie.com/media/files/" + weights_name + 
"?raw=true"
     net = _load_net(cfg_url, cfg_name, weights_url, weights_name)
     verify_darknet_frontend(net)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_resnet50():
@@ -219,7 +233,7 @@ def test_forward_resnet50():
     weights_url = "http://pjreddie.com/media/files/" + weights_name + 
"?raw=true"
     net = _load_net(cfg_url, cfg_name, weights_url, weights_name)
     verify_darknet_frontend(net)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_resnext50():
@@ -231,7 +245,7 @@ def test_forward_resnext50():
     weights_url = "http://pjreddie.com/media/files/" + weights_name + 
"?raw=true"
     net = _load_net(cfg_url, cfg_name, weights_url, weights_name)
     verify_darknet_frontend(net)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_yolov2():
@@ -244,7 +258,7 @@ def test_forward_yolov2():
     net = _load_net(cfg_url, cfg_name, weights_url, weights_name)
     build_dtype = {}
     verify_darknet_frontend(net, build_dtype)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_yolov3():
@@ -257,88 +271,88 @@ def test_forward_yolov3():
     net = _load_net(cfg_url, cfg_name, weights_url, weights_name)
     build_dtype = {}
     verify_darknet_frontend(net, build_dtype)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_convolutional():
     """test convolutional layer"""
-    net = LIB.make_network(1)
-    layer = LIB.make_convolutional_layer(1, 224, 224, 3, 32, 1, 3, 2, 0, 1, 0, 
0, 0, 0)
+    net = _lib().make_network(1)
+    layer = _lib().make_convolutional_layer(1, 224, 224, 3, 32, 1, 3, 2, 0, 1, 
0, 0, 0, 0)
     net.layers[0] = layer
     net.w = net.h = 224
-    LIB.resize_network(net, 224, 224)
+    _lib().resize_network(net, 224, 224)
     verify_darknet_frontend(net)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_dense():
     """test fully connected layer"""
-    net = LIB.make_network(1)
-    layer = LIB.make_connected_layer(1, 75, 20, 1, 0, 0)
+    net = _lib().make_network(1)
+    layer = _lib().make_connected_layer(1, 75, 20, 1, 0, 0)
     net.layers[0] = layer
     net.w = net.h = 5
-    LIB.resize_network(net, 5, 5)
+    _lib().resize_network(net, 5, 5)
     verify_darknet_frontend(net)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_dense_batchnorm():
     """test fully connected layer with batchnorm"""
-    net = LIB.make_network(1)
-    layer = LIB.make_connected_layer(1, 12, 2, 1, 1, 0)
+    net = _lib().make_network(1)
+    layer = _lib().make_connected_layer(1, 12, 2, 1, 1, 0)
     for i in range(5):
         layer.rolling_mean[i] = np.random.rand(1)
         layer.rolling_variance[i] = np.random.rand(1) + 0.5
         layer.scales[i] = np.random.rand(1)
     net.layers[0] = layer
     net.w = net.h = 2
-    LIB.resize_network(net, 2, 2)
+    _lib().resize_network(net, 2, 2)
     verify_darknet_frontend(net)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_maxpooling():
     """test maxpooling layer"""
-    net = LIB.make_network(1)
-    layer = LIB.make_maxpool_layer(1, 224, 224, 3, 2, 2, 0)
+    net = _lib().make_network(1)
+    layer = _lib().make_maxpool_layer(1, 224, 224, 3, 2, 2, 0)
     net.layers[0] = layer
     net.w = net.h = 224
-    LIB.resize_network(net, 224, 224)
+    _lib().resize_network(net, 224, 224)
     verify_darknet_frontend(net)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_avgpooling():
     """test avgerage pooling layer"""
-    net = LIB.make_network(1)
-    layer = LIB.make_avgpool_layer(1, 224, 224, 3)
+    net = _lib().make_network(1)
+    layer = _lib().make_avgpool_layer(1, 224, 224, 3)
     net.layers[0] = layer
     net.w = net.h = 224
-    LIB.resize_network(net, 224, 224)
+    _lib().resize_network(net, 224, 224)
     verify_darknet_frontend(net)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_conv_batch_norm():
     """test batch normalization layer"""
-    net = LIB.make_network(1)
-    layer = LIB.make_convolutional_layer(1, 224, 224, 3, 32, 1, 3, 2, 0, 1, 1, 
0, 0, 0)
+    net = _lib().make_network(1)
+    layer = _lib().make_convolutional_layer(1, 224, 224, 3, 32, 1, 3, 2, 0, 1, 
1, 0, 0, 0)
     for i in range(32):
         layer.rolling_mean[i] = np.random.rand(1)
         layer.rolling_variance[i] = np.random.rand(1) + 0.5
     net.layers[0] = layer
     net.w = net.h = 224
-    LIB.resize_network(net, 224, 224)
+    _lib().resize_network(net, 224, 224)
     verify_darknet_frontend(net)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_shortcut():
     """test shortcut layer"""
-    net = LIB.make_network(3)
-    layer_1 = LIB.make_convolutional_layer(1, 224, 224, 3, 32, 1, 3, 2, 0, 1, 
0, 0, 0, 0)
-    layer_2 = LIB.make_convolutional_layer(1, 111, 111, 32, 32, 1, 1, 1, 0, 1, 
0, 0, 0, 0)
-    layer_3 = LIB.make_shortcut_layer(1, 0, 111, 111, 32, 111, 111, 32)
+    net = _lib().make_network(3)
+    layer_1 = _lib().make_convolutional_layer(1, 224, 224, 3, 32, 1, 3, 2, 0, 
1, 0, 0, 0, 0)
+    layer_2 = _lib().make_convolutional_layer(1, 111, 111, 32, 32, 1, 1, 1, 0, 
1, 0, 0, 0, 0)
+    layer_3 = _lib().make_shortcut_layer(1, 0, 111, 111, 32, 111, 111, 32)
     layer_3.activation = ACTIVATION.RELU
     layer_3.alpha = 1
     layer_3.beta = 1
@@ -346,118 +360,118 @@ def test_forward_shortcut():
     net.layers[1] = layer_2
     net.layers[2] = layer_3
     net.w = net.h = 224
-    LIB.resize_network(net, 224, 224)
+    _lib().resize_network(net, 224, 224)
     verify_darknet_frontend(net)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_reorg():
     """test reorg layer"""
-    net = LIB.make_network(2)
-    layer_1 = LIB.make_convolutional_layer(1, 222, 222, 3, 32, 1, 3, 2, 0, 1, 
0, 0, 0, 0)
-    layer_2 = LIB.make_reorg_layer(1, 110, 110, 32, 2, 0, 0, 0)
+    net = _lib().make_network(2)
+    layer_1 = _lib().make_convolutional_layer(1, 222, 222, 3, 32, 1, 3, 2, 0, 
1, 0, 0, 0, 0)
+    layer_2 = _lib().make_reorg_layer(1, 110, 110, 32, 2, 0, 0, 0)
     net.layers[0] = layer_1
     net.layers[1] = layer_2
     net.w = net.h = 222
-    LIB.resize_network(net, 222, 222)
+    _lib().resize_network(net, 222, 222)
     verify_darknet_frontend(net)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_region():
     """test region layer"""
-    net = LIB.make_network(2)
-    layer_1 = LIB.make_convolutional_layer(1, 19, 19, 3, 425, 1, 1, 1, 0, 1, 
0, 0, 0, 0)
-    layer_2 = LIB.make_region_layer(1, 19, 19, 5, 80, 4)
+    net = _lib().make_network(2)
+    layer_1 = _lib().make_convolutional_layer(1, 19, 19, 3, 425, 1, 1, 1, 0, 
1, 0, 0, 0, 0)
+    layer_2 = _lib().make_region_layer(1, 19, 19, 5, 80, 4)
     layer_2.softmax = 1
     net.layers[0] = layer_1
     net.layers[1] = layer_2
     net.w = net.h = 19
-    LIB.resize_network(net, 19, 19)
+    _lib().resize_network(net, 19, 19)
     build_dtype = {}
     verify_darknet_frontend(net, build_dtype)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_yolo_op():
     """test yolo layer"""
-    net = LIB.make_network(2)
-    layer_1 = LIB.make_convolutional_layer(1, 224, 224, 3, 14, 1, 3, 2, 0, 1, 
0, 0, 0, 0)
-    layer_2 = LIB.make_yolo_layer(1, 111, 111, 2, 9, __darknetffi__.NULL, 2)
+    net = _lib().make_network(2)
+    layer_1 = _lib().make_convolutional_layer(1, 224, 224, 3, 14, 1, 3, 2, 0, 
1, 0, 0, 0, 0)
+    layer_2 = _lib().make_yolo_layer(1, 111, 111, 2, 9, __darknetffi__.NULL, 2)
     net.layers[0] = layer_1
     net.layers[1] = layer_2
     net.w = net.h = 224
-    LIB.resize_network(net, 224, 224)
+    _lib().resize_network(net, 224, 224)
     build_dtype = {}
     verify_darknet_frontend(net, build_dtype)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_upsample():
     """test upsample layer"""
-    net = LIB.make_network(1)
-    layer = LIB.make_upsample_layer(1, 19, 19, 3, 3)
+    net = _lib().make_network(1)
+    layer = _lib().make_upsample_layer(1, 19, 19, 3, 3)
     layer.scale = 1
     net.layers[0] = layer
     net.w = net.h = 19
-    LIB.resize_network(net, 19, 19)
+    _lib().resize_network(net, 19, 19)
     verify_darknet_frontend(net)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_l2normalize():
     """test l2 normalization layer"""
-    net = LIB.make_network(1)
-    layer = LIB.make_l2norm_layer(1, 224 * 224 * 3)
+    net = _lib().make_network(1)
+    layer = _lib().make_l2norm_layer(1, 224 * 224 * 3)
     layer.c = layer.out_c = 3
     layer.h = layer.out_h = 224
     layer.w = layer.out_w = 224
     net.layers[0] = layer
     net.w = net.h = 224
-    LIB.resize_network(net, 224, 224)
+    _lib().resize_network(net, 224, 224)
     verify_darknet_frontend(net)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_elu():
     """test elu activation layer"""
-    net = LIB.make_network(1)
-    layer_1 = LIB.make_convolutional_layer(1, 224, 224, 3, 32, 1, 3, 2, 0, 1, 
0, 0, 0, 0)
+    net = _lib().make_network(1)
+    layer_1 = _lib().make_convolutional_layer(1, 224, 224, 3, 32, 1, 3, 2, 0, 
1, 0, 0, 0, 0)
     layer_1.activation = ACTIVATION.ELU
     net.layers[0] = layer_1
     net.w = net.h = 224
-    LIB.resize_network(net, 224, 224)
+    _lib().resize_network(net, 224, 224)
     verify_darknet_frontend(net)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_softmax():
     """test softmax layer"""
-    net = LIB.make_network(1)
-    layer_1 = LIB.make_softmax_layer(1, 75, 1)
+    net = _lib().make_network(1)
+    layer_1 = _lib().make_softmax_layer(1, 75, 1)
     layer_1.temperature = 1
     net.layers[0] = layer_1
     net.w = net.h = 5
-    LIB.resize_network(net, net.w, net.h)
+    _lib().resize_network(net, net.w, net.h)
     verify_darknet_frontend(net)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_softmax_temperature():
     """test softmax layer"""
-    net = LIB.make_network(1)
-    layer_1 = LIB.make_softmax_layer(1, 75, 1)
+    net = _lib().make_network(1)
+    layer_1 = _lib().make_softmax_layer(1, 75, 1)
     layer_1.temperature = 0.8
     net.layers[0] = layer_1
     net.w = net.h = 5
-    LIB.resize_network(net, net.w, net.h)
+    _lib().resize_network(net, net.w, net.h)
     verify_darknet_frontend(net)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_activation_logistic():
     """test logistic activation layer"""
-    net = LIB.make_network(1)
+    net = _lib().make_network(1)
     batch = 1
     h = 224
     width = 224
@@ -472,7 +486,7 @@ def test_forward_activation_logistic():
     binary = 0
     xnor = 0
     adam = 0
-    layer_1 = LIB.make_convolutional_layer(
+    layer_1 = _lib().make_convolutional_layer(
         batch,
         h,
         width,
@@ -491,14 +505,14 @@ def test_forward_activation_logistic():
     net.layers[0] = layer_1
     net.w = width
     net.h = h
-    LIB.resize_network(net, net.w, net.h)
+    _lib().resize_network(net, net.w, net.h)
     verify_darknet_frontend(net)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 def test_forward_rnn():
     """test RNN layer"""
-    net = LIB.make_network(1)
+    net = _lib().make_network(1)
     batch = 1
     inputs = 4
     outputs = 4
@@ -506,15 +520,17 @@ def test_forward_rnn():
     activation = ACTIVATION.RELU
     batch_normalize = 0
     adam = 0
-    layer_1 = LIB.make_rnn_layer(batch, inputs, outputs, steps, activation, 
batch_normalize, adam)
+    layer_1 = _lib().make_rnn_layer(
+        batch, inputs, outputs, steps, activation, batch_normalize, adam
+    )
     net.layers[0] = layer_1
     net.inputs = inputs
     net.outputs = outputs
     net.w = net.h = 0
-    LIB.resize_network(net, net.w, net.h)
+    _lib().resize_network(net, net.w, net.h)
     states = {"rnn0_state": np.zeros([1, net.inputs])}
     _test_rnn_network(net, states)
-    LIB.free_network(net)
+    _lib().free_network(net)
 
 
 if __name__ == "__main__":
diff --git a/tests/scripts/request_hook/request_hook.py 
b/tests/scripts/request_hook/request_hook.py
index dd1adf0ded..ce379b6b2c 100644
--- a/tests/scripts/request_hook/request_hook.py
+++ b/tests/scripts/request_hook/request_hook.py
@@ -20,6 +20,8 @@
 import urllib.request
 import logging
 
+from urllib.parse import quote
+
 LOGGER = None
 
 
@@ -30,22 +32,119 @@ URL_MAP = {
     "http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel": 
f"{BASE}/bvlc_alexnet.caffemodel",
     "http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel": 
f"{BASE}/bvlc_googlenet.caffemodel",
     
"http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz":
 f"{BASE}/tf-mobilenet_v1_1.0_224.tgz",
+    
"http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03.tar.gz":
 
f"{BASE}/models/object_detection/ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03.tar.gz",
+    
"http://download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224.tgz":
 f"{BASE}/models/tflite_11_05_08/mobilenet_v2_1.0_224.tgz",
     "http://images.cocodataset.org/zips/val2017.zip": 
f"{BASE}/cocodataset-val2017.zip",
+    "http://pjreddie.com/media/files/alexnet.weights?raw=true": 
f"{BASE}/media/files/alexnet.weights"
+    + quote("?raw=true"),
+    "http://pjreddie.com/media/files/alexnet.weights?raw=true": 
f"{BASE}/media/files/alexnet.weights"
+    + quote("?raw=true"),
+    "http://pjreddie.com/media/files/extraction.weights?raw=true": 
f"{BASE}/media/files/extraction.weights"
+    + quote("?raw=true"),
+    "http://pjreddie.com/media/files/extraction.weights?raw=true": 
f"{BASE}/media/files/extraction.weights"
+    + quote("?raw=true"),
+    "http://pjreddie.com/media/files/resnet50.weights?raw=true": 
f"{BASE}/media/files/resnet50.weights"
+    + quote("?raw=true"),
+    "http://pjreddie.com/media/files/resnext50.weights?raw=true": 
f"{BASE}/media/files/resnext50.weights"
+    + quote("?raw=true"),
+    "http://pjreddie.com/media/files/yolov2.weights?raw=true": 
f"{BASE}/media/files/yolov2.weights"
+    + quote("?raw=true"),
+    "http://pjreddie.com/media/files/yolov3.weights?raw=true": 
f"{BASE}/media/files/yolov3.weights"
+    + quote("?raw=true"),
+    "http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz": 
f"{BASE}/imikolov/rnnlm/simple-examples.tgz",
     "https://bj.bcebos.com/x2paddle/models/paddle_resnet50.tar": 
f"{BASE}/bcebos-paddle_resnet50.tar",
     "https://data.deepai.org/stanfordcars.zip": 
f"{BASE}/deepai-stanfordcars.zip",
+    
"https://download.pytorch.org/models/quantized/mobilenet_v2_qnnpack_37f702c5.pth":
 f"{BASE}/models/quantized/mobilenet_v2_qnnpack_37f702c5.pth",
+    
"https://github.com/ARM-software/ML-zoo/blob/48f458af1e9065d9aad2ad94d24b58d6e7c00817/models/keyword_spotting/ds_cnn_small/tflite_int16/ds_cnn_quantized.tflite?raw=true":
 
f"{BASE}/ARM-software/ML-zoo/blob/48f458af1e9065d9aad2ad94d24b58d6e7c00817/models/keyword_spotting/ds_cnn_small/tflite_int16/ds_cnn_quantized.tflite"
+    + quote("?raw=true"),
+    
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/adreno_v0.01.log":
 f"{BASE}/tlc-pack/tophub/main/tophub/adreno_v0.01.log",
     "https://docs-assets.developer.apple.com/coreml/models/MobileNet.mlmodel": 
f"{BASE}/2022-10-05/MobileNet.mlmodel",
+    "https://docs-assets.developer.apple.com/coreml/models/Resnet50.mlmodel": 
f"{BASE}/coreml/models/Resnet50.mlmodel",
+    
"https://download.pytorch.org/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth":
 f"{BASE}/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth",
+    
"https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth": 
f"{BASE}/models/deeplabv3_resnet101_coco-586e9e4e.pth",
+    "https://download.pytorch.org/models/densenet121-a639ec97.pth": 
f"{BASE}/models/densenet121-a639ec97.pth",
+    
"https://download.pytorch.org/models/efficientnet_b4_rwightman-7eb33cd5.pth": 
f"{BASE}/models/efficientnet_b4_rwightman-7eb33cd5.pth",
+    "https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth": 
f"{BASE}/models/fcn_resnet101_coco-7ecb50ca.pth",
+    "https://download.pytorch.org/models/googlenet-1378be20.pth": 
f"{BASE}/models/googlenet-1378be20.pth",
+    "https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth": 
f"{BASE}/models/inception_v3_google-0cc3c7bd.pth",
     
"https://download.pytorch.org/models/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth": 
f"{BASE}/2022-10-05/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth",
+    
"https://download.pytorch.org/models/mnasnet0.5_top1_67.823-3ffadce67e.pth": 
f"{BASE}/models/mnasnet0.5_top1_67.823-3ffadce67e.pth",
     "https://download.pytorch.org/models/mobilenet_v2-b0353104.pth": 
f"{BASE}/2022-10-05/mobilenet_v2-b0353104.pth",
+    "https://download.pytorch.org/models/r3d_18-b3b3357e.pth": 
f"{BASE}/models/r3d_18-b3b3357e.pth",
     "https://download.pytorch.org/models/resnet18-f37072fd.pth": 
f"{BASE}/2022-10-05/resnet18-f37072fd.pth",
+    "https://download.pytorch.org/models/resnet50-0676ba61.pth": 
f"{BASE}/models/resnet50-0676ba61.pth",
+    "https://download.pytorch.org/models/squeezenet1_0-b66bff10.pth": 
f"{BASE}/models/squeezenet1_0-b66bff10.pth",
+    "https://download.pytorch.org/models/squeezenet1_1-b8a52dc0.pth": 
f"{BASE}/models/squeezenet1_1-b8a52dc0.pth",
+    
"https://download.pytorch.org/models/vgg16_features-amdegroot-88682ab5.pth": 
f"{BASE}/models/vgg16_features-amdegroot-88682ab5.pth",
     
"https://gist.github.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/93672b029103648953c4e5ad3ac3aadf346a4cdc/super_resolution_0.2.onnx":
 f"{BASE}/2022-10-05/super_resolution_0.2.onnx",
     
"https://gist.githubusercontent.com/zhreshold/4d0b62f3d01426887599d4f7ede23ee5/raw/596b27d23537e5a1b5751d2b0481ef172f58b539/imagenet1000_clsid_to_human.txt":
 f"{BASE}/2022-10-05/imagenet1000_clsid_to_human.txt",
+    
"https://gist.githubusercontent.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/fa7ef0e9c9a5daea686d6473a62aacd1a5885849/cat.png":
 
f"{BASE}/zhreshold/bcda4716699ac97ea44f791c24310193/raw/fa7ef0e9c9a5daea686d6473a62aacd1a5885849/cat.png",
+    
"https://github.com/ARM-software/ML-zoo/raw/48a22ee22325d15d2371a6df24eb7d67e21dcc97/models/keyword_spotting/cnn_small/tflite_int8/cnn_s_quantized.tflite":
 
f"{BASE}/ARM-software/ML-zoo/raw/48a22ee22325d15d2371a6df24eb7d67e21dcc97/models/keyword_spotting/cnn_small/tflite_int8/cnn_s_quantized.tflite",
+    
"https://github.com/ARM-software/ML-zoo/raw/master/models/keyword_spotting/cnn_small/tflite_int8//cnn_s_quantized.tflite":
 
f"{BASE}/ARM-software/ML-zoo/raw/master/models/keyword_spotting/cnn_small/tflite_int8//cnn_s_quantized.tflite",
+    
"https://github.com/czh978/models_for_tvm_test/raw/main/tflite_graph_with_postprocess.pb":
 f"{BASE}/czh978/models_for_tvm_test/raw/main/tflite_graph_with_postprocess.pb",
+    "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true": 
f"{BASE}/dmlc/mxnet.js/blob/main/data/cat.png"
+    + quote("?raw=true"),
+    "https://github.com/dmlc/mxnet.js/raw/main/data/cat.png": 
f"{BASE}/dmlc/mxnet.js/raw/main/data/cat.png",
+    
"https://github.com/dmlc/web-data/blob/main/darknet/cfg/yolov3.cfg?raw=true": 
f"{BASE}/dmlc/web-data/blob/main/darknet/cfg/yolov3.cfg"
+    + quote("?raw=true"),
+    
"https://github.com/dmlc/web-data/blob/main/darknet/data/arial.ttf?raw=true": 
f"{BASE}/dmlc/web-data/blob/main/darknet/data/arial.ttf"
+    + quote("?raw=true"),
+    
"https://github.com/dmlc/web-data/blob/main/darknet/data/coco.names?raw=true": 
f"{BASE}/dmlc/web-data/blob/main/darknet/data/coco.names"
+    + quote("?raw=true"),
+    
"https://github.com/dmlc/web-data/blob/main/darknet/data/dog.jpg?raw=true": 
f"{BASE}/dmlc/web-data/blob/main/darknet/data/dog.jpg"
+    + quote("?raw=true"),
     "https://github.com/dmlc/web-data/blob/main/darknet/data/dog.jpg": 
f"{BASE}/dog.jpg",
+    
"https://github.com/dmlc/web-data/blob/main/darknet/data/person.jpg?raw=true": 
f"{BASE}/dmlc/web-data/blob/main/darknet/data/person.jpg"
+    + quote("?raw=true"),
+    
"https://github.com/dmlc/web-data/blob/main/darknet/lib/libdarknet2.0.so?raw=true":
 f"{BASE}/dmlc/web-data/blob/main/darknet/lib/libdarknet2.0.so"
+    + quote("?raw=true"),
     
"https://github.com/dmlc/web-data/blob/main/gluoncv/detection/street_small.jpg?raw=true":
 f"{BASE}/2022-10-05/small_street_raw.jpg",
+    "https://github.com/dmlc/web-data/raw/main/darknet/cfg/yolov3.cfg": 
f"{BASE}/dmlc/web-data/raw/main/darknet/cfg/yolov3.cfg",
+    "https://github.com/dmlc/web-data/raw/main/darknet/data/arial.ttf": 
f"{BASE}/dmlc/web-data/raw/main/darknet/data/arial.ttf",
+    "https://github.com/dmlc/web-data/raw/main/darknet/data/coco.names": 
f"{BASE}/dmlc/web-data/raw/main/darknet/data/coco.names",
+    "https://github.com/dmlc/web-data/raw/main/darknet/data/dog.jpg": 
f"{BASE}/dmlc/web-data/raw/main/darknet/data/dog.jpg",
+    "https://github.com/dmlc/web-data/raw/main/darknet/data/person.jpg": 
f"{BASE}/dmlc/web-data/raw/main/darknet/data/person.jpg",
+    "https://github.com/dmlc/web-data/raw/main/darknet/lib/libdarknet2.0.so": 
f"{BASE}/dmlc/web-data/raw/main/darknet/lib/libdarknet2.0.so",
     
"https://github.com/dmlc/web-data/raw/main/gluoncv/detection/street_small.jpg": 
f"{BASE}/2022-10-05/gluon-small-stree.jpg",
+    
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/Custom/placeholder.pb":
 f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/Custom/placeholder.pb",
+    
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/classify_image_graph_def-with_shapes.pb":
 
f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/classify_image_graph_def-with_shapes.pb",
+    
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/elephant-299.jpg":
 
f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/elephant-299.jpg",
+    
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/imagenet_2012_challenge_label_map_proto.pbtxt":
 
f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/imagenet_2012_challenge_label_map_proto.pbtxt",
+    
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/imagenet_synset_to_human_label_map.txt":
 
f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/imagenet_synset_to_human_label_map.txt",
+    
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/RNN/ptb/ptb_model_with_lstmblockcell.pb":
 
f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/RNN/ptb/ptb_model_with_lstmblockcell.pb",
+    
"https://github.com/dmlc/web-data/raw/master/tensorflow/models/InceptionV1/elephant-299.jpg":
 
f"{BASE}/dmlc/web-data/raw/master/tensorflow/models/InceptionV1/elephant-299.jpg",
+    
"https://github.com/fernchen/CaffeModels/raw/master/resnet/ResNet-50-deploy.prototxt":
 f"{BASE}/fernchen/CaffeModels/raw/master/resnet/ResNet-50-deploy.prototxt",
+    
"https://github.com/fernchen/CaffeModels/raw/master/resnet/ResNet-50-deploy.prototxt":
 f"{BASE}/fernchen/CaffeModels/raw/master/resnet/ResNet-50-deploy.prototxt",
+    
"https://github.com/fernchen/CaffeModels/raw/master/resnet/ResNet-50-model.caffemodel":
 f"{BASE}/fernchen/CaffeModels/raw/master/resnet/ResNet-50-model.caffemodel",
+    
"https://github.com/google/mediapipe/raw/v0.7.4/mediapipe/models/hand_landmark.tflite":
 f"{BASE}/google/mediapipe/raw/v0.7.4/mediapipe/models/hand_landmark.tflite",
     
"https://github.com/JonathanCMitchell/mobilenet_v2_keras/releases/download/v1.1/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5":
 
f"{BASE}/2022-10-05/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5",
     
"https://github.com/onnx/models/raw/bd206494e8b6a27b25e5cf7199dbcdbfe9d05d1c/vision/classification/mnist/model/mnist-1.onnx":
 f"{BASE}/onnx/mnist-1.onnx",
+    
"https://github.com/onnx/models/raw/bd206494e8b6a27b25e5cf7199dbcdbfe9d05d1c/vision/classification/resnet/model/resnet50-v2-7.onnx":
 
f"{BASE}/onnx/models/raw/bd206494e8b6a27b25e5cf7199dbcdbfe9d05d1c/vision/classification/resnet/model/resnet50-v2-7.onnx",
+    
"https://github.com/onnx/models/raw/main/vision/classification/mobilenet/model/mobilenetv2-7.onnx":
 
f"{BASE}/onnx/models/raw/main/vision/classification/mobilenet/model/mobilenetv2-7.onnx",
     
"https://github.com/onnx/models/raw/main/vision/classification/resnet/model/resnet50-v2-7.onnx":
 f"{BASE}/2022-10-05/resnet50-v2-7.onnx",
+    
"https://github.com/pjreddie/darknet/blob/master/cfg/alexnet.cfg?raw=true": 
f"{BASE}/pjreddie/darknet/blob/master/cfg/alexnet.cfg"
+    + quote("?raw=true"),
+    
"https://github.com/pjreddie/darknet/blob/master/cfg/extraction.cfg?raw=true": 
f"{BASE}/pjreddie/darknet/blob/master/cfg/extraction.cfg"
+    + quote("?raw=true"),
+    
"https://github.com/pjreddie/darknet/blob/master/cfg/resnet50.cfg?raw=true": 
f"{BASE}/pjreddie/darknet/blob/master/cfg/resnet50.cfg"
+    + quote("?raw=true"),
+    
"https://github.com/pjreddie/darknet/blob/master/cfg/resnext50.cfg?raw=true": 
f"{BASE}/pjreddie/darknet/blob/master/cfg/resnext50.cfg"
+    + quote("?raw=true"),
+    "https://github.com/pjreddie/darknet/blob/master/cfg/yolov2.cfg?raw=true": 
f"{BASE}/pjreddie/darknet/blob/master/cfg/yolov2.cfg"
+    + quote("?raw=true"),
     
"https://github.com/pjreddie/darknet/blob/master/cfg/yolov3-tiny.cfg?raw=true": 
f"{BASE}/2022-10-05/yolov3-tiny-raw.cfg",
+    "https://github.com/pjreddie/darknet/blob/master/cfg/yolov3.cfg?raw=true": 
f"{BASE}/pjreddie/darknet/blob/master/cfg/yolov3.cfg"
+    + quote("?raw=true"),
+    
"https://github.com/SebastianBoblestETAS/nn_models/blob/ce49c5de64889493161ca4194a20e0fd5eb707e6/lstm_1_in_3_out_2_ts_4.tflite?raw=true":
 
f"{BASE}/SebastianBoblestETAS/nn_models/blob/ce49c5de64889493161ca4194a20e0fd5eb707e6/lstm_1_in_3_out_2_ts_4.tflite"
+    + quote("?raw=true"),
+    
"https://github.com/shicai/MobileNet-Caffe/blob/master/mobilenet_v2.caffemodel?raw=true":
 f"{BASE}/shicai/MobileNet-Caffe/blob/master/mobilenet_v2.caffemodel"
+    + quote("?raw=true"),
+    
"https://github.com/shicai/MobileNet-Caffe/raw/master/mobilenet_v2_deploy.prototxt":
 f"{BASE}/shicai/MobileNet-Caffe/raw/master/mobilenet_v2_deploy.prototxt",
+    
"https://github.com/tensorflow/tflite-micro/raw/main/tensorflow/lite/micro/examples/micro_speech/micro_speech.tflite":
 
f"{BASE}/tensorflow/tflite-micro/raw/main/tensorflow/lite/micro/examples/micro_speech/micro_speech.tflite",
+    
"https://github.com/tlc-pack/web-data/raw/25fe99fb00329a26bd37d3dca723da94316fd34c/testdata/microTVM/model/keyword_spotting_quant.tflite":
 
f"{BASE}/tlc-pack/web-data/raw/25fe99fb00329a26bd37d3dca723da94316fd34c/testdata/microTVM/model/keyword_spotting_quant.tflite",
+    
"https://github.com/tlc-pack/web-data/raw/967fc387dadb272c5a7f8c3461d34c060100dbf1/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy":
 
f"{BASE}/tlc-pack/web-data/raw/967fc387dadb272c5a7f8c3461d34c060100dbf1/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy",
+    
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy":
 
f"{BASE}/tlc-pack/web-data/raw/main/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy",
+    
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/model/keyword_spotting_quant.tflite":
 
f"{BASE}/tlc-pack/web-data/raw/main/testdata/microTVM/model/keyword_spotting_quant.tflite",
     "https://github.com/uwsampl/web-data/raw/main/vta/models/synset.txt": 
f"{BASE}/2022-10-05/synset.txt",
     "https://homes.cs.washington.edu/~cyulin/media/gnn_model/gcn_cora.torch": 
f"{BASE}/gcn_cora.torch",
     "https://homes.cs.washington.edu/~moreau/media/vta/cat.jpg": 
f"{BASE}/vta_cat.jpg",
@@ -56,26 +155,76 @@ URL_MAP = {
     "https://pjreddie.com/media/files/yolov3.weights": 
f"{BASE}/yolov3.weights",
     
"https://raw.githubusercontent.com/Cadene/pretrained-models.pytorch/master/data/imagenet_classes.txt":
 f"{BASE}/2022-10-05/imagenet_classes.txt",
     
"https://raw.githubusercontent.com/Cadene/pretrained-models.pytorch/master/data/imagenet_synsets.txt":
 f"{BASE}/2022-10-05/imagenet_synsets.txt",
+    "https://raw.githubusercontent.com/dmlc/mxnet.js/main/data/cat.png": 
f"{BASE}/dmlc/mxnet.js/main/data/cat.png",
+    
"https://raw.githubusercontent.com/dmlc/web-data/main/darknet/cfg/yolov3.cfg": 
f"{BASE}/dmlc/web-data/main/darknet/cfg/yolov3.cfg",
+    
"https://raw.githubusercontent.com/dmlc/web-data/main/darknet/data/arial.ttf": 
f"{BASE}/dmlc/web-data/main/darknet/data/arial.ttf",
+    
"https://raw.githubusercontent.com/dmlc/web-data/main/darknet/data/coco.names": 
f"{BASE}/dmlc/web-data/main/darknet/data/coco.names",
+    
"https://raw.githubusercontent.com/dmlc/web-data/main/darknet/data/dog.jpg": 
f"{BASE}/dmlc/web-data/main/darknet/data/dog.jpg",
+    
"https://raw.githubusercontent.com/dmlc/web-data/main/darknet/data/person.jpg": 
f"{BASE}/dmlc/web-data/main/darknet/data/person.jpg",
+    
"https://raw.githubusercontent.com/dmlc/web-data/main/darknet/lib/libdarknet2.0.so":
 f"{BASE}/dmlc/web-data/main/darknet/lib/libdarknet2.0.so",
     
"https://raw.githubusercontent.com/dmlc/web-data/main/gluoncv/detection/street_small.jpg":
 f"{BASE}/2022-10-05/small_street.jpg",
+    
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/InceptionV1/classify_image_graph_def-with_shapes.pb":
 
f"{BASE}/dmlc/web-data/main/tensorflow/models/InceptionV1/classify_image_graph_def-with_shapes.pb",
+    
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/InceptionV1/elephant-299.jpg":
 f"{BASE}/dmlc/web-data/main/tensorflow/models/InceptionV1/elephant-299.jpg",
+    
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/InceptionV1/imagenet_2012_challenge_label_map_proto.pbtxt":
 
f"{BASE}/dmlc/web-data/main/tensorflow/models/InceptionV1/imagenet_2012_challenge_label_map_proto.pbtxt",
+    
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/InceptionV1/imagenet_synset_to_human_label_map.txt":
 
f"{BASE}/dmlc/web-data/main/tensorflow/models/InceptionV1/imagenet_synset_to_human_label_map.txt",
+    
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tgz":
 
f"{BASE}/dmlc/web-data/main/tensorflow/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tgz",
+    
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/Quantized/inception_v1_quantized.tflite":
 
f"{BASE}/dmlc/web-data/main/tensorflow/models/Quantized/inception_v1_quantized.tflite",
+    
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/Quantized/mobilenet_v2_quantized.tflite":
 
f"{BASE}/dmlc/web-data/main/tensorflow/models/Quantized/mobilenet_v2_quantized.tflite",
+    
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/Quantized/resnet_50_quantized.tflite":
 
f"{BASE}/dmlc/web-data/main/tensorflow/models/Quantized/resnet_50_quantized.tflite",
     
"https://raw.githubusercontent.com/dmlc/web-data/master/gluoncv/detection/street_small.jpg":
 f"{BASE}/2022-10-05/street_small.jpg",
     
"https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/lite/java/demo/app/src/main/assets/labels_mobilenet_quant_v1_224.txt":
 f"{BASE}/2022-10-05/labels_mobilenet_quant_v1_224.txt",
+    
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/arm_cpu_v0.08.log":
 f"{BASE}/tlc-pack/tophub/main/tophub/arm_cpu_v0.08.log",
+    
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/cuda_v0.10.log": 
f"{BASE}/tlc-pack/tophub/main/tophub/cuda_v0.10.log",
+    
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/llvm_v0.04.log": 
f"{BASE}/tlc-pack/tophub/main/tophub/llvm_v0.04.log",
     
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/mali_v0.06.log": 
f"{BASE}/2022-10-05/mali_v0.06.log",
+    
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/opencl_v0.04.log":
 f"{BASE}/tlc-pack/tophub/main/tophub/opencl_v0.04.log",
+    
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/vta_v0.10.log": 
f"{BASE}/tlc-pack/tophub/main/tophub/vta_v0.10.log",
+    
"https://raw.githubusercontent.com/tlc-pack/web-data/main/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy":
 
f"{BASE}/tlc-pack/web-data/main/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy",
+    
"https://raw.githubusercontent.com/tlc-pack/web-data/main/testdata/microTVM/model/keyword_spotting_quant.tflite":
 
f"{BASE}/tlc-pack/web-data/main/testdata/microTVM/model/keyword_spotting_quant.tflite",
     "https://s3.amazonaws.com/model-server/inputs/kitten.jpg": 
f"{BASE}/2022-10-05/kitten.jpg",
     "https://s3.amazonaws.com/onnx-model-zoo/synset.txt": 
f"{BASE}/2022-10-05/synset-s3.txt",
+    
"https://storage.googleapis.com/download.tensorflow.org/models/inception_v1_224_quant_20181026.tgz":
 f"{BASE}/download.tensorflow.org/models/inception_v1_224_quant_20181026.tgz",
+    
"https://storage.googleapis.com/download.tensorflow.org/models/inception_v4_299_quant_20181026.tgz":
 f"{BASE}/download.tensorflow.org/models/inception_v4_299_quant_20181026.tgz",
+    
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz":
 
f"{BASE}/download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz",
+    
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz":
 
f"{BASE}/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
+    
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz":
 
f"{BASE}/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
+    
"https://storage.googleapis.com/download.tensorflow.org/models/tflite_11_05_08/inception_v3_quant.tgz":
 
f"{BASE}/download.tensorflow.org/models/tflite_11_05_08/inception_v3_quant.tgz",
     
"https://storage.googleapis.com/download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224_quant.tgz":
 f"{BASE}/2022-10-05/mobilenet_v2_1.0_224_quant.tgz",
+    
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip":
 
f"{BASE}/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip",
+    
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/digit_classifier/mnist.tflite":
 f"{BASE}/download.tensorflow.org/models/tflite/digit_classifier/mnist.tflite",
+    
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz":
 
f"{BASE}/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz",
+    
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz":
 
f"{BASE}/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz",
+    
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz":
 
f"{BASE}/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz",
+    
"https://storage.googleapis.com/fast-convnets/tflite-models/mbv1_140_90_12b4_720.tflite":
 f"{BASE}/fast-convnets/tflite-models/mbv1_140_90_12b4_720.tflite",
+    
"https://storage.googleapis.com/fast-convnets/tflite-models/mbv2_200_85_11-16b2_744.tflite":
 f"{BASE}/fast-convnets/tflite-models/mbv2_200_85_11-16b2_744.tflite",
+    
"https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz":
 f"{BASE}/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz",
+    
"https://storage.googleapis.com/mobilenet_v3/checkpoints/v3-large_224_1.0_float.tgz":
 f"{BASE}/mobilenet_v3/checkpoints/v3-large_224_1.0_float.tgz",
+    
"https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/mobilenet_1_0_224_tf_no_top.h5":
 
f"{BASE}/tensorflow/keras-applications/mobilenet/mobilenet_1_0_224_tf_no_top.h5",
+    
"https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/mobilenet_1_0_224_tf.h5":
 f"{BASE}/tensorflow/keras-applications/mobilenet/mobilenet_1_0_224_tf.h5",
     
"https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/mobilenet_2_5_128_tf.h5":
 f"{BASE}/2022-10-05/mobilenet_2_5_128_tf.h5",
-    
"https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5":
 f"{BASE}/2022-10-05/resnet50_weights_tf_dim_ordering_tf_kernels.h5",
+    
"https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5":
 
f"{BASE}/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5",
+    
"https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5":
 
f"{BASE}/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5",
+    
"https://storage.googleapis.com/tensorflow/keras-applications/xception/xception_weights_tf_dim_ordering_tf_kernels.h5":
 
f"{BASE}/tensorflow/keras-applications/xception/xception_weights_tf_dim_ordering_tf_kernels.h5",
+    "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz": 
f"{BASE}/tensorflow/tf-keras-datasets/mnist.npz",
 }
 
 
 class TvmRequestHook(urllib.request.Request):
     def __init__(self, url, *args, **kwargs):
         LOGGER.info(f"Caught access to {url}")
-        if url in URL_MAP:
-            new_url = URL_MAP[url]
-            LOGGER.info(f"Mapped URL {url} to {new_url}")
-        else:
-            new_url = url
+        url = url.strip()
+        if url not in URL_MAP and not url.startswith(BASE):
+            # Dis-allow any accesses that aren't going through S3
+            msg = (
+                f"Uncaught URL found in CI: {url}. "
+                "A committer must upload the relevant file to S3 via"
+                
"https://github.com/apache/tvm/actions/workflows/upload_ci_resource.yml";
+                "and add it to the mapping in 
tests/scripts/request_hook/request_hook.py"
+            )
+            raise RuntimeError(msg)
+
+        new_url = URL_MAP[url]
+        LOGGER.info(f"Mapped URL {url} to {new_url}")
         super().__init__(new_url, *args, **kwargs)
 
 


Reply via email to