This is an automated email from the ASF dual-hosted git repository.

tlopex pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new acda952b31 [Relax][PyTorch] Unify tests using shared tvm.testing.assert_allclose (#18522)
acda952b31 is described below

commit acda952b31dd358ad8b830d591e2bc98aba1ddd3
Author: Guan-Ming (Wesley) Chiu <[email protected]>
AuthorDate: Sat Nov 29 13:51:52 2025 +0800

    [Relax][PyTorch] Unify tests using shared tvm.testing.assert_allclose (#18522)
    
    ## Why
    
    We already have a shared assert_allclose helper in tvm.testing; using it in
    every test keeps numerical comparisons consistent across the test suite.
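
    For illustration only (this snippet is not part of the commit), the shared
    helper converts both inputs with np.asanyarray, checks that their shapes
    match, and then compares values with default rtol=1e-7 and atol=1e-7:

        import numpy as np
        import tvm.testing

        actual = np.ones((2, 3), dtype="float32")
        desired = np.ones((2, 3), dtype="float32")
        # Shapes are checked first; values are then compared with the shared
        # defaults (rtol=1e-7, atol=1e-7) unless overridden.
        tvm.testing.assert_allclose(actual, desired)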
---
 python/tvm/contrib/msc/core/utils/info.py              |  5 +++--
 python/tvm/testing/utils.py                            |  4 ++--
 tests/python/codegen/test_target_codegen.py            |  2 +-
 tests/python/codegen/test_target_codegen_cuda_fp8.py   |  2 +-
 tests/python/codegen/test_target_codegen_metal.py      |  6 +++---
 .../test_hexagon/test_software_pipeline_async.py       |  2 +-
 tests/python/disco/test_ccl.py                         |  4 ++--
 tests/python/driver/test_compile.py                    | 10 +++++-----
 .../nightly/test_nnapi/test_from_exported_to_cuda.py   |  4 ++--
 .../test_runtime_builtin_kv_cache_transfer_kernel.py   | 14 +++++++-------
 tests/python/relax/test_base_py_module_printer.py      |  2 +-
 .../python/relax/test_base_py_module_symbolic_shape.py | 18 +++++++++---------
 tests/python/relax/test_dlpack_integration.py          |  8 ++++----
 .../relax/test_frontend_from_exported_program.py       |  4 ++--
 tests/python/relax/test_runtime_builtin.py             |  4 ++--
 tests/python/relax/test_vm_build.py                    |  2 +-
 tests/python/tir-base/test_tir_intrin.py               |  2 +-
 .../test_tir_schedule_fuse_reduction_epilogue.py       |  6 +++---
 web/tests/python/webgpu_rpc_test.py                    |  2 +-
 19 files changed, 51 insertions(+), 50 deletions(-)

diff --git a/python/tvm/contrib/msc/core/utils/info.py b/python/tvm/contrib/msc/core/utils/info.py
index 65ed51f80f..03eed9b7fd 100644
--- a/python/tvm/contrib/msc/core/utils/info.py
+++ b/python/tvm/contrib/msc/core/utils/info.py
@@ -21,6 +21,7 @@ from packaging.version import parse
 import numpy as np
 
 import tvm
+import tvm.testing
 from tvm.contrib.msc.core import _ffi_api
 from .namespace import MSCFramework
 
@@ -365,11 +366,11 @@ def compare_arrays(
             )
             continue
         if gol.dtype.name in ("int32", "int64"):
-            passed = np.abs(gol - data), max() == 0
+            passed = np.abs(gol - data).max() == 0
             _add_report(name, gol, data, passed)
             continue
         try:
-            np.testing.assert_allclose(gol, data, rtol=rtol, atol=atol, verbose=False)
+            tvm.testing.assert_allclose(gol, data, rtol=rtol, atol=atol, verbose=False)
             _add_report(name, gol, data, True)
         except:  # pylint: disable=bare-except
             _add_report(name, gol, data, False)
diff --git a/python/tvm/testing/utils.py b/python/tvm/testing/utils.py
index da22cf7746..828ffe7750 100644
--- a/python/tvm/testing/utils.py
+++ b/python/tvm/testing/utils.py
@@ -104,7 +104,7 @@ skip_if_wheel_test = pytest.mark.skipif(
 )
 
 
-def assert_allclose(actual, desired, rtol=1e-7, atol=1e-7):
+def assert_allclose(actual, desired, rtol=1e-7, atol=1e-7, verbose=True):
     """Version of np.testing.assert_allclose with `atol` and `rtol` fields set
     in reasonable defaults.
 
@@ -115,7 +115,7 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=1e-7):
     actual = np.asanyarray(actual)
     desired = np.asanyarray(desired)
     np.testing.assert_allclose(actual.shape, desired.shape)
-    np.testing.assert_allclose(actual, desired, rtol=rtol, atol=atol, verbose=True)
+    np.testing.assert_allclose(actual, desired, rtol=rtol, atol=atol, verbose=verbose)
 
 
 def check_numerical_grads(
diff --git a/tests/python/codegen/test_target_codegen.py b/tests/python/codegen/test_target_codegen.py
index 7530786a38..329dfac35d 100644
--- a/tests/python/codegen/test_target_codegen.py
+++ b/tests/python/codegen/test_target_codegen.py
@@ -120,7 +120,7 @@ def test_codegen_loop_step(target):
 
     # Check that the loop executes at positions 3, 99, 195, 291, 387, 483, 579, 675, 771, 867, 963
     for i in range(3, 1024, 96):
-        np.testing.assert_allclose(c_result[i], a_np[i] + b_np[i], rtol=1e-5)
+        tvm.testing.assert_allclose(c_result[i], a_np[i] + b_np[i], rtol=1e-5)
 
     # Assert non-touched positions remain zero
     for i in range(0, 3):
diff --git a/tests/python/codegen/test_target_codegen_cuda_fp8.py b/tests/python/codegen/test_target_codegen_cuda_fp8.py
index 51a9db240f..4ea938cad8 100644
--- a/tests/python/codegen/test_target_codegen_cuda_fp8.py
+++ b/tests/python/codegen/test_target_codegen_cuda_fp8.py
@@ -1005,7 +1005,7 @@ def test_fp8_fp16_bf16_vectorize_arith(vec_length, dtype):
     c_tvm = tvm.runtime.empty((128,), dtype=dtype, device=device)
     f(a_tvm, b_tvm, c_tvm)
     c_tvm = c_tvm.numpy()
-    np.testing.assert_allclose(
+    tvm.testing.assert_allclose(
         c_tvm.astype(np.float32), c_np.astype(np.float32), atol=5e-1, rtol=1e-2
     )
 
diff --git a/tests/python/codegen/test_target_codegen_metal.py b/tests/python/codegen/test_target_codegen_metal.py
index e938eb64d5..b969f0e0b9 100644
--- a/tests/python/codegen/test_target_codegen_metal.py
+++ b/tests/python/codegen/test_target_codegen_metal.py
@@ -74,7 +74,7 @@ def test_unaligned_vectorize():
     b_nd = tvm.runtime.empty((6,), "float32", dev)
     f = tvm.compile(IRModule, target=target)
     f(a_nd, b_nd)
-    np.testing.assert_allclose(b_nd.numpy(), a.reshape(6), atol=1e-5, rtol=1e-5)
+    tvm.testing.assert_allclose(b_nd.numpy(), a.reshape(6), atol=1e-5, rtol=1e-5)
 
 
 @tvm.testing.requires_gpu
@@ -146,7 +146,7 @@ def test_select_vectorize():
     f = tvm.compile(IRModule, target=target)
     f(a_nd, b_nd)
     a.reshape(3, 2)[:, 1] = 0
-    np.testing.assert_allclose(b_nd.numpy(), a, atol=1e-5, rtol=1e-5)
+    tvm.testing.assert_allclose(b_nd.numpy(), a, atol=1e-5, rtol=1e-5)
 
 
 @tvm.testing.requires_gpu
@@ -166,7 +166,7 @@ def test_vectorized_uint8():
     b_nd = tvm.runtime.empty((16,), "float32", dev)
     f = tvm.compile(func, target="metal")
     f(a_nd, b_nd)
-    np.testing.assert_allclose(b_nd.numpy(), a.astype("float32"), atol=1e-5, rtol=1e-5)
+    tvm.testing.assert_allclose(b_nd.numpy(), a.astype("float32"), atol=1e-5, rtol=1e-5)
 
 
 @tvm.testing.requires_metal(support_required="compile-only")
diff --git a/tests/python/contrib/test_hexagon/test_software_pipeline_async.py b/tests/python/contrib/test_hexagon/test_software_pipeline_async.py
index 714d37a3b9..b4d2aed433 100644
--- a/tests/python/contrib/test_hexagon/test_software_pipeline_async.py
+++ b/tests/python/contrib/test_hexagon/test_software_pipeline_async.py
@@ -89,7 +89,7 @@ class TestAsyncSoftwarePipeline:
             if "int" in dtype:
                 np.testing.assert_equal(out.numpy(), ref)
             else:
-                np.testing.assert_allclose(out.numpy(), ref, rtol=1e-3, atol=1e-3)
+                tvm.testing.assert_allclose(out.numpy(), ref, rtol=1e-3, atol=1e-3)
 
         return check
 
diff --git a/tests/python/disco/test_ccl.py b/tests/python/disco/test_ccl.py
index 260ac12d8d..8a1518765f 100644
--- a/tests/python/disco/test_ccl.py
+++ b/tests/python/disco/test_ccl.py
@@ -517,7 +517,7 @@ def test_mlp(session_kind, ccl):  # pylint: disable=too-many-locals
         sess.sync_worker_0()
         Y_result = Y_result.numpy()
     # pylint: enable=invalid-name
-    np.testing.assert_allclose(Y_result, Y_expected, rtol=1e-4, atol=1e-4)
+    tvm.testing.assert_allclose(Y_result, Y_expected, rtol=1e-4, atol=1e-4)
 
 
 @pytest.mark.parametrize("session_kind", _all_session_kinds)
@@ -666,7 +666,7 @@ def test_attention(session_kind, ccl):  # pylint: disable=too-many-locals,too-ma
         sess.sync_worker_0()
         Y_result = Y_result.numpy()
     # pylint: enable=invalid-name
-    np.testing.assert_allclose(Y_result, Y_expected, rtol=1e-3, atol=1e-3)
+    tvm.testing.assert_allclose(Y_result, Y_expected, rtol=1e-3, atol=1e-3)
 
 
 if __name__ == "__main__":
diff --git a/tests/python/driver/test_compile.py b/tests/python/driver/test_compile.py
index f0bd17a2f6..25c71b16dd 100644
--- a/tests/python/driver/test_compile.py
+++ b/tests/python/driver/test_compile.py
@@ -52,9 +52,9 @@ def test_compile_tir():
     c = tvm.runtime.tensor(np.zeros(10, dtype=np.float32), dev)
 
     exec_prim(a, b, c)
-    np.testing.assert_allclose(c.numpy(), a_np + b_np)
+    tvm.testing.assert_allclose(c.numpy(), a_np + b_np)
     exec_mod(a, b, c)
-    np.testing.assert_allclose(c.numpy(), a_np + b_np)
+    tvm.testing.assert_allclose(c.numpy(), a_np + b_np)
 
 
 def test_compile_relax():
@@ -82,7 +82,7 @@ def test_compile_relax():
 
     vm = relax.VirtualMachine(exec_relax, dev)
     z = vm["main"](x, y)
-    np.testing.assert_allclose(z.numpy(), x_np + y_np)
+    tvm.testing.assert_allclose(z.numpy(), x_np + y_np)
 
 
 @tvm.testing.skip_if_32bit(reason="skipping test for i386.")
@@ -111,11 +111,11 @@ def test_compile_mixed_module():
     y = tvm.runtime.tensor(np.zeros(4, dtype=np.float32), dev)
     # For tir function, we can directly call the function
     ex["add_one"](x, y)
-    np.testing.assert_allclose(y.numpy(), x.numpy() + 1)
+    tvm.testing.assert_allclose(y.numpy(), x.numpy() + 1)
     # For relax function, we need to use the vm to call the function
     vm = relax.VirtualMachine(ex, dev)
     z = vm["main"](x)
-    np.testing.assert_allclose(z.numpy(), x.numpy() + 1)
+    tvm.testing.assert_allclose(z.numpy(), x.numpy() + 1)
 
 
 if __name__ == "__main__":
diff --git a/tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py b/tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py
index 72edf67d68..64898ecdba 100644
--- a/tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py
+++ b/tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py
@@ -57,11 +57,11 @@ def assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, tar
         for i in range(len(pytorch_out)):
             actual = gpu_out[i].numpy()
             desired = pytorch_out[i].detach().numpy()
-            np.testing.assert_allclose(actual=actual, desired=desired, rtol=1e-5, atol=1e-5)
+            tvm.testing.assert_allclose(actual=actual, desired=desired, rtol=1e-5, atol=1e-5)
     else:
         actual = gpu_out[0].numpy()
         desired = pytorch_out.detach().numpy()
-        np.testing.assert_allclose(actual=actual, desired=desired, rtol=1e-5, atol=1e-5)
+        tvm.testing.assert_allclose(actual=actual, desired=desired, rtol=1e-5, atol=1e-5)
 
 
 @tvm.testing.parametrize_targets("cuda")
diff --git a/tests/python/relax/nvshmem/test_runtime_builtin_kv_cache_transfer_kernel.py b/tests/python/relax/nvshmem/test_runtime_builtin_kv_cache_transfer_kernel.py
index 302ae1cd56..0bdf63b6d5 100644
--- a/tests/python/relax/nvshmem/test_runtime_builtin_kv_cache_transfer_kernel.py
+++ b/tests/python/relax/nvshmem/test_runtime_builtin_kv_cache_transfer_kernel.py
@@ -85,10 +85,10 @@ def test_kv_transfer_without_disco():
             offset_in_page = position % page_size
             original_k = k_np[i]
             transferred_k = pages_np[layer_id, page_id, 0, :, offset_in_page, :]
-            np.testing.assert_allclose(original_k, transferred_k)
+            tvm.testing.assert_allclose(original_k, transferred_k)
             original_v = v_np[i]
             transferred_v = pages_np[layer_id, page_id, 1, :, offset_in_page, :]
-            np.testing.assert_allclose(original_v, transferred_v)
+            tvm.testing.assert_allclose(original_v, transferred_v)
     finalize_func = tvm.get_global_func("runtime.disco.nvshmem.finalize_nvshmem")
     finalize_func()
     comm.Barrier()
@@ -154,7 +154,7 @@ def test_kv_transfer_page_to_page_without_disco():
             rank_0_offset_in_page = rank_0_position % page_size
             rank_0_entry = pages_np[layer_id, rank_0_page_id, :, :, rank_0_offset_in_page, :]
             transferred_entry = new_pages_np[layer_id, page_id, :, :, offset_in_page, :]
-            np.testing.assert_allclose(rank_0_entry, transferred_entry)
+            tvm.testing.assert_allclose(rank_0_entry, transferred_entry)
     finalize_func = tvm.get_global_func("runtime.disco.nvshmem.finalize_nvshmem")
     finalize_func()
     comm.Barrier()
@@ -223,20 +223,20 @@ def test_kv_transfer_with_disco():
             offset_in_page = position % page_size
             original_k = k_np_0[i]
             transferred_k = pages_np[layer_id, page_id, 0, :, offset_in_page, :]
-            np.testing.assert_allclose(original_k, transferred_k)
+            tvm.testing.assert_allclose(original_k, transferred_k)
             original_v = v_np_0[i]
             transferred_v = pages_np[layer_id, page_id, 1, :, offset_in_page, :]
-            np.testing.assert_allclose(original_v, transferred_v)
+            tvm.testing.assert_allclose(original_v, transferred_v)
         pages_np = pages.debug_get_from_remote(1).numpy()
         for i, position in enumerate(position_map_array):
             page_id = position // page_size
             offset_in_page = position % page_size
             original_k = k_np_1[i]
             transferred_k = pages_np[layer_id, page_id, 0, :, offset_in_page, :]
-            np.testing.assert_allclose(original_k, transferred_k)
+            tvm.testing.assert_allclose(original_k, transferred_k)
             original_v = v_np_1[i]
             transferred_v = pages_np[layer_id, page_id, 1, :, offset_in_page, :]
-            np.testing.assert_allclose(original_v, transferred_v)
+            tvm.testing.assert_allclose(original_v, transferred_v)
     finalize_dfunc = sess.get_global_func("runtime.disco.nvshmem.finalize_nvshmem")
     finalize_dfunc()
     for i in range(2):
diff --git a/tests/python/relax/test_base_py_module_printer.py b/tests/python/relax/test_base_py_module_printer.py
index a64b3fed5a..0b5b97b0c3 100644
--- a/tests/python/relax/test_base_py_module_printer.py
+++ b/tests/python/relax/test_base_py_module_printer.py
@@ -800,7 +800,7 @@ def test_call_py_func_with_base_py_module():
         expected_np = expected
 
     # Use numpy for comparison since we have numpy arrays
-    np.testing.assert_allclose(final_result_np, expected_np, rtol=1e-5, atol=1e-5)
+    tvm.testing.assert_allclose(final_result_np, expected_np, rtol=1e-5, atol=1e-5)
 
 
 if __name__ == "__main__":
diff --git a/tests/python/relax/test_base_py_module_symbolic_shape.py b/tests/python/relax/test_base_py_module_symbolic_shape.py
index aa39fe14bf..3179c8f51e 100644
--- a/tests/python/relax/test_base_py_module_symbolic_shape.py
+++ b/tests/python/relax/test_base_py_module_symbolic_shape.py
@@ -88,13 +88,13 @@ def test_base_py_module_relax_symbolic_end_to_end():
     out = bpm.main_relax(a, b)
     assert isinstance(out, np.ndarray) or hasattr(out, "numpy")
     out_np = out if isinstance(out, np.ndarray) else out.numpy()
-    np.testing.assert_allclose(out_np, a + b, rtol=1e-6, atol=1e-6)
+    tvm.testing.assert_allclose(out_np, a + b, rtol=1e-6, atol=1e-6)
 
     a7 = np.random.randn(7).astype("float32")
     b7 = np.random.randn(7).astype("float32")
     out2 = bpm.main_relax(a7, b7)
     out2_np = out2 if isinstance(out2, np.ndarray) else out2.numpy()
-    np.testing.assert_allclose(out2_np, a7 + b7, rtol=1e-6, atol=1e-6)
+    tvm.testing.assert_allclose(out2_np, a7 + b7, rtol=1e-6, atol=1e-6)
 
 
 def test_base_py_module_tir_symbolic_end_to_end():
@@ -108,7 +108,7 @@ def test_base_py_module_tir_symbolic_end_to_end():
 
     out = bpm.call_tir("add_tir", [a, b], out_sinfo)
     out_np = out if isinstance(out, np.ndarray) else out.numpy()
-    np.testing.assert_allclose(out_np, a + b, rtol=1e-6, atol=1e-6)
+    tvm.testing.assert_allclose(out_np, a + b, rtol=1e-6, atol=1e-6)
 
 
 def test_infer_concrete_shape_multiple_symbolic_dims():
@@ -225,14 +225,14 @@ def test_base_py_module_multiple_symbolic_dims():
     out = bpm.matmul_relax(a, b)
     out_np = out if isinstance(out, np.ndarray) else out.numpy()
     expected = np.matmul(a, b)
-    np.testing.assert_allclose(out_np, expected, rtol=1e-6, atol=1e-6)
+    tvm.testing.assert_allclose(out_np, expected, rtol=1e-6, atol=1e-6)
 
     # Test TIR function with multiple symbolic dims
     # Use concrete shapes for TIR function to avoid constraint issues
     out_sinfo = relax.TensorStructInfo((2, 4), "float32")
     out_tir = bpm.call_tir("matmul_tir", [a, b], out_sinfo)
     out_tir_np = out_tir if isinstance(out_tir, np.ndarray) else out_tir.numpy()
-    np.testing.assert_allclose(out_tir_np, expected, rtol=1e-6, atol=1e-6)
+    tvm.testing.assert_allclose(out_tir_np, expected, rtol=1e-6, atol=1e-6)
 
 
 def test_base_py_module_call_dps_packed_symbolic():
@@ -258,7 +258,7 @@ def test_base_py_module_call_dps_packed_symbolic():
 
         out = bpm.call_dps_packed("test_add_packed", [a, b], out_sinfo)
         out_np = out if isinstance(out, np.ndarray) else out.numpy()
-        np.testing.assert_allclose(out_np, a + b, rtol=1e-6, atol=1e-6)
+        tvm.testing.assert_allclose(out_np, a + b, rtol=1e-6, atol=1e-6)
 
     except AttributeError as e:
         pytest.skip(f"call_dps_packed test requires register_global_func: {e}")
@@ -287,7 +287,7 @@ def test_base_py_module_call_dps_packed_multiple_args():
         out = bpm.call_dps_packed("test_matmul_packed", [a, b], out_sinfo)
         out_np = out if isinstance(out, np.ndarray) else out.numpy()
         expected = np.matmul(a, b)
-        np.testing.assert_allclose(out_np, expected, rtol=1e-6, atol=1e-6)
+        tvm.testing.assert_allclose(out_np, expected, rtol=1e-6, atol=1e-6)
 
     except AttributeError as e:
         pytest.skip(f"call_dps_packed test requires register_global_func: {e}")
@@ -320,7 +320,7 @@ def test_base_py_module_call_dps_packed_scalar_args():
         out = bpm.call_dps_packed("test_add_scalar_packed", [x, scalar], 
out_sinfo)
         out_np = out if isinstance(out, np.ndarray) else out.numpy()
         expected = x + scalar
-        np.testing.assert_allclose(out_np, expected, rtol=1e-6, atol=1e-6)
+        tvm.testing.assert_allclose(out_np, expected, rtol=1e-6, atol=1e-6)
 
     except AttributeError as e:
         pytest.skip(f"call_dps_packed test requires register_global_func: {e}")
@@ -360,7 +360,7 @@ def test_base_py_module_relax_with_pytorch_tensors():
     out = bpm.main_relax(a_torch, b_torch)
     out_np = out if isinstance(out, np.ndarray) else out.numpy()
     expected = a_torch.numpy() + b_torch.numpy()
-    np.testing.assert_allclose(out_np, expected, rtol=1e-6, atol=1e-6)
+    tvm.testing.assert_allclose(out_np, expected, rtol=1e-6, atol=1e-6)
 
 
 if __name__ == "__main__":
diff --git a/tests/python/relax/test_dlpack_integration.py b/tests/python/relax/test_dlpack_integration.py
index 7378fe74a4..b212f710b2 100644
--- a/tests/python/relax/test_dlpack_integration.py
+++ b/tests/python/relax/test_dlpack_integration.py
@@ -46,7 +46,7 @@ class TestDLPackIntegration:
 
         tvm_numpy = tvm_tensor.numpy()
         pytorch_numpy = pytorch_tensor.numpy()
-        np.testing.assert_allclose(tvm_numpy, pytorch_numpy, atol=1e-5)
+        tvm.testing.assert_allclose(tvm_numpy, pytorch_numpy, atol=1e-5)
 
     def test_dlpack_pytorch_to_tvm_conversion_gpu(self):
         if tvm.cuda().exist:
@@ -64,7 +64,7 @@ class TestDLPackIntegration:
             # Move to CPU for numpy conversion
             tvm_numpy = tvm_tensor.numpy()
             pytorch_numpy = pytorch_tensor.cpu().numpy()
-            np.testing.assert_allclose(tvm_numpy, pytorch_numpy, atol=1e-5)
+            tvm.testing.assert_allclose(tvm_numpy, pytorch_numpy, atol=1e-5)
         else:
             pytest.skip("CUDA not available")
 
@@ -82,7 +82,7 @@ class TestDLPackIntegration:
 
         tvm_numpy = tvm_tensor.numpy()
         pytorch_numpy = pytorch_tensor.numpy()
-        np.testing.assert_allclose(tvm_numpy, pytorch_numpy, atol=1e-5)
+        tvm.testing.assert_allclose(tvm_numpy, pytorch_numpy, atol=1e-5)
 
     def test_dlpack_tvm_to_pytorch_conversion_gpu(self):
         if tvm.cuda().exist:
@@ -100,7 +100,7 @@ class TestDLPackIntegration:
 
             tvm_numpy = tvm_tensor.numpy()
             pytorch_numpy = pytorch_tensor.cpu().numpy()
-            np.testing.assert_allclose(tvm_numpy, pytorch_numpy, atol=1e-5)
+            tvm.testing.assert_allclose(tvm_numpy, pytorch_numpy, atol=1e-5)
         else:
             pytest.skip("CUDA not available")
 
diff --git a/tests/python/relax/test_frontend_from_exported_program.py b/tests/python/relax/test_frontend_from_exported_program.py
index 8ff46bf611..091f0a4a29 100644
--- a/tests/python/relax/test_frontend_from_exported_program.py
+++ b/tests/python/relax/test_frontend_from_exported_program.py
@@ -7957,7 +7957,7 @@ def test_gru():
     assert (
         pytorch_output.shape == tvm_output_np.shape
     ), f"Shape mismatch: PyTorch {pytorch_output.shape} vs TVM 
{tvm_output_np.shape}"
-    np.testing.assert_allclose(pytorch_output.numpy(), tvm_output_np, rtol=1e-4, atol=1e-5)
+    tvm.testing.assert_allclose(pytorch_output.numpy(), tvm_output_np, rtol=1e-4, atol=1e-5)
 
     class SeqFirstGRU(nn.Module):
         def __init__(self):
@@ -7990,7 +7990,7 @@ def test_gru():
     else:
         tvm_output2_np = tvm_output2[0].numpy()
     assert pytorch_output2.shape == tvm_output2_np.shape
-    np.testing.assert_allclose(pytorch_output2.numpy(), tvm_output2_np, rtol=1e-4, atol=1e-5)
+    tvm.testing.assert_allclose(pytorch_output2.numpy(), tvm_output2_np, rtol=1e-4, atol=1e-5)
 
 
 def test_dynamic_shape_with_range_constraints():
diff --git a/tests/python/relax/test_runtime_builtin.py b/tests/python/relax/test_runtime_builtin.py
index e243770ed6..8abdcda152 100644
--- a/tests/python/relax/test_runtime_builtin.py
+++ b/tests/python/relax/test_runtime_builtin.py
@@ -185,7 +185,7 @@ def test_tensor_cache():
         v_np = param_dict[f"x_{i}"]
         if v_np.dtype == "float32":
             v_np = tvmjs._convert_bf16_to_f32(tvmjs._convert_f32_to_bf16(v_np))
-        np.testing.assert_allclose(v.numpy(), v_np, atol=1e-6, rtol=1e-6)
+        tvm.testing.assert_allclose(v.numpy(), v_np, atol=1e-6, rtol=1e-6)
 
 
 def test_tensor_cache_update():
@@ -210,7 +210,7 @@ def test_tensor_cache_update():
         v_np = param_dict[f"x_{i}"]
         if v_np.dtype == "float32":
             v_np = tvmjs._convert_bf16_to_f32(tvmjs._convert_f32_to_bf16(v_np))
-        np.testing.assert_allclose(v.numpy(), v_np, atol=1e-6, rtol=1e-6)
+        tvm.testing.assert_allclose(v.numpy(), v_np, atol=1e-6, rtol=1e-6)
 
 
 def test_attention_kv_cache_window_override():
diff --git a/tests/python/relax/test_vm_build.py b/tests/python/relax/test_vm_build.py
index e29d486584..efd2f7ecbf 100644
--- a/tests/python/relax/test_vm_build.py
+++ b/tests/python/relax/test_vm_build.py
@@ -412,7 +412,7 @@ def test_vm_emit_te_dtype_change(exec_mode):
         ).astype(np.float32)
     )
     res = check_saved_func(vm, "rx_func", inp)
-    np.testing.assert_allclose(res.numpy(), inp.numpy().astype("int16"))
+    tvm.testing.assert_allclose(res.numpy(), inp.numpy().astype("int16"))
 
 
 def test_vm_emit_te_floor_symbolic_shape(exec_mode):
diff --git a/tests/python/tir-base/test_tir_intrin.py b/tests/python/tir-base/test_tir_intrin.py
index afeefba2a3..8dabdbb344 100644
--- a/tests/python/tir-base/test_tir_intrin.py
+++ b/tests/python/tir-base/test_tir_intrin.py
@@ -128,7 +128,7 @@ def test_unary_intrin():
             assert b2.numpy().dtype == np.float32
             # Verify correctness against NumPy exp
             expected = np.exp(out_np.astype(np.float32))
-            np.testing.assert_allclose(b2.numpy(), expected, rtol=1e-5, atol=1e-5)
+            tvm.testing.assert_allclose(b2.numpy(), expected, rtol=1e-5, atol=1e-5)
 
     for func in test_funcs:
         atol = rtol = 1e-3 if func[0].__name__ in ["asin", "acos", "atan"] else 1e-5
diff --git a/tests/python/tir-schedule/test_tir_schedule_fuse_reduction_epilogue.py b/tests/python/tir-schedule/test_tir_schedule_fuse_reduction_epilogue.py
index 82a488851a..dc89f9df56 100644
--- a/tests/python/tir-schedule/test_tir_schedule_fuse_reduction_epilogue.py
+++ b/tests/python/tir-schedule/test_tir_schedule_fuse_reduction_epilogue.py
@@ -197,9 +197,9 @@ def test_fuse_reduction_epilogue_numerical_correctness():
     D_original = D_original_tvm.numpy()
     D_fused = D_fused_tvm.numpy()
 
-    np.testing.assert_allclose(D_original, expected, rtol=1e-5)
-    np.testing.assert_allclose(D_fused, expected, rtol=1e-5)
-    np.testing.assert_allclose(D_fused, D_original, rtol=1e-5)
+    tvm.testing.assert_allclose(D_original, expected, rtol=1e-5)
+    tvm.testing.assert_allclose(D_fused, expected, rtol=1e-5)
+    tvm.testing.assert_allclose(D_fused, D_original, rtol=1e-5)
 
 
 def test_fuse_reduction_epilogue_multiple_epilogue():
diff --git a/web/tests/python/webgpu_rpc_test.py b/web/tests/python/webgpu_rpc_test.py
index 260ccc9b34..f1e1c82888 100644
--- a/web/tests/python/webgpu_rpc_test.py
+++ b/web/tests/python/webgpu_rpc_test.py
@@ -71,7 +71,7 @@ def test_rpc():
         f1 = remote.system_lib()
         addone = f1.get_function("main")
         addone(a, b)
-        np.testing.assert_allclose(b.numpy(), np.log(np.abs(a.numpy()) + 1), atol=1e-5, rtol=1e-5)
+        tvm.testing.assert_allclose(b.numpy(), np.log(np.abs(a.numpy()) + 1), atol=1e-5, rtol=1e-5)
         print("Test pass..")
 
     check(remote, 71821 * 32)
