This is an automated email from the ASF dual-hosted git repository. wuwei pushed a commit to branch unity in repository https://gitbox.apache.org/repos/asf/tvm.git
commit 047211f1da28749a22a5c9d865ecc7bb63cdad81 Author: LeiWang <[email protected]> AuthorDate: Wed Jan 3 12:35:39 2024 -0400 revert the change for dynamic test --- ...t_tir_transform_merge_dynamic_shared_memory_allocations.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/tests/python/tir-transform/test_tir_transform_merge_dynamic_shared_memory_allocations.py b/tests/python/tir-transform/test_tir_transform_merge_dynamic_shared_memory_allocations.py index 343ed1c10f..1eb9cd97cf 100644 --- a/tests/python/tir-transform/test_tir_transform_merge_dynamic_shared_memory_allocations.py +++ b/tests/python/tir-transform/test_tir_transform_merge_dynamic_shared_memory_allocations.py @@ -136,7 +136,7 @@ def test_matmul_dyn_shared(): np_ref = np.dot(a_np.astype("float32"), b_np.astype("float32")) tvm.testing.assert_allclose(c.numpy(), np_ref, 1e-4, 1e-4) - for target in ["cuda"]: + for target in ["cuda", "nvptx"]: check_target(target) @@ -201,7 +201,7 @@ def test_dyn_shared_vectorized_store(): c.numpy(), a.numpy().astype("float32") + b.numpy(), 1e-4, 1e-4 ) - for target in ["cuda"]: + for target in ["cuda", "nvptx"]: check_target(target) @@ -266,7 +266,7 @@ def test_dyn_shared_reuse_and_merge(): fadd(a, b, c, d) tvm.testing.assert_allclose(d.numpy(), a.numpy() + b.numpy() + c.numpy(), 1e-4, 1e-4) - for target in ["cuda"]: + for target in ["cuda", "nvptx"]: check_target(target) @@ -323,7 +323,7 @@ def test_dyn_shared_more_dtype(): fadd(a, b, c) tvm.testing.assert_allclose(c.numpy(), a.numpy().astype("float32") + b.numpy(), 1e-4, 1e-4) - for target in ["cuda"]: + for target in ["cuda", "nvptx"]: check_target(target) @@ -455,5 +455,4 @@ class TestMatmul(tvm.testing.CompareBeforeAfter): if __name__ == "__main__": - # tvm.testing.main() - test_dyn_shared_more_dtype() + tvm.testing.main()
