This is an automated email from the ASF dual-hosted git repository.

tlopex pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 5138efcfff [Relax][PyTorch] Unify dtype used in conv2d tests (#18553)
5138efcfff is described below

commit 5138efcfff3585400aaa9566765d67c2c31eb2d8
Author: Guan-Ming (Wesley) Chiu <[email protected]>
AuthorDate: Sat Dec 6 14:16:33 2025 +0800

    [Relax][PyTorch] Unify dtype used in conv2d tests (#18553)
    
    ## Why
    
    - Resolve the TODO in
    
[test_op_gradient_numeric.py](https://github.com/apache/tvm/compare/main...guan404ming:update-conv2d-test?expand=1#diff-65bec2fe9ca46b486e6e1d3412e9092d25d3815bb6173435501bbfab7eefd87b)
    by unifying the dtype used in the conv2d-related tests
    - Use float32 with a reduced range [0, 3] to maintain numerical precision
    for gradient checking
---
 tests/python/relax/test_op_gradient_numeric.py | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/tests/python/relax/test_op_gradient_numeric.py b/tests/python/relax/test_op_gradient_numeric.py
index bcea74a883..c76c150f6a 100644
--- a/tests/python/relax/test_op_gradient_numeric.py
+++ b/tests/python/relax/test_op_gradient_numeric.py
@@ -781,11 +781,8 @@ def test_nll_loss_no_batch(target, dev, nll_reduction1, nll_weighted1, nll_ignor
 
 @tvm.testing.parametrize_targets("llvm")
 def test_conv2d(target, dev, c2d_shape1, c2d_shape2, c2d_kwargs):
-    # TODO(mlc-team) Update to uniform
-    # We should use float32 to check the correctness of conv2d
-    # to avoid possible precision problems
-    data1_numpy = np.random.uniform(0, 16, c2d_shape1).astype(np.float64)
-    data2_numpy = np.random.uniform(0, 3, c2d_shape2).astype(np.float64)
+    data1_numpy = np.random.uniform(0, 3, c2d_shape1).astype(np.float32)
+    data2_numpy = np.random.uniform(0, 3, c2d_shape2).astype(np.float32)
     relax_check_gradients(
         relax.op.nn.conv2d,
         [data1_numpy, data2_numpy],
@@ -819,7 +816,7 @@ def test_conv2d(target, dev, c2d_shape1, c2d_shape2, c2d_kwargs):
 
 @tvm.testing.parametrize_targets("llvm")
 def test_max_pool2d(target, dev, pool_size, pool_kwargs):
-    data_numpy = np.random.uniform(0, 16, size=(3, 2, 10, 10)).astype(np.float64)
+    data_numpy = np.random.uniform(0, 3, size=(3, 2, 10, 10)).astype(np.float32)
     relax_check_gradients(
         relax.op.nn.max_pool2d,
         [data_numpy],
@@ -832,7 +829,7 @@ def test_max_pool2d(target, dev, pool_size, pool_kwargs):
 
 @tvm.testing.parametrize_targets("llvm")
 def test_avg_pool2d(target, dev, pool_size, pool_kwargs):
-    data_numpy = np.random.uniform(0, 16, size=(3, 2, 10, 10)).astype(np.float64)
+    data_numpy = np.random.uniform(0, 3, size=(3, 2, 10, 10)).astype(np.float32)
     relax_check_gradients(
         relax.op.nn.avg_pool2d,
         [data_numpy],

Reply via email to