Cookiee235 opened a new issue, #17244:
URL: https://github.com/apache/tvm/issues/17244
### Actual behavior
```
Traceback (most recent call last):
  File "/share_container/optfuzz/res/bugs/simple/wrong_res.py", line 59, in <module>
    np.testing.assert_allclose(outputs1.numpy(), outputs2.numpy(), rtol=1e-3, atol=1e-3)
  File "/root/miniconda3/lib/python3.12/site-packages/numpy/testing/_private/utils.py", line 1504, in assert_allclose
    assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
  File "/root/miniconda3/lib/python3.12/contextlib.py", line 81, in inner
    return func(*args, **kwds)
           ^^^^^^^^^^^^^^^^^^^
  File "/root/miniconda3/lib/python3.12/site-packages/numpy/testing/_private/utils.py", line 718, in assert_array_compare
    flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan')
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/root/miniconda3/lib/python3.12/site-packages/numpy/testing/_private/utils.py", line 688, in func_assert_same_pos
    raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=0.001

x and y nan location mismatch:
 x: array([          nan,           nan, 3.084220e-37, 0.000000e+00,
                     nan,           nan, 3.085111e-37, 0.000000e+00,
                     nan,           nan], dtype=float32)
 y: array([1.401298e-44, 0.000000e+00, 0.000000e+00, 0.000000e+00,
           0.000000e+00, 0.000000e+00, 2.879961e-37, 0.000000e+00,
           0.000000e+00, 0.000000e+00], dtype=float32)
```
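
For context on the error: `np.testing.assert_allclose` compares NaN positions before applying any tolerance, so two arrays whose NaNs sit at different indices fail regardless of `rtol`/`atol`. A minimal standalone illustration (not part of the repro):

```python
import numpy as np

# NaN at index 0 in x but not in y -> "nan location mismatch",
# raised before rtol/atol are even considered.
x = np.array([np.nan, 1.0], dtype="float32")
y = np.array([0.0, 1.0], dtype="float32")
np.testing.assert_allclose(x, y, rtol=1e-3, atol=1e-3)  # raises AssertionError
```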
### Steps to reproduce
```python
import numpy as np

import tvm
from tvm import relax
from tvm.script import ir as I
from tvm.script import relax as R
from tvm.script import tir as T


@I.ir_module
class Module:
    @T.prim_func
    def add_8(rxplaceholder: T.Buffer((T.int64(8),), "float32"), rxplaceholder_1: T.Buffer((), "float32"), T_add: T.Buffer((T.int64(8),), "float32")):
        T.evaluate(0)

    @T.prim_func
    def exp(rxplaceholder: T.Buffer((T.int64(2), T.int64(4)), "float32"), compute: T.Buffer((T.int64(2), T.int64(4)), "float32")):
        T.evaluate(0)

    @T.prim_func
    def log(rxplaceholder: T.Buffer((T.int64(10),), "float32"), compute: T.Buffer((T.int64(10),), "float32")):
        T.evaluate(0)

    @T.prim_func
    def pad(rxplaceholder: T.Buffer((T.int64(8),), "float32"), PadInput: T.Buffer((T.int64(10),), "float32")):
        T.evaluate(0)

    @T.prim_func
    def relu(rxplaceholder: T.Buffer((T.int64(8),), "float32"), compute: T.Buffer((T.int64(8),), "float32")):
        T.evaluate(0)

    @T.prim_func
    def reshape_1(rxplaceholder: T.Buffer((T.int64(2), T.int64(4)), "float32"), T_reshape: T.Buffer((T.int64(8),), "float32")):
        T.evaluate(0)

    @R.function
    def main(x: R.Tensor((2, 4), dtype="float32")) -> R.Tensor((10,), dtype="float32"):
        R.func_attr({"relax.force_pure": 1})
        storage_1: R.Object = R.memory.alloc_storage(R.shape([40]), R.prim_value(0), R.str("global"), R.dtype("uint8"))
        alloc4: R.Tensor((10,), dtype="float32") = R.memory.alloc_tensor(storage_1, R.prim_value(0), R.shape([10]), R.dtype("float32"))
        gv: R.Tensor((10,), dtype="float32") = alloc4
        return gv


mod = Module
mod = tvm.relax.transform.DeadCodeElimination()(mod)
mod.show()
mod = tvm.relax.transform.LegalizeOps()(mod)


def compile_mod(mod, target_func, *inputs):
    # Build the module for LLVM and run `target_func` on the Relax VM.
    mod = relax.transform.FuseTIR()(mod)
    mod = relax.transform.LambdaLift()(mod)
    ex = relax.build(mod, target="llvm")
    vm = relax.VirtualMachine(ex, tvm.cpu())
    return vm[target_func](*inputs)


# Compiling and running the same module twice on the same input
# should give identical results, but the assertion below fails.
input_0 = tvm.nd.array(10 * np.random.random([2, 4]).astype("float32"))
outputs1 = compile_mod(mod, "main", input_0)
outputs2 = compile_mod(mod, "main", input_0)
np.testing.assert_allclose(outputs1.numpy(), outputs2.numpy(), rtol=1e-3, atol=1e-3)
```
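
What the repro suggests (my reading, not confirmed): every `prim_func` in `Module` is a `T.evaluate(0)` no-op, and `main` returns `alloc4` straight out of `R.memory.alloc_tensor` without any kernel ever writing to it, so the output is whatever bytes the freshly allocated storage happens to contain, and the two runs disagree. A rough NumPy analogue of returning uninitialized memory:

```python
import numpy as np

# Like an alloc_tensor buffer that no prim_func writes, np.empty exposes
# whatever bytes the allocator hands back; two allocations generally differ.
a = np.empty(10, dtype="float32")
b = np.empty(10, dtype="float32")
print(a)
print(b)  # arbitrary contents, possibly NaNs or denormals
```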
cc @Lunderberg @junrushao