Mohamad11Dab opened a new issue, #17988:
URL: https://github.com/apache/tvm/issues/17988
### Expected behavior
TVM and ONNX Runtime should produce the same result within the given tolerance.
### Actual behavior
```
––––– MISMATCH DETECTED –––––
Not equal to tolerance rtol=0.01, atol=0.001

Mismatched elements: 8188 / 65536 (12.5%)
Max absolute difference: 0.00307368
Max relative difference: 1.2400934
 x: array([[[-0.276592, -0.276592, -0.276592, ..., -0.276592, -0.276592,
         -0.276592],
        [-0.02829 , -0.02829 , -0.02829 , ..., -0.02829 , -0.02829 ,...
 y: array([[[-0.276401, -0.276401, -0.276401, ..., -0.276401, -0.276401,
         -0.276401],
        [-0.026926, -0.026926, -0.026926, ..., -0.026926, -0.026926,...
```
### Environment
TVM: 0.17.0
ONNX Runtime: 1.16.3
### Steps to reproduce
```python
import tempfile

import numpy as np
import onnx
import onnxruntime as ort
import torch
import torch.nn as nn
from numpy.testing import assert_allclose

import tvm
from tvm import relay
from tvm.contrib import graph_executor

import nas_model_2


class SimpleBugModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.input_conv = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=1)
        self.block0 = nas_model_2.TanhWrapper()
        self.block1 = nn.GroupNorm(num_groups=4, num_channels=16)
        self.block2 = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=1, padding=0)
        self.block3 = nn.Softplus()
        self.block4 = nas_model_2.CeilWrapper()
        self.block5 = nn.GELU()
        self.block6 = nas_model_2.SoftsignWrapper()
        self.block7 = nas_model_2.InterpNearest2x()
        self.block8 = nn.InstanceNorm2d(num_features=16)
        self.block9 = nn.Flatten(start_dim=2)

    def forward(self, x):
        __input_conv = self.input_conv(x)
        __blocks__0 = self.block0(__input_conv)
        __blocks__1 = self.block1(__blocks__0)
        __blocks__2 = self.block2(__blocks__1)
        __blocks__3 = self.block3(__blocks__2)
        __blocks__4 = self.block4(__blocks__3)
        __blocks__5 = self.block5(__blocks__4)
        __blocks__6 = self.block6(__blocks__5)
        __blocks__7 = self.block7(__blocks__6)
        __blocks__8 = self.block8(__blocks__7)
        __blocks__9 = self.block9(__blocks__8)
        return __blocks__9


def main():
    model = SimpleBugModel()
    model.eval()
    dummy = torch.randn(1, 3, 32, 32, dtype=torch.float32)

    # Export the model to a temporary ONNX file.
    with tempfile.NamedTemporaryFile(suffix=".onnx", delete=False) as tmp:
        onnx_path = tmp.name
    torch.onnx.export(
        model, dummy, onnx_path,
        opset_version=19,
        input_names=["input"],
        output_names=["output"],
    )

    # Run ONNX Runtime.
    ort_sess = ort.InferenceSession(onnx_path, providers=["CPUExecutionProvider"])
    ort_out = ort_sess.run(None, {"input": dummy.numpy()})[0]

    # Compile and run with TVM.
    onnx_model = onnx.load(onnx_path)
    shape_dict = {"input": dummy.numpy().shape}
    mod, params = relay.frontend.from_onnx(onnx_model, shape_dict, freeze_params=True)
    with tvm.transform.PassContext(opt_level=4):
        lib = relay.build(mod, target="llvm", params=params)
    m = graph_executor.GraphModule(lib["default"](tvm.cpu()))
    m.set_input("input", tvm.nd.array(dummy.numpy()))
    m.run()
    tvm_out = m.get_output(0).numpy()

    # Compare the two results.
    try:
        assert_allclose(ort_out, tvm_out, rtol=1e-2, atol=1e-3, equal_nan=True)
    except AssertionError as e:
        print("––––– MISMATCH DETECTED –––––")
        print(e)  # just the assertion message
    except Exception as e:
        print("––––– UNEXPECTED ERROR DURING COMPARISON –––––")
        print(f"{type(e).__name__}: {e}")


if __name__ == "__main__":
    main()
```
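As far as I can tell, `opt_level=4` enables Relay's most aggressive rewrites, including the `FastMath` pass, which replaces ops such as `tanh`, `erf`, and `exp` with fast approximations; that could plausibly account for a deviation of this size. A minimal diagnostic sketch (my own helper, not part of the report's output) that rebuilds the same module at each optimization level and compares against ONNX Runtime, assuming the same `onnx_path` and input array as in the script above:

```python
import numpy as np
import onnx
import onnxruntime as ort
import tvm
from tvm import relay
from tvm.contrib import graph_executor


def sweep_opt_levels(onnx_path, input_np):
    """Hypothetical helper: report max |ORT - TVM| at each opt_level."""
    # Reference output from ONNX Runtime on CPU.
    ort_sess = ort.InferenceSession(onnx_path, providers=["CPUExecutionProvider"])
    ort_out = ort_sess.run(None, {"input": input_np})[0]

    # Import the ONNX graph into Relay once; rebuild per opt_level.
    mod, params = relay.frontend.from_onnx(
        onnx.load(onnx_path), {"input": input_np.shape}, freeze_params=True
    )
    for opt_level in range(5):
        with tvm.transform.PassContext(opt_level=opt_level):
            lib = relay.build(mod, target="llvm", params=params)
        m = graph_executor.GraphModule(lib["default"](tvm.cpu()))
        m.set_input("input", tvm.nd.array(input_np))
        m.run()
        tvm_out = m.get_output(0).numpy()
        print(f"opt_level={opt_level}: max |ort - tvm| = "
              f"{np.abs(ort_out - tvm_out).max():.6g}")
```

If the mismatch only appears at `opt_level=4`, rebuilding with `tvm.transform.PassContext(opt_level=4, disabled_pass=["FastMath"])` should confirm whether the fast-math approximations are responsible.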
```python
# nas_model_2.py
# basic_unit and nni_nn come from NNI's Retiarii API; the exact import
# lines were not in the original report, these are my best guess.
import torch
import torch.nn.functional as F
import nni.retiarii.nn.pytorch as nni_nn
from nni.retiarii import basic_unit


@basic_unit
class TanhWrapper(nni_nn.Module):
    def forward(self, x):
        return torch.tanh(x)


@basic_unit
class CeilWrapper(nni_nn.Module):
    def forward(self, x):
        return torch.ceil(x)


@basic_unit
class SoftsignWrapper(nni_nn.Module):
    def forward(self, x):
        return F.softsign(x)


@basic_unit
class InterpNearest2x(nni_nn.Module):
    def forward(self, x):
        return F.interpolate(x, scale_factor=2.0, mode="nearest")
```
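For reproducing without NNI installed: since `@basic_unit` only marks a module as a primitive for NAS and, to my understanding, does not change its `forward` semantics, plain `nn.Module` stand-ins should export the same ONNX graph. A sketch of an NNI-free `nas_model_2.py` under that assumption:

```python
# nas_model_2.py -- NNI-free stand-ins (assumption: @basic_unit does not
# alter forward() behavior, so these wrappers trace/export identically).
import torch
import torch.nn as nn
import torch.nn.functional as F


class TanhWrapper(nn.Module):
    def forward(self, x):
        return torch.tanh(x)


class CeilWrapper(nn.Module):
    def forward(self, x):
        return torch.ceil(x)


class SoftsignWrapper(nn.Module):
    def forward(self, x):
        return F.softsign(x)


class InterpNearest2x(nn.Module):
    def forward(self, x):
        # Nearest-neighbor 2x upsampling, exported as an ONNX Resize node.
        return F.interpolate(x, scale_factor=2.0, mode="nearest")
```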