jikechao opened a new issue, #15089:
URL: https://github.com/apache/tvm/issues/15089
`grid_sample` operator in pytorch will lead to a **divide-by-zero** error when
`padding_mode='reflection'` and `align_corners=True`
### Actual behavior
```
Traceback (most recent call last):
File "test.py", line 24, in <module>
exe = relay.create_executor('graph', mod=mod, params=params,
device=tvm.device('llvm', 0), target='llvm').evaluate()
File
"/workplace/software/tvm/tvm_/python/tvm/relay/backend/interpreter.py", line
171, in evaluate
return self._make_executor()
File "/workplace/software/tvm/tvm_/python/tvm/relay/build_module.py", line
519, in _make_executor
mod = build(self.mod, target=self.target)
File "/workplace/software/tvm/tvm_/python/tvm/relay/build_module.py", line
372, in build
mod_name=mod_name,
File "/workplace/software/tvm/tvm_/python/tvm/relay/build_module.py", line
169, in build
mod_name,
File
"/workplace/software/tvm/tvm_/python/tvm/_ffi/_ctypes/packed_func.py", line
237, in __call__
raise get_last_ffi_error()
tvm._ffi.base.TVMError: Traceback (most recent call last):
43: TVMFuncCall
42:
tvm::relay::backend::RelayBuildModule::GetFunction(std::__cxx11::basic_string<char,
std::char_traits<char>, std::allocator<char> > const&,
tvm::runtime::ObjectPtr<tvm::runtime::Object>
const&)::{lambda(tvm::runtime::TVMArgs,
tvm::runtime::TVMRetValue*)#3}::operator()(tvm::runtime::TVMArgs,
tvm::runtime::TVMRetValue*) const
41: tvm::relay::backend::RelayBuildModule::Build(tvm::IRModule,
tvm::runtime::Array<tvm::Target, void> const&, tvm::Target const&,
tvm::relay::Executor const&, tvm::relay::Runtime const&,
tvm::WorkspaceMemoryPools const&, tvm::ConstantMemoryPools const&,
tvm::runtime::String)
40: tvm::relay::backend::RelayBuildModule::BuildRelay(tvm::IRModule,
tvm::runtime::String const&)
39: tvm::relay::backend::ExecutorCodegen::Codegen(tvm::IRModule,
tvm::relay::Function const&, tvm::runtime::String)
38: void tvm::relay::backend::ExecutorCodegen::CallFunc<tvm::IRModule,
tvm::relay::Function, tvm::runtime::String>(std::__cxx11::basic_string<char,
std::char_traits<char>, std::allocator<char> > const&, tvm::IRModule,
tvm::relay::Function, tvm::runtime::String)
37:
tvm::relay::backend::GraphExecutorCodegenModule::GetFunction(std::__cxx11::basic_string<char,
std::char_traits<char>, std::allocator<char> > const&,
tvm::runtime::ObjectPtr<tvm::runtime::Object>
const&)::{lambda(tvm::runtime::TVMArgs,
tvm::runtime::TVMRetValue*)#2}::operator()(tvm::runtime::TVMArgs,
tvm::runtime::TVMRetValue*) const
36: tvm::relay::backend::GraphExecutorCodegen::Codegen(tvm::IRModule,
tvm::relay::Function, tvm::runtime::String)
35: tvm::transform::Pass::operator()(tvm::IRModule) const
34: tvm::transform::Pass::operator()(tvm::IRModule,
tvm::transform::PassContext const&) const
33: tvm::transform::SequentialNode::operator()(tvm::IRModule,
tvm::transform::PassContext const&) const
32: tvm::transform::Pass::operator()(tvm::IRModule,
tvm::transform::PassContext const&) const
31: tvm::transform::ModulePassNode::operator()(tvm::IRModule,
tvm::transform::PassContext const&) const
30:
tvm::runtime::PackedFuncObj::Extractor<tvm::runtime::PackedFuncSubObj<tvm::runtime::TypedPackedFunc<tvm::IRModule
(tvm::IRModule,
tvm::transform::PassContext)>::AssignTypedLambda<tvm::relay::tec::LowerTE(tvm::runtime::String,
tvm::CompilationConfig, std::function<void
(tvm::BaseFunc)>)::$_8>(tvm::relay::tec::LowerTE(tvm::runtime::String,
tvm::CompilationConfig, std::function<void
(tvm::BaseFunc)>)::$_8)::{lambda(tvm::runtime::TVMArgs const&,
tvm::runtime::TVMRetValue*)#1}> >::Call(tvm::runtime::PackedFuncObj const*,
tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)
29: tvm::relay::tec::LowerTE(tvm::IRModule const&, tvm::runtime::String
const&, std::function<void (tvm::BaseFunc)>, tvm::CompilationConfig)
28: tvm::transform::Pass::operator()(tvm::IRModule) const
27: tvm::transform::Pass::operator()(tvm::IRModule,
tvm::transform::PassContext const&) const
26: tvm::relay::transform::FunctionPassNode::operator()(tvm::IRModule,
tvm::transform::PassContext const&) const
25:
tvm::runtime::PackedFuncObj::Extractor<tvm::runtime::PackedFuncSubObj<tvm::runtime::TypedPackedFunc<tvm::relay::Function
(tvm::relay::Function, tvm::IRModule,
tvm::transform::PassContext)>::AssignTypedLambda<tvm::relay::tec::LowerTensorExpr(tvm::relay::tec::TECompiler,
std::function<void (tvm::BaseFunc)>,
tvm::CompilationConfig)::$_7>(tvm::relay::tec::LowerTensorExpr(tvm::relay::tec::TECompiler,
std::function<void (tvm::BaseFunc)>,
tvm::CompilationConfig)::$_7)::{lambda(tvm::runtime::TVMArgs const&,
tvm::runtime::TVMRetValue*)#1}> >::Call(tvm::runtime::PackedFuncObj const*,
tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)
24: tvm::relay::ExprMutator::VisitExpr(tvm::RelayExpr const&)
23: tvm::relay::ExprFunctor<tvm::RelayExpr (tvm::RelayExpr
const&)>::VisitExpr(tvm::RelayExpr const&)
22: tvm::NodeFunctor<tvm::RelayExpr (tvm::runtime::ObjectRef const&,
tvm::relay::ExprFunctor<tvm::RelayExpr (tvm::RelayExpr
const&)>*)>::operator()(tvm::runtime::ObjectRef const&,
tvm::relay::ExprFunctor<tvm::RelayExpr (tvm::RelayExpr const&)>*) const
21: _ZZN3tvm5relay11ExprFunc
20:
tvm::relay::transform::DeviceAwareExprMutator::VisitExpr_(tvm::relay::FunctionNode
const*)
19:
tvm::relay::tec::LowerTensorExprMutator::DeviceAwareVisitExpr_(tvm::relay::FunctionNode
const*)
18: _ZN3tvm5relay9
17: tvm::relay::ExprMutator::VisitExpr_(tvm::relay::FunctionNode const*)
16: tvm::relay::ExprMutator::VisitExpr(tvm::RelayExpr const&)
15: tvm::relay::ExprFunctor<tvm::RelayExpr (tvm::RelayExpr
const&)>::VisitExpr(tvm::RelayExpr const&)
14: tvm::NodeFunctor<tvm::RelayExpr (tvm::runtime::ObjectRef const&,
tvm::relay::ExprFunctor<tvm::RelayExpr (tvm::RelayExpr
const&)>*)>::operator()(tvm::runtime::ObjectRef const&,
tvm::relay::ExprFunctor<tvm::RelayExpr (tvm::RelayExpr const&)>*) const
13: _ZZN3tvm5relay11ExprFunc
12:
tvm::relay::transform::DeviceAwareExprMutator::VisitExpr_(tvm::relay::CallNode
const*)
11:
tvm::relay::tec::LowerTensorExprMutator::DeviceAwareVisitExpr_(tvm::relay::CallNode
const*)
10: tvm::relay::tec::TECompilerImpl::Lower(tvm::relay::tec::CCacheKey
const&)
9:
tvm::relay::tec::TECompilerImpl::LowerInternal(tvm::relay::tec::CCacheKey
const&, tvm::GlobalVarSupply)
8: tvm::relay::tec::PrimFuncFor(tvm::relay::Function const&, tvm::Target
const&, tvm::GlobalVarSupply, tvm::NameSupply)
7: tvm::relay::tec::ScheduleBuilder::Create(tvm::relay::Function const&,
tvm::GlobalVarSupply, tvm::NameSupply)
6: tvm::relay::tec::LowerToTECompute::Lower(tvm::relay::Function const&)
5:
tvm::relay::backend::MemoizedExprTranslator<tvm::runtime::Array<tvm::te::Tensor,
void> >::VisitExpr(tvm::RelayExpr const&)
4: tvm::relay::ExprFunctor<tvm::runtime::Array<tvm::te::Tensor, void>
(tvm::RelayExpr const&)>::VisitExpr(tvm::RelayExpr const&)
3: tvm::NodeFunctor<tvm::runtime::Array<tvm::te::Tensor, void>
(tvm::runtime::ObjectRef const&,
tvm::relay::ExprFunctor<tvm::runtime::Array<tvm::te::Tensor, void>
(tvm::RelayExpr const&)>*)>::operator()(tvm::runtime::ObjectRef const&,
tvm::relay::ExprFunctor<tvm::runtime::Array<tvm::te::Tensor, void>
(tvm::RelayExpr const&)>*) const
2: _ZZN3tvm5relay11ExprFunc
1: tvm::relay::tec::LowerToTECompute::VisitExpr_(tvm::relay::CallNode
const*)
0:
tvm::runtime::PackedFuncObj::Extractor<tvm::runtime::PackedFuncSubObj<TVMFuncCreateFromCFunc::$_2>
>::Call(tvm::runtime::PackedFuncObj const*, tvm::runtime::TVMArgs,
tvm::runtime::TVMRetValue*)
File
"/workplace/software/tvm/tvm_/python/tvm/_ffi/_ctypes/packed_func.py", line 81,
in cfun
rv = local_pyfunc(*pyargs)
File
"/workplace/software/tvm/tvm_/python/tvm/relay/backend/te_compiler.py", line
320, in lower_call
best_impl, outputs = select_implementation(op, call.attrs, inputs,
ret_type, target)
File
"/workplace/software/tvm/tvm_/python/tvm/relay/backend/te_compiler.py", line
207, in select_implementation
outs = impl.compute(attrs, inputs, out_type)
File "/workplace/software/tvm/tvm_/python/tvm/relay/op/op.py", line 126,
in compute
return _OpImplementationCompute(self, attrs, inputs, out_type)
File
"/workplace/software/tvm/tvm_/python/tvm/_ffi/_ctypes/packed_func.py", line
237, in __call__
raise get_last_ffi_error()
3: TVMFuncCall
2:
tvm::runtime::PackedFuncObj::Extractor<tvm::runtime::PackedFuncSubObj<tvm::relay::$_3>
>::Call(tvm::runtime::PackedFuncObj const*, tvm::runtime::TVMArgs,
tvm::runtime::TVMRetValue*)
1: tvm::relay::OpImplementation::Compute(tvm::Attrs const&,
tvm::runtime::Array<tvm::te::Tensor, void> const&, tvm::Type const&)
0:
tvm::runtime::PackedFuncObj::Extractor<tvm::runtime::PackedFuncSubObj<TVMFuncCreateFromCFunc::$_2>
>::Call(tvm::runtime::PackedFuncObj const*, tvm::runtime::TVMArgs,
tvm::runtime::TVMRetValue*)
File
"/workplace/software/tvm/tvm_/python/tvm/_ffi/_ctypes/packed_func.py", line 81,
in cfun
rv = local_pyfunc(*pyargs)
File "/workplace/software/tvm/tvm_/python/tvm/relay/op/image/_image.py",
line 371, in compute_grid_sample
topi.image.grid_sample(inputs[0], inputs[1], method, layout,
padding_mode, align_corners)
File "/workplace/software/tvm/tvm_/python/tvm/topi/image/grid_sample.py",
line 529, in grid_sample
return compute(data, grid, method, layout, padding_mode, align_corners)
File "/workplace/software/tvm/tvm_/python/tvm/topi/image/grid_sample.py",
line 286, in _grid_sample_2d
return te.compute((batch, in_channel, out_height, out_width),
interpolation, tag="grid_sample")
File "/workplace/software/tvm/tvm_/python/tvm/te/operation.py", line 132,
in compute
body = fcompute(*[v.var for v in dim_var])
File "/workplace/software/tvm/tvm_/python/tvm/topi/image/grid_sample.py",
line 188, in _bilinear_sample
y, x = _compute_source_index(n, h, w)
File "/workplace/software/tvm/tvm_/python/tvm/topi/image/grid_sample.py",
line 154, in _compute_source_index
y = _reflect_coordinates(y, in_height)
File "/workplace/software/tvm/tvm_/python/tvm/topi/image/grid_sample.py",
line 182, in _reflect_coordinates
new_x = __refelection(x, size - 1, 0)
File "/workplace/software/tvm/tvm_/python/tvm/topi/image/grid_sample.py",
line 178, in __refelection
__reflect(x, size, corner_start),
File "/workplace/software/tvm/tvm_/python/tvm/topi/image/grid_sample.py",
line 168, in __reflect
size_times = te.truncdiv(index_align_corner.astype("int32"),
size).astype("int32")
File "/workplace/software/tvm/tvm_/python/tvm/tir/op.py", line 2736, in
truncdiv
return _ffi_api._OpTruncDiv(a, b, span) # type: ignore
File
"/workplace/software/tvm/tvm_/python/tvm/_ffi/_ctypes/packed_func.py", line
237, in __call__
raise get_last_ffi_error()
5: TVMFuncCall
4:
tvm::runtime::PackedFuncObj::Extractor<tvm::runtime::PackedFuncSubObj<tvm::runtime::TypedPackedFunc<tvm::PrimExpr
(tvm::PrimExpr, tvm::PrimExpr,
tvm::Span)>::AssignTypedLambda<tvm::$_11>(tvm::$_11,
std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char>
>)::{lambda(tvm::runtime::TVMArgs const&, tvm::runtime::TVMRetValue*)#1}>
>::Call(tvm::runtime::PackedFuncObj const*, tvm::runtime::TVMArgs,
tvm::runtime::TVMRetValue*)
3: tvm::truncdiv(tvm::PrimExpr, tvm::PrimExpr, tvm::Span)
2: tvm::div(tvm::PrimExpr, tvm::PrimExpr, tvm::Span)
1: tvm::runtime::Optional<tvm::PrimExpr>
tvm::arith::TryConstFold<tvm::tir::Div>(tvm::PrimExpr, tvm::PrimExpr)
0: _ZN3tvm7runtime6detail
File "/workplace/software/tvm/tvm_/src/arith/const_fold.h", line 222
TVMError:
---------------------------------------------------------------
An error occurred during the execution of TVM.
For more information, please see: https://tvm.apache.org/docs/errors.html
---------------------------------------------------------------
Check failed: pb->value != 0 (0 vs. 0) : Divide by zero
```
### Environment
Any environment details, such as Operating System, TVM version, etc.
### Steps to reproduce
```
import torch
from tvm import relay
import tvm
import numpy as np
from torch.nn import Module
input_data = torch.rand([5, 2, 1, 1], dtype=torch.float64)
para_1 = torch.rand([5, 4, 2, 2], dtype=torch.float64)
class grid_sample(Module):
def forward(self, *args):
return torch.nn.functional.grid_sample(args[0], para_1,
padding_mode='reflection',align_corners=True)
m = grid_sample().float().eval()
torch_outputs = m(input_data)
trace = torch.jit.trace(m, input_data)
input_shapes = [('input0', torch.Size([5, 2, 1, 1]))]
mod, params = relay.frontend.from_pytorch(trace, input_shapes)
print(mod)
with tvm.transform.PassContext(opt_level=3):
exe = relay.create_executor('graph', mod=mod, params=params,
device=tvm.device('llvm', 0), target='llvm').evaluate()
```
### Triage
* needs-triage
* frontend:torch
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]