lazycal opened a new issue, #12008:
URL: https://github.com/apache/tvm/issues/12008
This model:
```python
def @main(%x: Tensor[(1, 1, 1, 1), float32] /* ty=Tensor[(1, 1, 1, 1), float32] */) -> Tensor[(1, 1, 1, 1), float32] {
  %0 = image.resize2d(%x, size=[2, 4], roi=[0f, 0f, 0f, 0f], rounding_method="") /* ty=Tensor[(1, 1, 2, 4), float32] */;
  %1 = transpose(%0, axes=[0, 1, 3, 2]) /* ty=Tensor[(1, 1, 4, 2), float32] */;
  %2 = nn.max_pool2d(%1, pool_size=[1, 1], padding=[0, 0, 0, 0]) /* ty=Tensor[(1, 1, 4, 2), float32] */;
  image.resize2d(%2, size=[1, 1], roi=[0f, 0f, 0f, 0f], rounding_method="") /* ty=Tensor[(1, 1, 1, 1), float32] */
}
```
fails with `ValueError: NCWH layout is not supported.`. The NCWH layout is introduced by AlterOpLayout (see below for the IR right after that pass):
```python
def @main(%x: Tensor[(1, 1, 1, 1), float32] /* ty=Tensor[(1, 1, 1, 1), float32] */) -> Tensor[(1, 1, 1, 1), float32] {
  %0 = image.resize2d(%x, size=[2, 4], roi=[0f, 0f, 0f, 0f], rounding_method="") /* ty=Tensor[(1, 1, 2, 4), float32] */;
  %1 = transpose(%0, axes=[0, 1, 3, 2]) /* ty=Tensor[(1, 1, 4, 2), float32] */;
  %2 = layout_transform(%1, src_layout="NCHW", dst_layout="NCWH") /* ty=Tensor[(1, 1, 2, 4), float32] */;
  %3 = nn.max_pool2d(%2, pool_size=[1, 1], padding=[0, 0, 0, 0], layout="NCWH") /* ty=Tensor[(1, 1, 2, 4), float32] */;
  %4 = image.resize2d(%3, size=[1, 1], roi=[0f, 0f, 0f, 0f], layout="NCWH", rounding_method="") /* ty=Tensor[(1, 1, 1, 1), float32] */;
  layout_transform(%4, src_layout="NCWH", dst_layout="NCHW") /* ty=Tensor[(1, 1, 1, 1), float32] */
}
```
Here nn.max_pool2d outputs the NCWH layout and image.resize2d accepts it during AlterOpLayout, but then complains when lowering in this function:
https://github.com/apache/tvm/blob/5efe8b0bfdff4c9939185a7581dc77e23cbcb6d5/python/tvm/topi/image/resize.py#L807-L824
Not sure if this is a bug, but ideally resize2d should not accept an unsupported layout in the first place? The current logic at
https://github.com/apache/tvm/blob/5efe8b0bfdff4c9939185a7581dc77e23cbcb6d5/src/relay/op/image/resize.cc#L49-L53
does not check this.
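For context, the TOPI lowering only knows how to locate the spatial axes for the layouts it explicitly handles, and everything else falls into the `ValueError` above. A minimal self-contained sketch of that dispatch (paraphrased from `resize2d` in `python/tvm/topi/image/resize.py`; `resize2d_hw_axes` is an illustrative helper, not actual TVM API):
```python
def resize2d_hw_axes(layout):
    """Sketch of the layout dispatch in topi resize2d: map a layout
    string to its (height, width) axes; reject anything unrecognized."""
    if layout == "NHWC":
        return 1, 2
    if layout == "NCHW":
        return 2, 3
    # The real code also accepts packed NCHW variants (e.g. NCHW4c).
    raise ValueError("%s layout is not supported." % layout)


print(resize2d_hw_axes("NCHW"))  # (2, 3)
resize2d_hw_axes("NCWH")         # ValueError: NCWH layout is not supported.
```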
### Expected behavior
Compilation should succeed.
### Actual behavior
Compilation passes with opt_level=2 but fails with opt_level=4.
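A plausible workaround (untested, and assuming AlterOpLayout is indeed the pass that introduces NCWH) is to keep opt_level=4 but disable that pass via `disabled_pass`, using `mod` from the repro script below:
```python
# Workaround sketch: run at opt_level=4 but skip AlterOpLayout,
# so no NCWH layout_transform is ever inserted.
with tvm.transform.PassContext(opt_level=4, disabled_pass=["AlterOpLayout"]):
    relay.create_executor("graph", mod).evaluate()
```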
### Environment
Ubuntu 18.04
TVM: 1787cca3f90237dff001fba01ffdbaf9a510f886
### Steps to reproduce
Run this code:
```python
import tvm
from tvm import relay
from tvm.relay.transform import InferType


@tvm.instrument.pass_instrument
class PrintIR:
    """Print the pass name and the IR before each pass executes."""

    def __init__(self, print_mod=True, show_meta_data=False) -> None:
        self.pass_cnt = 0
        self.print_mod = print_mod
        self.show_meta_data = show_meta_data

    def run_before_pass(self, mod, info):
        # Disable instruments so the InferType call below does not
        # re-trigger this instrument recursively.
        with tvm.transform.PassContext(instruments=[]):
            if self.print_mod:
                print(relay.transform.InferType()(mod))
            print(">" * 40, f"Running Pass#{self.pass_cnt}:", info)
            self.pass_cnt += 1


x = relay.var("x", shape=(1, 1, 1, 1))
y = relay.image.resize2d(x, (2, 4))
z = relay.transpose(y, (0, 1, 3, 2))
a = relay.nn.max_pool2d(z)
b = relay.image.resize2d(a, (1, 1))
func = relay.Function((x,), b)
mod = tvm.IRModule.from_expr(func)
print(InferType()(mod))

with tvm.transform.PassContext(opt_level=4, instruments=[PrintIR()]):
    relay.create_executor("graph", mod).evaluate()
```
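To confirm which pass introduces the layout, AlterOpLayout can also be applied by itself on the typed module (a sketch; this bypasses the rest of the opt_level=4 pipeline, so the resulting IR may differ slightly from the dump above):
```python
# Sketch: run AlterOpLayout in isolation to observe the NCWH rewrite.
mod = relay.transform.InferType()(mod)
with tvm.transform.PassContext(opt_level=4):
    print(relay.transform.AlterOpLayout()(mod))
```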
Part of error log:
```
6: _ZZN3tvm5relay11ExprFunctorIFNS_7runtime5ArrayINS_2te6TensorEvEERKNS_
5: tvm::relay::tec::LowerToTECompute::VisitExpr_(tvm::relay::CallNode const*)
4: tvm::relay::backend::MemoizedExprTranslator<tvm::runtime::Array<tvm::te::Tensor, void> >::VisitExpr(tvm::RelayExpr const&)
3: tvm::relay::ExprFunctor<tvm::runtime::Array<tvm::te::Tensor, void> (tvm::RelayExpr const&)>::VisitExpr(tvm::RelayExpr const&)
2: _ZZN3tvm5relay11ExprFunctorIFNS_7runtime5ArrayINS_2te6TensorEvEERKNS_
1: tvm::relay::tec::LowerToTECompute::VisitExpr_(tvm::relay::CallNode const*)
0: tvm::runtime::PackedFuncObj::Extractor<tvm::runtime::PackedFuncSubObj<TVMFuncCreateFromCFunc::{lambda(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)#2}> >::Call(tvm::runtime::PackedFuncObj const*, tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)
  File "tvm/_ffi/_cython/./packed_func.pxi", line 56, in tvm._ffi._cy3.core.tvm_callback
  File "/workspace/workspace/tvm-intact/python/tvm/relay/backend/te_compiler.py", line 317, in lower_call
    best_impl, outputs = select_implementation(op, call.attrs, inputs, ret_type, target)
  File "/workspace/workspace/tvm-intact/python/tvm/relay/backend/te_compiler.py", line 207, in select_implementation
    outs = impl.compute(attrs, inputs, out_type)
  File "/workspace/workspace/tvm-intact/python/tvm/relay/op/op.py", line 126, in compute
    return _OpImplementationCompute(self, attrs, inputs, out_type)
  File "tvm/_ffi/_cython/./packed_func.pxi", line 331, in tvm._ffi._cy3.core.PackedFuncBase.__call__
  File "tvm/_ffi/_cython/./packed_func.pxi", line 276, in tvm._ffi._cy3.core.FuncCall
  File "tvm/_ffi/_cython/./base.pxi", line 181, in tvm._ffi._cy3.core.CHECK_CALL
3: TVMFuncCall
2: tvm::runtime::PackedFuncObj::Extractor<tvm::runtime::PackedFuncSubObj<tvm::relay::{lambda(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)#4}> >::Call(tvm::runtime::PackedFuncObj const*, tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)
1: tvm::relay::OpImplementation::Compute(tvm::Attrs const&, tvm::runtime::Array<tvm::te::Tensor, void> const&, tvm::Type const&)
0: tvm::runtime::PackedFuncObj::Extractor<tvm::runtime::PackedFuncSubObj<TVMFuncCreateFromCFunc::{lambda(tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)#2}> >::Call(tvm::runtime::PackedFuncObj const*, tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)
  File "tvm/_ffi/_cython/./packed_func.pxi", line 56, in tvm._ffi._cy3.core.tvm_callback
  File "/workspace/workspace/tvm-intact/python/tvm/relay/op/image/_image.py", line 156, in compute_resize2d
    out_dtype,
  File "/workspace/workspace/tvm-intact/python/tvm/topi/image/resize.py", line 824, in resize2d
    raise ValueError("%s layout is not supported." % layout)
ValueError: NCWH layout is not supported.
```