coffezhou opened a new issue, #17964:
URL: https://github.com/apache/tvm/issues/17964

   ### Expected behavior
   
   TVM should compile the model correctly.
   
   ### Actual behavior
   
   When compiling the model with the default relax optimization pipeline, TVM 
crashes as follows:
   ```text
   Traceback (most recent call last):
     File "/home/carla/Documents/test/test.py", line 51, in <module>
       main()
     File "/home/carla/Documents/test/test.py", line 45, in main
       ex = relax.build(tvm_model, target="llvm", relax_pipeline=relax_pipeline)
            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
     File "/home/carla/Documents/tvm/python/tvm/relax/vm_build.py", line 253, in build
       mod = relax_pipeline(mod)
             ^^^^^^^^^^^^^^^^^^^
     File "/home/carla/Documents/tvm/python/tvm/ir/transform.py", line 238, in __call__
       return _ffi_transform_api.RunPass(self, mod)
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
     File "tvm/ffi/cython/./function.pxi", line 212, in tvm.ffi.core.Function.__call__
     File "tvm/ffi/cython/./function.pxi", line 265, in tvm.ffi.core.tvm_ffi_callback
     File "/home/carla/Documents/tvm/python/tvm/relax/backend/cpu_generic/pipeline.py", line 73, in _pipeline
       mod = seq(mod)
     File "/home/carla/Documents/tvm/python/tvm/ir/transform.py", line 238, in __call__
       return _ffi_transform_api.RunPass(self, mod)
     File "tvm/ffi/cython/./function.pxi", line 212, in tvm.ffi.core.Function.__call__
     File "tvm/ffi/cython/./function.pxi", line 265, in tvm.ffi.core.tvm_ffi_callback
     File "/home/carla/Documents/tvm/python/tvm/relax/transform/legalize_ops/nn.py", line 509, in _nn_gelu
       return bb.call_te(te_gelu, call.args[0], primfunc_name_hint="gelu")
     File "/home/carla/Documents/tvm/python/tvm/relax/block_builder.py", line 356, in call_te
       tir_func, call_args, output_sinfo, tir_vars = gen_call_tir_inputs(func, *args, **kwargs)
     File "/home/carla/Documents/tvm/python/tvm/relax/utils.py", line 351, in gen_call_tir_inputs
       te_args = _convert_te_arg(args)
     File "/home/carla/Documents/tvm/python/tvm/relax/utils.py", line 289, in _convert_te_arg
       new_arg = _convert_te_arg_helper(te_args)
     File "/home/carla/Documents/tvm/python/tvm/relax/utils.py", line 273, in <genexpr>
       return tuple(_convert_te_arg_helper(x) for x in arg)
     File "/home/carla/Documents/tvm/python/tvm/relax/utils.py", line 273, in <genexpr>
       return tuple(_convert_te_arg_helper(x) for x in arg)
     File "/home/carla/Documents/tvm/python/tvm/relax/utils.py", line 224, in _convert_te_arg_helper
       assert isinstance(
   AssertionError: emit_te now only supports Tensor that has ShapeExpr shape
   ```
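   
   For context, the assertion fires in `_convert_te_arg_helper` while `LegalizeOps` lowers a `relax.nn.gelu` call through `bb.call_te`. The sketch below is my paraphrase of the condition being checked, not the exact TVM source: every tensor argument handed to `call_te`/`emit_te` must carry a `struct_info.shape` that is a concrete `relax.ShapeExpr`, which the gelu input in this model apparently does not.
   
   ```python
   # Hedged illustration only -- names are taken from the traceback; the exact check
   # in tvm/relax/utils.py may differ. call_te expects each relax tensor argument to
   # have a TensorStructInfo whose `shape` field is a relax.ShapeExpr.
   from tvm import relax
   
   
   def has_shape_expr(expr: relax.Expr) -> bool:
       sinfo = expr.struct_info
       return isinstance(sinfo, relax.TensorStructInfo) and isinstance(
           sinfo.shape, relax.ShapeExpr
       )
   ```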
   
   ### Environment
   
   OS: Ubuntu 20.04
   TVM: 0.21.dev0 (bcb68b130)
   CUDA: 11.8
   
   ### Steps to reproduce
   
   This bug can be reproduced with the following code and the model in the attached testcase.zip. As the script shows, the model runs correctly under onnxruntime; however, TVM fails to compile it with the default pipeline.
   
   ```python
   import pickle
   import sys
   
   import numpy as np
   import onnx
   import onnxruntime
   
   import tvm
   from tvm import relax
   from tvm.relax.frontend.onnx import from_onnx
   
   
   def main():
       onnx_model = onnx.load("a674.onnx")
   
       with open("inputs.pkl", "rb") as fp:
           inputs = pickle.load(fp)
   
       try:
           ort_session = onnxruntime.InferenceSession(
               onnx_model.SerializeToString(), providers=["CPUExecutionProvider"]
           )
           ort_output = ort_session.run([], inputs)
       except Exception as e:
           print(e)
           sys.exit(1)
       print(ort_output)
   
       # Convert the onnx model into relax through the onnx importer.
       tvm_model = from_onnx(onnx_model, keep_params_in_input=True)
       # Convert operators for inference mode.
       tvm_model = relax.transform.DecomposeOpsForInference()(tvm_model)
       # Legalize any relax ops into tensorir.
       tvm_model = relax.transform.LegalizeOps()(tvm_model)
   
       # Separate model from parameters.
       tvm_model, params = relax.frontend.detach_params(tvm_model)
   
       # Compile the relax graph into a VM, then run.
       # ---------------------- cpu -----------------------
       with tvm.transform.PassContext(opt_level=0):
           target = tvm.target.Target("llvm", host="llvm")
           relax_pipeline = relax.pipeline.get_default_pipeline(target)
   
           ex = relax.build(tvm_model, target="llvm", relax_pipeline=relax_pipeline)
   
   
   if __name__ == "__main__":
       main()
   ```
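   
   To narrow things down, the module can be printed right before `LegalizeOps` to inspect the struct info of the gelu input. A small debugging sketch follows; it assumes `a674.onnx` from the attached testcase.zip is in the working directory:
   
   ```python
   # Debugging sketch (assumes a674.onnx from testcase.zip is present). Dumping the
   # module before LegalizeOps shows the R.nn.gelu call and the shape annotation of
   # its argument, which is what the failing assertion complains about.
   import onnx
   from tvm import relax
   from tvm.relax.frontend.onnx import from_onnx
   
   mod = from_onnx(onnx.load("a674.onnx"), keep_params_in_input=True)
   mod = relax.transform.DecomposeOpsForInference()(mod)
   mod.show()  # TVMScript dump; alternatively print(mod)
   ```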
   
   
[testcase.zip](https://github.com/user-attachments/files/20180080/testcase.zip)
   
   ### Triage
   
   * needs-triage
   

