sleichu opened a new issue, #16139:
URL: https://github.com/apache/tvm/issues/16139

   
   ### Expected behavior
   I want to build a CUDA executable library and generate the C and CUDA source code for reference.
   
   ### Actual behavior
   
![image](https://github.com/apache/tvm/assets/140134299/6ce6ee2b-7064-4be8-97b0-f4af68cdf005)
   It looks like cg.InitGlobalContext() may not have been called.
   
   ```
   RuntimeError: Compilation error:
   /tmp/tmpfrjmgdqq/lib0.c: In function ‘int32_t reduce_max(void*, int32_t*, 
int32_t, void*, int32_t*, void*)’:
   /tmp/tmpfrjmgdqq/lib0.c:41:34: error: ‘__tvm_module_ctx’ was not declared in 
this scope
      41 |     if (TVMBackendGetFuncFromEnv(__tvm_module_ctx, 
"__tvm_set_device", &__tvm_set_device_packed) != 0) {
         |                                  ^~~~~~~~~~~~~~~~
   /tmp/tmpfrjmgdqq/lib0.c:67:34: error: ‘__tvm_module_ctx’ was not declared in 
this scope
      67 |     if (TVMBackendGetFuncFromEnv(__tvm_module_ctx, 
"reduce_max_kernel", &reduce_max_kernel_packed) != 0) {
         |                                  ^~~~~~~~~~~~~~~~
   
   Command line: /usr/bin/g++ -shared -fPIC -o lib.so /tmp/tmpfrjmgdqq/lib0.c 
/tmp/tmpfrjmgdqq/devc.o -I/home/nio/data/Build/tvm/include 
-I/home/nio/data/Build/tvm/3rdparty/dlpack/include 
-I/home/nio/data/Build/tvm/3rdparty/dmlc-core/include
   ```
   
   ### Environment
   
   Ubuntu 20.04, CUDA 11.4, TVM main branch
   
   ### Steps to reproduce
   
   ```
   import tvm
   from tvm import te
   import numpy as np
   import os
   from tvm import topi
   from tvm import relay
   from tvm import auto_scheduler
   from tvm.relay.op.contrib.cudnn import _lower_conv2d
   
   
   os.environ["PATH"] = os.environ["PATH"]+":/usr/local/cuda/bin/"
   
   tgt_gpu = tvm.target.Target(target='cuda', host='c')
   
   rt = tvm.relay.backend.Runtime("cpp")
   def gen_reduce_max():
       n = te.var('n')
       A = te.placeholder((n, 2), name='A')
       B = te.compute((n, ), lambda i: te.max(A[i, 0], A[i, 1]))
       s = te.create_schedule(B.op)
       bx, tx = s[B].split(B.op.axis[0], factor=64)
       s[B].bind(bx, te.thread_axis("blockIdx.x"))
       s[B].bind(tx, te.thread_axis("threadIdx.x"))
       reduce_max = tvm.build(s, [A, B], target=tgt_gpu, name="reduce_max")
       return reduce_max
   if __name__ == "__main__":
       flib = gen_reduce_max()
       if (
           tgt_gpu.kind.name == "cuda"
       ):
   
           dev_module = flib.imported_modules[0]
           print("-----GPU code-----")
           print(dev_module.get_source())
           print("-----CPU code-----")
           print(flib.get_source())
       else:
           print(flib.get_source())
       flib.export_library("lib.so")
   ```
   
   ### In addition, the entry function cannot be found.
   ```
       raise AttributeError(f"Module has no function '{name}'")
   AttributeError: Module has no function '__tvm_main__'
   ```
   ```
    if (name == runtime::symbol::tvm_module_main) {
         const char* entry_name =
             reinterpret_cast<const 
char*>(lib_->GetSymbol(runtime::symbol::tvm_module_main));
         ICHECK(entry_name != nullptr)
             << "Symbol " << runtime::symbol::tvm_module_main << " is not 
presented";
         faddr = 
reinterpret_cast<TVMBackendPackedCFunc>(lib_->GetSymbol(entry_name));
   ```
   ```
   
__tvm_main__:��UH��H��0H�}�H�u��U�H�M�L�E�L�M�L�E�H�}�H�M��U�H�u�H�E�M��I��H����������H�H��
   ```
    This may be my own mistake. Thanks for your time.
   


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to