abergeron opened a new issue #5704:
URL: https://github.com/apache/incubator-tvm/issues/5704


   To reproduce, build TVM on macOS with Metal enabled (I build with min-version=10.11, in case that makes a difference), install the Python modules, and run the test with pytest, e.g. `pytest tests/python/integration/test_ewise.py::test_add`.
   
   The Metal compiler rejects the generated source because of casts to `float44`, which is not a valid Metal type; the intended type is presumably `float4`. It looks as if the lane count gets appended to the vector type name twice somewhere in the Metal codegen for this vectorized cast.
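   
   For convenience, a standalone sketch distilled from the failing test that should trigger the same error (assumes a Metal-enabled TVM build on a machine with a Metal device; `myadd` is just the kernel name from the test):
   
   ```python
   import numpy as np
   import tvm
   from tvm import te
   
   # Same vectorized schedule as test_add, specialized to float32 and Metal.
   n = te.size_var("n")
   A = te.placeholder((n,), name="A", dtype="float32")
   B = te.placeholder((n,), name="B", dtype="float32")
   C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name="C")
   s = te.create_schedule(C.op)
   num_thread = 16
   bx, x = s[C].split(C.op.axis[0], factor=num_thread * 4)
   tx, x = s[C].split(x, nparts=num_thread)
   _, x = s[C].split(x, factor=4)
   s[C].bind(bx, te.thread_axis("blockIdx.x"))
   s[C].bind(tx, te.thread_axis("threadIdx.x"))
   s[C].vectorize(x)  # vectorization produces the cast that fails to compile
   
   # tvm.build succeeds; the Metal source is only compiled when the kernel is
   # first loaded, so the failure surfaces on the call below.
   fadd = tvm.build(s, [A, B, C], "metal", name="myadd")
   ctx = tvm.context("metal", 0)
   a = tvm.nd.array((np.random.uniform(size=1024) * 256).astype("float32"), ctx)
   b = tvm.nd.array((np.random.uniform(size=1024) * 256).astype("float32"), ctx)
   c = tvm.nd.array(np.zeros(1024, dtype="float32"), ctx)
   fadd(a, b, c)  # raises TVMError: ... use of undeclared identifier 'float44'
   ```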
   
   ```
   [23:06:17] /Users/anakha/miniconda/conda-bld/tvm-libs_1590891729956/work/src/runtime/metal/metal_device_api.mm:131: Intializing Metal device 0, name=AMD Radeon R9 M290
   ___________________________________ test_add ___________________________________
   
       def test_add():
           def run(dtype):
               # graph
               n = te.size_var('n')
               A = te.placeholder((n,), name='A', dtype=dtype)
               B = te.placeholder((n,), name='B', dtype=dtype)
               bias = te.var("bias", dtype=dtype)
               scale = te.var("scale", dtype=dtype)
               C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name='C')
               # schedule
               s = te.create_schedule(C.op)
               # create iter var and assign them tags.
               num_thread = 16
               bx, x = s[C].split(C.op.axis[0], factor=num_thread*4)
               tx, x = s[C].split(x, nparts=num_thread)
               _, x = s[C].split(x, factor=4)
               s[C].bind(bx, te.thread_axis("blockIdx.x"))
               s[C].bind(tx, te.thread_axis("threadIdx.x"))
               s[C].vectorize(x)
       
               # one line to build the function.
               def check_device(device):
                   ctx = tvm.context(device, 0)
                   if not ctx.exist:
                       print("skip because %s is not enabled.." % device)
                       return
                   fadd = tvm.build(s, [A, B, C],
                                    device,
                                    name="myadd")
       
                   # launch the kernel.
                   n = 1024
                    a = tvm.nd.array((np.random.uniform(size=n) * 256).astype(A.dtype), ctx)
                    b = tvm.nd.array((np.random.uniform(size=n) * 256).astype(B.dtype), ctx)
                   c = tvm.nd.array(np.zeros(n, dtype=C.dtype), ctx)
                   ftimer = fadd.time_evaluator(fadd.entry_name, ctx, number=1)
                   tcost = ftimer(a, b, c).mean
                   tvm.testing.assert_allclose(
                       c.asnumpy(), a.asnumpy() + b.asnumpy(), rtol=1e-6)
       
               check_device("opencl")
               check_device("cuda")
               if dtype == "float32":
                   check_device("metal")
                   check_device("vulkan")
       
   >       run("float32")
   
   tests/python/integration/test_ewise.py:254: 
   _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
   tests/python/integration/test_ewise.py:251: in run
       check_device("metal")
   tests/python/integration/test_ewise.py:244: in check_device
       tcost = ftimer(a, b, c).mean
   
   ../_test_env_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_p/lib/python3.7/site-packages/tvm/runtime/module.py:215: in evaluator
       blob = feval(*args)
   tvm/_ffi/_cython/./packed_func.pxi:321: in tvm._ffi._cy3.core.PackedFuncBase.__call__
       ???
   tvm/_ffi/_cython/./packed_func.pxi:256: in tvm._ffi._cy3.core.FuncCall
       ???
   tvm/_ffi/_cython/./packed_func.pxi:245: in tvm._ffi._cy3.core.FuncCall3
       ???
   _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
   
   >   ???
   E   tvm._ffi.base.TVMError: Traceback (most recent call last):
   E     [bt] (3) 4   libtvm.dylib                        0x000000011d8f4f48 TVMFuncCall + 72
   E     [bt] (2) 3   libtvm.dylib                        0x000000011d93c687 std::__1::__function::__func<tvm::runtime::WrapTimeEvaluator(tvm::runtime::PackedFunc, DLContext, int, int, int)::$_1, std::__1::allocator<tvm::runtime::WrapTimeEvaluator(tvm::runtime::PackedFunc, DLContext, int, int, int)::$_1>, void (tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)>::operator()(tvm::runtime::TVMArgs&&, tvm::runtime::TVMRetValue*&&) + 359
   E     [bt] (1) 2   libtvm.dylib                        0x000000011d900103 std::__1::__function::__func<tvm::runtime::WrapPackedFunc(int (*)(TVMValue*, int*, int, TVMValue*, int*), tvm::runtime::ObjectPtr<tvm::runtime::Object> const&)::$_0, std::__1::allocator<tvm::runtime::WrapPackedFunc(int (*)(TVMValue*, int*, int, TVMValue*, int*), tvm::runtime::ObjectPtr<tvm::runtime::Object> const&)::$_0>, void (tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)>::operator()(tvm::runtime::TVMArgs&&, tvm::runtime::TVMRetValue*&&) + 259
   E     [bt] (0) 1   libtvm.dylib                        0x000000011ceb84bf dmlc::LogMessageFatal::~LogMessageFatal() + 111
   E     [bt] (7) 8   ???                                 0x0000001a26946585 0x0 + 112316409221
   E     [bt] (6) 7   libtvm.dylib                        0x000000011d8f4be4 TVMBackendGetFuncFromEnv + 164
   E     [bt] (5) 6   libtvm.dylib                        0x000000011d901d77 tvm::runtime::ModuleNode::GetFuncFromEnv(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&) + 231
   E     [bt] (4) 5   libtvm.dylib                        0x000000011d901466 tvm::runtime::ModuleNode::GetFunction(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, bool) + 86
   E     [bt] (3) 4   libtvm.dylib                        0x000000011d95522f tvm::runtime::MetalModuleNode::GetFunction(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, tvm::runtime::ObjectPtr<tvm::runtime::Object> const&) + 655
   E     [bt] (2) 3   libtvm.dylib                        0x000000011d955c18 tvm::runtime::MetalWrappedFunc::Init(tvm::runtime::MetalModuleNode*, tvm::runtime::ObjectPtr<tvm::runtime::Object>, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, unsigned long, unsigned long, std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > const&) + 184
   E     [bt] (1) 2   libtvm.dylib                        0x000000011d9572ca tvm::runtime::MetalModuleNode::GetPipelineState(unsigned long, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&) + 1146
   E     [bt] (0) 1   libtvm.dylib                        0x000000011ceb84bf dmlc::LogMessageFatal::~LogMessageFatal() + 111
   E     File "/Users/anakha/miniconda/conda-bld/tvm-libs_1590891729956/work/src/runtime/metal/metal_module.mm", line 101
   E     File "/Users/anakha/miniconda/conda-bld/tvm-libs_1590891729956/work/src/runtime/library_module.cc", line 78
   E   TVMError: Check failed: ret == 0 (-1 vs. 0) : Fail to compile metal lib:Compilation failed: 
   E   program_source:26:21: error: use of undeclared identifier 'float44'
   E         float4 _4 = ((float44)(((device float*)A)[_2[0]],((device float*)A)[_2[1]],((device float*)A)[_2[2]],((device float*)A)[_2[3]])) + ((float44)(((device float*)B)[_3[0]],((device float*)B)[_3[1]],((device float*)B)[_3[2]],((device float*)B)[_3[3]]));
   E                       ^
   E   program_source:26:140: error: use of undeclared identifier 'float44'
   E         float4 _4 = ((float44)(((device float*)A)[_2[0]],((device float*)A)[_2[1]],((device float*)A)[_2[2]],((device float*)A)[_2[3]])) + ((float44)(((device float*)B)[_3[0]],((device float*)B)[_3[1]],((device float*)B)[_3[2]],((device float*)B)[_3[3]]));
   E                                                                                                                                                 ^
   
   tvm/_ffi/_cython/./base.pxi:160: TVMError
   ```
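   
   Note that `tvm.build` itself succeeds; the backtrace shows the failure coming from `MetalModuleNode::GetPipelineState`, i.e. the runtime compile on first use. The generated source can therefore be inspected without launching anything. A minimal sketch, assuming `fadd` was built for the Metal target as in the snippet above:
   
   ```python
   # Dump the generated Metal source; line 26 should contain the
   # invalid '(float44)' casts quoted in the error output.
   dev_mod = fadd.imported_modules[0]
   print(dev_mod.get_source())
   ```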
   

