This is an automated email from the ASF dual-hosted git repository.
ruihangl pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new 36522b2e4c [FFI][Bugfix] Enable `load_inline` on macos (#18285)
36522b2e4c is described below
commit 36522b2e4c30ec6106ace79501e58622ec051ee8
Author: Yaoyao Ding <[email protected]>
AuthorDate: Mon Sep 8 22:19:34 2025 -0400
[FFI][Bugfix] Enable `load_inline` on macos (#18285)
This PR fixes the bug, enabling `tvm_ffi.cpp.load_inline` on macOS.
We need to link `libtvm_ffi.dylib` into the custom module.
---
ffi/python/tvm_ffi/cpp/load_inline.py | 11 ++++-------
ffi/tests/python/test_load_inline.py | 16 ----------------
2 files changed, 4 insertions(+), 23 deletions(-)
diff --git a/ffi/python/tvm_ffi/cpp/load_inline.py
b/ffi/python/tvm_ffi/cpp/load_inline.py
index 754a9d7465..111dee8d52 100644
--- a/ffi/python/tvm_ffi/cpp/load_inline.py
+++ b/ffi/python/tvm_ffi/cpp/load_inline.py
@@ -140,6 +140,9 @@ def _generate_ninja_build(
"""Generate the content of build.ninja for building the module."""
default_include_paths = [find_include_path(), find_dlpack_include_path()]
+ tvm_ffi_lib = find_libtvm_ffi()
+ tvm_ffi_lib_path = os.path.dirname(tvm_ffi_lib)
+ tvm_ffi_lib_name = os.path.splitext(os.path.basename(tvm_ffi_lib))[0]
if IS_WINDOWS:
default_cflags = [
"/std:c++17",
@@ -157,17 +160,11 @@ def _generate_ninja_build(
"/EHsc",
]
default_cuda_cflags = ["-Xcompiler", "/std:c++17", "/O2"]
- # Find the TVM FFI library for linking
- tvm_ffi_lib = find_libtvm_ffi()
- tvm_ffi_lib_path = os.path.dirname(tvm_ffi_lib)
- tvm_ffi_lib_name = os.path.splitext(os.path.basename(tvm_ffi_lib))[
- 0
- ] # Remove .dll extension
default_ldflags = ["/DLL", f"/LIBPATH:{tvm_ffi_lib_path}",
f"{tvm_ffi_lib_name}.lib"]
else:
default_cflags = ["-std=c++17", "-fPIC", "-O2"]
default_cuda_cflags = ["-Xcompiler", "-fPIC", "-std=c++17", "-O2"]
- default_ldflags = ["-shared"]
+ default_ldflags = ["-shared", "-L{}".format(tvm_ffi_lib_path),
"-ltvm_ffi"]
if with_cuda:
# determine the compute capability of the current GPU
diff --git a/ffi/tests/python/test_load_inline.py
b/ffi/tests/python/test_load_inline.py
index dbaf439408..6510cca540 100644
--- a/ffi/tests/python/test_load_inline.py
+++ b/ffi/tests/python/test_load_inline.py
@@ -28,10 +28,6 @@ import tvm_ffi.cpp
from tvm_ffi.module import Module
[email protected](
- not sys.platform.startswith("linux") and not
sys.platform.startswith("win32"),
- reason="need to support other platforms",
-)
def test_load_inline_cpp():
mod: Module = tvm_ffi.cpp.load_inline(
name="hello",
@@ -58,10 +54,6 @@ def test_load_inline_cpp():
numpy.testing.assert_equal(x + 1, y)
[email protected](
- not sys.platform.startswith("linux") and not
sys.platform.startswith("win32"),
- reason="need to support other platforms",
-)
def test_load_inline_cpp_with_docstrings():
mod: Module = tvm_ffi.cpp.load_inline(
name="hello",
@@ -88,10 +80,6 @@ def test_load_inline_cpp_with_docstrings():
numpy.testing.assert_equal(x + 1, y)
[email protected](
- not sys.platform.startswith("linux") and not
sys.platform.startswith("win32"),
- reason="need to support other platforms",
-)
def test_load_inline_cpp_multiple_sources():
mod: Module = tvm_ffi.cpp.load_inline(
name="hello",
@@ -134,10 +122,6 @@ def test_load_inline_cpp_multiple_sources():
numpy.testing.assert_equal(x + 1, y)
[email protected](
- not sys.platform.startswith("linux") and not
sys.platform.startswith("win32"),
- reason="need to support other platforms",
-)
def test_load_inline_cpp_build_dir():
mod: Module = tvm_ffi.cpp.load_inline(
name="hello",