This is an automated email from the ASF dual-hosted git repository.
junrushao pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new f9e6018cfe [Runtime] Make `export_library` parameters after
`file_name` keyword-only (#15658)
f9e6018cfe is described below
commit f9e6018cfe624670248a85ebc5a558fc20876d1c
Author: Krzysztof Parzyszek <[email protected]>
AuthorDate: Sat Sep 2 01:45:46 2023 -0500
[Runtime] Make `export_library` parameters after `file_name` keyword-only
(#15658)
This makes the code a bit more readable at little cost.
---
apps/android_camera/models/prepare_model.py | 2 +-
apps/android_rpc/tests/android_rpc_test.py | 6 +++---
apps/benchmark/adreno/adreno_gpu_bench_clml.py | 2 +-
apps/benchmark/adreno/adreno_gpu_bench_texture.py | 2 +-
apps/benchmark/arm_cpu_imagenet_bench.py | 2 +-
apps/benchmark/mobile_gpu_imagenet_bench.py | 2 +-
apps/hexagon_launcher/README.md | 2 +-
apps/ios_rpc/tests/ios_rpc_mobilenet.py | 2 +-
apps/ios_rpc/tests/ios_rpc_test.py | 4 ++--
apps/topi_recipe/gemm/android_gemm_square.py | 2 +-
docs/how_to/deploy/android.rst | 2 +-
gallery/how_to/deploy_models/deploy_model_on_adreno.py | 2 +-
gallery/how_to/deploy_models/deploy_model_on_android.py | 2 +-
gallery/how_to/tune_with_autoscheduler/tune_network_arm.py | 2 +-
gallery/how_to/tune_with_autoscheduler/tune_network_mali.py | 2 +-
gallery/how_to/tune_with_autotvm/tune_relay_arm.py | 2 +-
gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py | 2 +-
python/tvm/auto_scheduler/measure.py | 2 +-
python/tvm/autotvm/measure/measure_methods.py | 2 +-
python/tvm/contrib/cc.py | 4 ++--
python/tvm/contrib/pipeline_executor_build.py | 2 +-
python/tvm/driver/tvmc/model.py | 5 +++--
python/tvm/meta_schedule/builder/local_builder.py | 2 +-
python/tvm/meta_schedule/testing/custom_builder_runner.py | 2 +-
python/tvm/relay/backend/executor_factory.py | 4 ++--
python/tvm/runtime/module.py | 4 +++-
python/tvm/testing/runner.py | 2 +-
tests/python/relay/opencl_texture/utils/adreno_utils.py | 4 ++--
tests/python/unittest/test_runtime_module_load.py | 2 +-
tests/python/unittest/test_runtime_rpc.py | 2 +-
tests/python/unittest/test_target_codegen_blob.py | 4 ++--
web/tests/python/prepare_test_libs.py | 2 +-
web/tests/python/webgpu_rpc_test.py | 2 +-
web/tests/python/websock_rpc_test.py | 2 +-
34 files changed, 45 insertions(+), 42 deletions(-)
diff --git a/apps/android_camera/models/prepare_model.py
b/apps/android_camera/models/prepare_model.py
index 4e127c0129..9f2cbbdd6d 100644
--- a/apps/android_camera/models/prepare_model.py
+++ b/apps/android_camera/models/prepare_model.py
@@ -100,7 +100,7 @@ def main(model_str, output_path):
with tvm.transform.PassContext(opt_level=3):
graph, lib, params = relay.build(net, tvm.target.Target(target,
target_host), params=params)
print("dumping lib...")
- lib.export_library(output_path_str + "/" + "deploy_lib_cpu.so",
ndk.create_shared)
+ lib.export_library(output_path_str + "/" + "deploy_lib_cpu.so",
fcompile=ndk.create_shared)
print("dumping graph...")
with open(output_path_str + "/" + "deploy_graph.json", "w") as f:
f.write(graph)
diff --git a/apps/android_rpc/tests/android_rpc_test.py
b/apps/android_rpc/tests/android_rpc_test.py
index eac77c3133..0027cc4ba3 100644
--- a/apps/android_rpc/tests/android_rpc_test.py
+++ b/apps/android_rpc/tests/android_rpc_test.py
@@ -64,7 +64,7 @@ def test_rpc_module():
s[B].pragma(xi, "parallel_barrier_when_finish")
f = tvm.build(s, [A, B], target, name="myadd_cpu")
path_dso_cpu = temp.relpath("cpu_lib.so")
- f.export_library(path_dso_cpu, ndk.create_shared)
+ f.export_library(path_dso_cpu, fcompile=ndk.create_shared)
# Execute the portable graph on cpu target
print("Run CPU test ...")
@@ -88,7 +88,7 @@ def test_rpc_module():
# If we don't want to do metal and only use cpu, just set target to be
target
f = tvm.build(s, [A, B], tvm.target.Target("opencl", host=target),
name="myadd")
path_dso_cl = temp.relpath("dev_lib_cl.so")
- f.export_library(path_dso_cl, ndk.create_shared)
+ f.export_library(path_dso_cl, fcompile=ndk.create_shared)
print("Run GPU(OpenCL Flavor) test ...")
dev = remote.cl(0)
@@ -111,7 +111,7 @@ def test_rpc_module():
# If we don't want to do metal and only use cpu, just set target to be
target
f = tvm.build(s, [A, B], tvm.target.Target("vulkan", host=target),
name="myadd")
path_dso_vulkan = temp.relpath("dev_lib_vulkan.so")
- f.export_library(path_dso_vulkan, ndk.create_shared)
+ f.export_library(path_dso_vulkan, fcompile=ndk.create_shared)
print("Run GPU(Vulkan Flavor) test ...")
dev = remote.vulkan(0)
diff --git a/apps/benchmark/adreno/adreno_gpu_bench_clml.py
b/apps/benchmark/adreno/adreno_gpu_bench_clml.py
index ee2cd82df8..e045f60c3a 100755
--- a/apps/benchmark/adreno/adreno_gpu_bench_clml.py
+++ b/apps/benchmark/adreno/adreno_gpu_bench_clml.py
@@ -224,7 +224,7 @@ def evaluate_network(network, target, target_host, dtype,
repeat):
tmp = tempdir()
filename = "%s.so" % network
- lib.export_library(tmp.relpath(filename), ndk.create_shared)
+ lib.export_library(tmp.relpath(filename), fcompile=ndk.create_shared)
# upload library and params
print_progress("%-20s uploading..." % network)
diff --git a/apps/benchmark/adreno/adreno_gpu_bench_texture.py
b/apps/benchmark/adreno/adreno_gpu_bench_texture.py
index bf2c69f61e..7b10111186 100755
--- a/apps/benchmark/adreno/adreno_gpu_bench_texture.py
+++ b/apps/benchmark/adreno/adreno_gpu_bench_texture.py
@@ -220,7 +220,7 @@ def evaluate_network(network, target, target_host, dtype,
repeat):
tmp = tempdir()
filename = "%s.so" % network
- lib.export_library(tmp.relpath(filename), ndk.create_shared)
+ lib.export_library(tmp.relpath(filename), fcompile=ndk.create_shared)
# upload library and params
print_progress("%-20s uploading..." % network)
diff --git a/apps/benchmark/arm_cpu_imagenet_bench.py
b/apps/benchmark/arm_cpu_imagenet_bench.py
index dd89f0562b..c618a89c8f 100644
--- a/apps/benchmark/arm_cpu_imagenet_bench.py
+++ b/apps/benchmark/arm_cpu_imagenet_bench.py
@@ -47,7 +47,7 @@ def evaluate_network(network, target, target_host, repeat):
from tvm.contrib import ndk
filename = "%s.so" % network
- lib.export_library(tmp.relpath(filename), ndk.create_shared)
+ lib.export_library(tmp.relpath(filename), fcompile=ndk.create_shared)
else:
filename = "%s.tar" % network
lib.export_library(tmp.relpath(filename))
diff --git a/apps/benchmark/mobile_gpu_imagenet_bench.py
b/apps/benchmark/mobile_gpu_imagenet_bench.py
index 295b0c0eb8..83a6e0b109 100644
--- a/apps/benchmark/mobile_gpu_imagenet_bench.py
+++ b/apps/benchmark/mobile_gpu_imagenet_bench.py
@@ -47,7 +47,7 @@ def evaluate_network(network, target, target_host, dtype,
repeat):
from tvm.contrib import ndk
filename = "%s.so" % network
- lib.export_library(tmp.relpath(filename), ndk.create_shared)
+ lib.export_library(tmp.relpath(filename), fcompile=ndk.create_shared)
else:
filename = "%s.tar" % network
lib.export_library(tmp.relpath(filename))
diff --git a/apps/hexagon_launcher/README.md b/apps/hexagon_launcher/README.md
index 8a1db1bc2b..69d9fdc98a 100644
--- a/apps/hexagon_launcher/README.md
+++ b/apps/hexagon_launcher/README.md
@@ -186,7 +186,7 @@ lowered = tvm.relay.build(
executor=Executor("aot", {"unpacked-api": False, "interface-api":
"packed"}),
)
-lowered.export_library("model-aot.so", tvm.contrib.hexagon.link_shared)
+lowered.export_library("model-aot.so",
fcompile=tvm.contrib.hexagon.link_shared)
```
diff --git a/apps/ios_rpc/tests/ios_rpc_mobilenet.py
b/apps/ios_rpc/tests/ios_rpc_mobilenet.py
index 0c958a07d8..1872cf6787 100644
--- a/apps/ios_rpc/tests/ios_rpc_mobilenet.py
+++ b/apps/ios_rpc/tests/ios_rpc_mobilenet.py
@@ -98,7 +98,7 @@ def test_mobilenet(host, port, key, mode):
mod, target=tvm.target.Target(target, host=target_host),
params=params
)
path_dso = temp.relpath("deploy.dylib")
- lib.export_library(path_dso, xcode.create_dylib, arch=arch, sdk=sdk)
+ lib.export_library(path_dso, fcompile=xcode.create_dylib, arch=arch,
sdk=sdk)
# connect to the proxy
if mode == "tracker":
diff --git a/apps/ios_rpc/tests/ios_rpc_test.py
b/apps/ios_rpc/tests/ios_rpc_test.py
index 78f1d3a6c5..f0c31cd7d2 100644
--- a/apps/ios_rpc/tests/ios_rpc_test.py
+++ b/apps/ios_rpc/tests/ios_rpc_test.py
@@ -58,7 +58,7 @@ def test_rpc_module(host, port, key, mode):
# If we don't want to do metal and only use cpu, just set target to be
target
f = tvm.build(s, [A, B], tvm.target.Target("metal", host=target),
name="myadd")
path_dso1 = temp.relpath("dev_lib.dylib")
- f.export_library(path_dso1, xcode.create_dylib, arch=arch, sdk=sdk)
+ f.export_library(path_dso1, fcompile=xcode.create_dylib, arch=arch,
sdk=sdk)
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=64)
@@ -67,7 +67,7 @@ def test_rpc_module(host, port, key, mode):
s[B].pragma(xi, "parallel_barrier_when_finish")
f = tvm.build(s, [A, B], target, name="myadd_cpu")
path_dso2 = temp.relpath("cpu_lib.dylib")
- f.export_library(path_dso2, xcode.create_dylib, arch=arch, sdk=sdk)
+ f.export_library(path_dso2, fcompile=xcode.create_dylib, arch=arch,
sdk=sdk)
# connect to the proxy
if mode == "tracker":
diff --git a/apps/topi_recipe/gemm/android_gemm_square.py
b/apps/topi_recipe/gemm/android_gemm_square.py
index 5f13d88707..8f957ef7ed 100644
--- a/apps/topi_recipe/gemm/android_gemm_square.py
+++ b/apps/topi_recipe/gemm/android_gemm_square.py
@@ -123,7 +123,7 @@ def test_gemm_gpu(N, times, bn, num_block, num_thread):
f = tvm.build(s, [A, B, C], tvm.target.Target("opencl", host=target),
name="gemm_gpu")
temp = utils.tempdir()
path_dso = temp.relpath("gemm_gpu.so")
- f.export_library(path_dso, ndk.create_shared)
+ f.export_library(path_dso, fcompile=ndk.create_shared)
# connect to the proxy
remote = rpc.connect(proxy_host, proxy_port, key=key)
diff --git a/docs/how_to/deploy/android.rst b/docs/how_to/deploy/android.rst
index 256978d006..2f54697401 100644
--- a/docs/how_to/deploy/android.rst
+++ b/docs/how_to/deploy/android.rst
@@ -27,7 +27,7 @@ The code below will save the compilation output which is
required on android tar
.. code:: python
- lib.export_library("deploy_lib.so", ndk.create_shared)
+ lib.export_library("deploy_lib.so", fcompile=ndk.create_shared)
with open("deploy_graph.json", "w") as fo:
fo.write(graph.json())
with open("deploy_param.params", "wb") as fo:
diff --git a/gallery/how_to/deploy_models/deploy_model_on_adreno.py
b/gallery/how_to/deploy_models/deploy_model_on_adreno.py
index d483fe49b0..0ea76bb288 100644
--- a/gallery/how_to/deploy_models/deploy_model_on_adreno.py
+++ b/gallery/how_to/deploy_models/deploy_model_on_adreno.py
@@ -420,7 +420,7 @@ temp = utils.tempdir()
dso_binary = "dev_lib_cl.so"
dso_binary_path = temp.relpath(dso_binary)
fcompile = ndk.create_shared if not local_demo else None
-lib.export_library(dso_binary_path, fcompile)
+lib.export_library(dso_binary_path, fcompile=fcompile)
remote_path = "/data/local/tmp/" + dso_binary
remote.upload(dso_binary_path)
rlib = remote.load_module(dso_binary)
diff --git a/gallery/how_to/deploy_models/deploy_model_on_android.py
b/gallery/how_to/deploy_models/deploy_model_on_android.py
index 2e5d916cd6..6ed69e64d4 100644
--- a/gallery/how_to/deploy_models/deploy_model_on_android.py
+++ b/gallery/how_to/deploy_models/deploy_model_on_android.py
@@ -281,7 +281,7 @@ with tvm.transform.PassContext(opt_level=3):
tmp = utils.tempdir()
lib_fname = tmp.relpath("net.so")
fcompile = ndk.create_shared if not local_demo else None
-lib.export_library(lib_fname, fcompile)
+lib.export_library(lib_fname, fcompile=fcompile)
######################################################################
# Deploy the Model Remotely by RPC
diff --git a/gallery/how_to/tune_with_autoscheduler/tune_network_arm.py
b/gallery/how_to/tune_with_autoscheduler/tune_network_arm.py
index a109acba06..adc9c9fbb2 100644
--- a/gallery/how_to/tune_with_autoscheduler/tune_network_arm.py
+++ b/gallery/how_to/tune_with_autoscheduler/tune_network_arm.py
@@ -331,7 +331,7 @@ def tune_and_evaluate():
from tvm.contrib import ndk
filename = "net.so"
- lib.export_library(tmp.relpath(filename), ndk.create_shared)
+ lib.export_library(tmp.relpath(filename), fcompile=ndk.create_shared)
else:
filename = "net.tar"
lib.export_library(tmp.relpath(filename))
diff --git a/gallery/how_to/tune_with_autoscheduler/tune_network_mali.py
b/gallery/how_to/tune_with_autoscheduler/tune_network_mali.py
index e72e261e4b..ab754ea30f 100644
--- a/gallery/how_to/tune_with_autoscheduler/tune_network_mali.py
+++ b/gallery/how_to/tune_with_autoscheduler/tune_network_mali.py
@@ -255,7 +255,7 @@ def tune_and_evaluate():
temp = utils.tempdir()
filename = "deploy_lib.so"
path_lib = temp.relpath(filename)
- lib.export_library(path_lib, ndk.create_shared)
+ lib.export_library(path_lib, fcompile=ndk.create_shared)
remote.upload(path_lib)
loaded_lib = remote.load_module(filename)
module = graph_executor.GraphModule(loaded_lib["default"](dev))
diff --git a/gallery/how_to/tune_with_autotvm/tune_relay_arm.py
b/gallery/how_to/tune_with_autotvm/tune_relay_arm.py
index 3e5d7551e8..4f2e952ce3 100644
--- a/gallery/how_to/tune_with_autotvm/tune_relay_arm.py
+++ b/gallery/how_to/tune_with_autotvm/tune_relay_arm.py
@@ -357,7 +357,7 @@ def tune_and_evaluate(tuning_opt):
from tvm.contrib import ndk
filename = "net.so"
- lib.export_library(tmp.relpath(filename), ndk.create_shared)
+ lib.export_library(tmp.relpath(filename),
fcompile=ndk.create_shared)
else:
filename = "net.tar"
lib.export_library(tmp.relpath(filename))
diff --git a/gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py
b/gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py
index 09bc046836..3c2f173c23 100644
--- a/gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py
+++ b/gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py
@@ -356,7 +356,7 @@ def tune_and_evaluate(tuning_opt):
from tvm.contrib import ndk
filename = "net.so"
- lib.export_library(tmp.relpath(filename), ndk.create_shared)
+ lib.export_library(tmp.relpath(filename),
fcompile=ndk.create_shared)
else:
filename = "net.tar"
lib.export_library(tmp.relpath(filename))
diff --git a/python/tvm/auto_scheduler/measure.py
b/python/tvm/auto_scheduler/measure.py
index 64da59031c..fa5f06c38f 100644
--- a/python/tvm/auto_scheduler/measure.py
+++ b/python/tvm/auto_scheduler/measure.py
@@ -631,7 +631,7 @@ def _local_build_worker(inp_serialized, build_func,
verbose):
try:
with transform.PassContext().current():
func = build_module.build(sch, args, target=task.target)
- func.export_library(filename, build_func)
+ func.export_library(filename, fcompile=build_func)
# pylint: disable=broad-except
except Exception:
error_no = MeasureErrorNo.COMPILE_HOST
diff --git a/python/tvm/autotvm/measure/measure_methods.py
b/python/tvm/autotvm/measure/measure_methods.py
index eee15d8d87..c57ebfc88b 100644
--- a/python/tvm/autotvm/measure/measure_methods.py
+++ b/python/tvm/autotvm/measure/measure_methods.py
@@ -571,7 +571,7 @@ class _WrappedBuildFunc:
raise ImportError("Requires USE_MICRO")
micro.export_model_library_format(func, filename)
else:
- func.export_library(filename, self.build_func)
+ func.export_library(filename, fcompile=self.build_func)
except Exception as e: # pylint: disable=broad-except
tb = traceback.format_exc()
return BuildResult(None, None, (tb, e), time.time() - tic)
diff --git a/python/tvm/contrib/cc.py b/python/tvm/contrib/cc.py
index b69c925411..ad6a82c49c 100644
--- a/python/tvm/contrib/cc.py
+++ b/python/tvm/contrib/cc.py
@@ -235,12 +235,12 @@ def cross_compiler(
# export using arm gcc
mod = build_runtime_module()
mod.export_library(path_dso,
- cc.cross_compiler("arm-linux-gnueabihf-gcc"))
+
fcompile=cc.cross_compiler("arm-linux-gnueabihf-gcc"))
# specialize ndk compilation options.
specialized_ndk = cc.cross_compiler(
ndk.create_shared,
["--sysroot=/path/to/sysroot", "-shared", "-fPIC", "-lm"])
- mod.export_library(path_dso, specialized_ndk)
+ mod.export_library(path_dso, fcompile=specialized_ndk)
"""
base_options = [] if options is None else options
kwargs = {}
diff --git a/python/tvm/contrib/pipeline_executor_build.py
b/python/tvm/contrib/pipeline_executor_build.py
index 8ea70f670a..9a16d1b7af 100644
--- a/python/tvm/contrib/pipeline_executor_build.py
+++ b/python/tvm/contrib/pipeline_executor_build.py
@@ -146,7 +146,7 @@ def export_library(factory, directory_path):
# Get the graph, lib, and parameters from GraphExecutorFactoryModule.
lib = factory.pipeline_mods[lib_index]["lib"]
# Export the lib, graph, and parameters to disk.
- lib.export_library(mconfig["lib_name"], fcompile)
+ lib.export_library(mconfig["lib_name"], fcompile=fcompile)
with open(mconfig["json_name"], "w") as file_handle:
file_handle.write(lib.graph_json)
with open(mconfig["params_name"], "wb") as file_handle:
diff --git a/python/tvm/driver/tvmc/model.py b/python/tvm/driver/tvmc/model.py
index edf107c184..f39aefdc92 100644
--- a/python/tvm/driver/tvmc/model.py
+++ b/python/tvm/driver/tvmc/model.py
@@ -263,11 +263,12 @@ class TVMCModel(object):
else:
if not cross_options:
executor_factory.get_lib().export_library(
- path_lib, tvm.contrib.cc.cross_compiler(cross)
+ path_lib, fcompile=tvm.contrib.cc.cross_compiler(cross)
)
else:
executor_factory.get_lib().export_library(
- path_lib, tvm.contrib.cc.cross_compiler(cross,
options=cross_options.split(" "))
+ path_lib,
+ fcompile=tvm.contrib.cc.cross_compiler(cross,
options=cross_options.split(" ")),
)
self.lib_path = path_lib
diff --git a/python/tvm/meta_schedule/builder/local_builder.py
b/python/tvm/meta_schedule/builder/local_builder.py
index 3ddca032ef..e95459e816 100644
--- a/python/tvm/meta_schedule/builder/local_builder.py
+++ b/python/tvm/meta_schedule/builder/local_builder.py
@@ -278,5 +278,5 @@ def default_export(mod: Module) -> str:
from tvm.contrib.tar import tar # pylint: disable=import-outside-toplevel
artifact_path = os.path.join(tempfile.mkdtemp(), "tvm_tmp_mod." +
tar.output_format)
- mod.export_library(artifact_path, tar)
+ mod.export_library(artifact_path, fcompile=tar)
return artifact_path
diff --git a/python/tvm/meta_schedule/testing/custom_builder_runner.py
b/python/tvm/meta_schedule/testing/custom_builder_runner.py
index 7129546dd8..0e8ee435f7 100644
--- a/python/tvm/meta_schedule/testing/custom_builder_runner.py
+++ b/python/tvm/meta_schedule/testing/custom_builder_runner.py
@@ -161,7 +161,7 @@ def run_module_via_rpc(
filename = os.path.join(tmp_dir, "tvm_tmp_mod." + tar.output_format)
if backend == "vm":
code, lib = lib.save()
- lib.export_library(filename, tar)
+ lib.export_library(filename, fcompile=tar)
session = rpc_config.connect_server()
session.upload(filename)
_, filename = os.path.split(filename)
diff --git a/python/tvm/relay/backend/executor_factory.py
b/python/tvm/relay/backend/executor_factory.py
index eee3169400..dd77a3f9bd 100644
--- a/python/tvm/relay/backend/executor_factory.py
+++ b/python/tvm/relay/backend/executor_factory.py
@@ -142,7 +142,7 @@ class AOTExecutorFactoryModule(ExecutorFactoryModule):
return self.lib
def export_library(self, file_name, fcompile=None, addons=None, **kwargs):
- return self.module.export_library(file_name, fcompile, addons,
**kwargs)
+ return self.module.export_library(file_name, fcompile=fcompile,
addons=addons, **kwargs)
class GraphExecutorFactoryModule(ExecutorFactoryModule):
@@ -200,7 +200,7 @@ class GraphExecutorFactoryModule(ExecutorFactoryModule):
self.function_metadata = function_metadata
def export_library(self, file_name, fcompile=None, addons=None, **kwargs):
- return self.module.export_library(file_name, fcompile, addons,
**kwargs)
+ return self.module.export_library(file_name, fcompile=fcompile,
addons=addons, **kwargs)
def get_devices(self):
return []
diff --git a/python/tvm/runtime/module.py b/python/tvm/runtime/module.py
index 2a1db2cbb2..15c2a5a258 100644
--- a/python/tvm/runtime/module.py
+++ b/python/tvm/runtime/module.py
@@ -438,7 +438,9 @@ class Module(object):
def _collect_dso_modules(self):
return self._collect_from_import_tree(lambda m: m.is_dso_exportable)
- def export_library(self, file_name, fcompile=None, addons=None,
workspace_dir=None, **kwargs):
+ def export_library(
+ self, file_name, *, fcompile=None, addons=None, workspace_dir=None,
**kwargs
+ ):
"""
Export the module and all imported modules into a single device
library.
diff --git a/python/tvm/testing/runner.py b/python/tvm/testing/runner.py
index 03533ba167..a4615f7a46 100644
--- a/python/tvm/testing/runner.py
+++ b/python/tvm/testing/runner.py
@@ -61,7 +61,7 @@ def _normalize_export_func(export_func, output_format) ->
Tuple[Callable, str]:
from tvm.contrib import ndk, tar
def export_with(func):
- return lambda mod, path: mod.export_library(path, func)
+ return lambda mod, path: mod.export_library(path, fcompile=func)
if export_func == "tar":
export_func = export_with(tar.tar)
diff --git a/tests/python/relay/opencl_texture/utils/adreno_utils.py
b/tests/python/relay/opencl_texture/utils/adreno_utils.py
index de325d822c..309243df16 100644
--- a/tests/python/relay/opencl_texture/utils/adreno_utils.py
+++ b/tests/python/relay/opencl_texture/utils/adreno_utils.py
@@ -103,7 +103,7 @@ def build_run_compare(
dso_binary = "dev_lib_cl.so"
dso_binary_path = temp.relpath(dso_binary)
ctx = remote.cl(0)
- lib.export_library(dso_binary_path, ndk.create_shared)
+ lib.export_library(dso_binary_path, fcompile=ndk.create_shared)
remote.upload(dso_binary_path)
rlib = remote.load_module(dso_binary)
m = graph_runtime.create(graph, rlib, ctx)
@@ -183,7 +183,7 @@ def build_run_compare_vm(
dso_binary = "dev_lib_cl.so"
dso_binary_path = temp.relpath(dso_binary)
dev = remote.cl(0)
- vmc.mod.export_library(dso_binary_path, ndk.create_shared)
+ vmc.mod.export_library(dso_binary_path, fcompile=ndk.create_shared)
remote.upload(dso_binary_path)
rlib = remote.load_module(dso_binary)
vm = VirtualMachine(rlib, dev, "naive")
diff --git a/tests/python/unittest/test_runtime_module_load.py
b/tests/python/unittest/test_runtime_module_load.py
index 31e0faf0d4..ecaa7067a5 100644
--- a/tests/python/unittest/test_runtime_module_load.py
+++ b/tests/python/unittest/test_runtime_module_load.py
@@ -125,7 +125,7 @@ def test_device_module_dump():
path_dso = temp.relpath("dev_lib.so")
# test cross compiler function
- f.export_library(path_dso, cc.cross_compiler("g++"))
+ f.export_library(path_dso, fcompile=cc.cross_compiler("g++"))
def popen_check():
import tvm
diff --git a/tests/python/unittest/test_runtime_rpc.py
b/tests/python/unittest/test_runtime_rpc.py
index de441948b1..9591e3ea4d 100644
--- a/tests/python/unittest/test_runtime_rpc.py
+++ b/tests/python/unittest/test_runtime_rpc.py
@@ -294,7 +294,7 @@ def test_rpc_remote_module():
runtime = Runtime("cpp", {"system-lib": True})
f = tvm.build(s, [A, B], "llvm", name="myadd", runtime=runtime)
path_minrpc = temp.relpath("dev_lib.minrpc")
- f.export_library(path_minrpc, rpc.with_minrpc(cc.create_executable))
+ f.export_library(path_minrpc,
fcompile=rpc.with_minrpc(cc.create_executable))
with pytest.raises(RuntimeError):
rpc.PopenSession("filenotexist")
diff --git a/tests/python/unittest/test_target_codegen_blob.py
b/tests/python/unittest/test_target_codegen_blob.py
index 2848c875a4..5266f481f5 100644
--- a/tests/python/unittest/test_target_codegen_blob.py
+++ b/tests/python/unittest/test_target_codegen_blob.py
@@ -106,8 +106,8 @@ def test_cuda_multi_lib():
pathAll = temp.relpath("libAll.a")
path_dso = temp.relpath("mylib.so")
- libA.export_library(pathA, tar.tar)
- libB.export_library(pathB, tar.tar)
+ libA.export_library(pathA, fcompile=tar.tar)
+ libB.export_library(pathB, fcompile=tar.tar)
cc.create_staticlib(pathAll, [pathA, pathB])
# package two static libs together
cc.create_shared(path_dso, ["-Wl,--whole-archive", pathAll,
"-Wl,--no-whole-archive"])
diff --git a/web/tests/python/prepare_test_libs.py
b/web/tests/python/prepare_test_libs.py
index 5c1f7c68c4..0c34a28828 100644
--- a/web/tests/python/prepare_test_libs.py
+++ b/web/tests/python/prepare_test_libs.py
@@ -35,7 +35,7 @@ def prepare_test_libs(base_path):
fadd = tvm.build(s, [A, B], target, runtime=runtime, name="add_one")
wasm_path = os.path.join(base_path, "test_addone.wasm")
- fadd.export_library(wasm_path, emcc.create_tvmjs_wasm)
+ fadd.export_library(wasm_path, fcompile=emcc.create_tvmjs_wasm)
if __name__ == "__main__":
diff --git a/web/tests/python/webgpu_rpc_test.py
b/web/tests/python/webgpu_rpc_test.py
index 6e34a8a2b3..6eced5bd32 100644
--- a/web/tests/python/webgpu_rpc_test.py
+++ b/web/tests/python/webgpu_rpc_test.py
@@ -52,7 +52,7 @@ def test_rpc():
temp = utils.tempdir()
wasm_path = temp.relpath("addone_gpu.wasm")
- fadd.export_library(wasm_path, emcc.create_tvmjs_wasm)
+ fadd.export_library(wasm_path, fcompile=emcc.create_tvmjs_wasm)
wasm_binary = open(wasm_path, "rb").read()
remote = rpc.connect(
diff --git a/web/tests/python/websock_rpc_test.py
b/web/tests/python/websock_rpc_test.py
index 7de5ee956e..66fc41bab1 100644
--- a/web/tests/python/websock_rpc_test.py
+++ b/web/tests/python/websock_rpc_test.py
@@ -48,7 +48,7 @@ def test_rpc():
temp = utils.tempdir()
wasm_path = temp.relpath("addone.wasm")
- fadd.export_library(wasm_path, emcc.create_tvmjs_wasm)
+ fadd.export_library(wasm_path, fcompile=emcc.create_tvmjs_wasm)
wasm_binary = open(wasm_path, "rb").read()