tkonolige commented on a change in pull request #8715:
URL: https://github.com/apache/tvm/pull/8715#discussion_r687908021
##########
File path: apps/bundle_deploy/crt_config/crt_config.h
##########
@@ -43,7 +43,7 @@
#define TVM_CRT_MAX_REGISTERED_MODULES 2
/*! Size of the global function registry, in bytes. */
-#define TVM_CRT_GLOBAL_FUNC_REGISTRY_SIZE_BYTES 200
+#define TVM_CRT_GLOBAL_FUNC_REGISTRY_SIZE_BYTES 512
Review comment:
I'm not familiar with this registry. Why does the size need to be bumped?
##########
File path: python/tvm/support.py
##########
@@ -29,6 +33,10 @@ def libinfo():
info: Dict[str, str]
The dictionary of compile-time info.
"""
+ local_dict = globals()
+ if "GetLibInfo" not in local_dict:
+ raise LibInfoUnavailableError()
+
Review comment:
I'd prefer checking this with `tvm.get_global_func("GetLibInfo")`. Also,
how about we return None instead of throwing an exception? Or we could return
an empty dict? (I think returning an empty dict would be the best option)
##########
File path: apps/microtvm/pyproject.toml
##########
@@ -111,6 +111,8 @@ tensorflow-estimator = {version = "^2.1", optional = true}
# TFLite frontend
tflite = {version = "2.1.0", optional = true}
wheel = "*"
+cloudpickle = "^1.6.0"
+colorama = "^0.4.4"
Review comment:
What is the reason for this dependency?
##########
File path: python/tvm/micro/build.py
##########
@@ -57,3 +61,57 @@ def get_standalone_crt_dir() -> str:
raise CrtNotFoundError()
return STANDALONE_CRT_DIR
+
+
+def autotvm_module_loader(
+ template_project_dir: str,
+ project_options: dict = None,
+):
+ """Configure a new adapter.
+
+ Parameters
+ ----------
+ template_project_dir: str
+ Path to the template project directory on the runner.
+
+ project_options : dict
+ Opt
+ compiler options specific to this build.
+
+ workspace_kw : Optional[dict]
+ Keyword args passed to the Workspace constructor.
+ """
+ if isinstance(template_project_dir, pathlib.Path):
+ template_project_dir = str(template_project_dir)
+ elif not isinstance(template_project_dir, str):
+ raise TypeError(f"Incorrect type {type(template_project_dir)}.")
+
+ @contextlib.contextmanager
+ def module_loader(remote_kw, build_result):
+ with open(build_result.filename, "rb") as build_file:
+ build_result_bin = build_file.read()
+
+ tracker = _rpc.connect_tracker(remote_kw["host"], remote_kw["port"])
+ remote = tracker.request(
+ remote_kw["device_key"],
+ priority=remote_kw["priority"],
+ session_timeout=remote_kw["timeout"],
+ session_constructor_args=[
+ "tvm.micro.compile_and_create_micro_session",
+ build_result_bin,
+ template_project_dir,
+ json.dumps(project_options),
+ ],
+ )
+ system_lib = remote.get_function("runtime.SystemLib")()
+ yield remote, system_lib
Review comment:
This function is used with multiprocessing. On certain platforms you
cannot pass local functions across the process boundary, so this needs to be
either a class or a top-level function.
##########
File path: python/tvm/micro/session.py
##########
@@ -234,3 +237,69 @@ def create_local_debug_executor(graph_json_str, mod,
device, dump_root=None):
graph_json_str,
dump_root=dump_root,
)
+
+
+RPC_SESSION = None
+
+
+@register_func("tvm.micro.compile_and_create_micro_session")
+def compile_and_create_micro_session(
+ mod_src_bytes: bytes,
+ template_project_dir: str,
+ project_options: dict = None,
+):
+ """Compile the given libraries and sources into a MicroBinary, then invoke
create_micro_session.
+
+ Parameters
+ ----------
+ mod_src_bytes : bytes
+ The content of a tarfile which contains the TVM-generated sources
which together form the
+ SystemLib. This tar is expected to be created by export_library. The
tar will be extracted
+ into a directory and the sources compiled into a MicroLibrary using
the Compiler.
+
+ template_project_dir: str
+ The path to a template microTVM Project API project which is used to
generate the embedded
+ project that is built and flashed onto the target device.
+
+ project_options: dict
+ Options for the microTVM API Server contained in template_project_dir.
+ """
+ global RPC_SESSION
Review comment:
I believe you can avoid the global here by returning an object whose
destructor closes the session. Is there a reason this would not work?
##########
File path: tests/micro/zephyr/test_zephyr.py
##########
@@ -400,5 +396,137 @@ def test_tensors(sess):
test_tensors(sess)
+def _get_conv2d_model(input_shape: tuple):
+ """Build a conv2d operator in Keras and returns an (IRModule,
parameters)"""
+ import tensorflow as tf
+ from tensorflow import keras
+
+ model = keras.models.Sequential()
+ model.add(keras.layers.Conv2D(2, 3, input_shape=input_shape))
+ model.build()
+
+ inputs = {
+ i.name.split(":", 2)[0]: [x if x is not None else 1 for x in
i.shape.as_list()]
+ for i in model.inputs
+ }
+ inputs = {k: [v[0], v[3], v[1], v[2]] for k, v in inputs.items()}
+ mod, params = relay.frontend.from_keras(model, inputs, layout="NCHW")
+ return mod, params
+
+
[email protected]_micro
+def test_autotune_conv2d(temp_dir, platform, west_cmd, tvm_debug):
+ """Test AutoTune for microTVM Zephyr"""
+ model, zephyr_board = PLATFORMS[platform]
+
+ sample = np.random.rand(1, 3, 16, 16)
+ mod, params = _get_conv2d_model((16, 16, 3))
+
+ target = tvm.target.target.micro(model)
+ pass_context = tvm.transform.PassContext(opt_level=3,
config={"tir.disable_vectorize": True})
+ with pass_context:
+ tasks = tvm.autotvm.task.extract_from_program(mod["main"], {}, target)
+ assert len(tasks) > 0
+
+ repo_root = pathlib.Path(
+ subprocess.check_output(["git", "rev-parse", "--show-toplevel"],
encoding="utf-8").strip()
+ )
+ module_loader = tvm.micro.autotvm_module_loader(
+ template_project_dir=repo_root / "apps" / "microtvm" / "zephyr" /
"template_project",
+ project_options={
+ "zephyr_board": zephyr_board,
+ "west_cmd": west_cmd,
+ "verbose": 1,
+ "project_type": "host_driven",
+ },
+ )
+ builder = tvm.autotvm.LocalBuilder(
+ n_parallel=1,
+ build_kwargs={"build_option": {"tir.disable_vectorize": True}},
+ do_fork=False,
+ build_func=tvm.micro.autotvm_build_func,
+ ) # do_fork=False needed to persist stateful builder.
+ runner = tvm.autotvm.LocalRunner(number=1, repeat=1, timeout=0,
module_loader=module_loader)
+
+ measure_option = tvm.autotvm.measure_option(builder=builder, runner=runner)
+
+ log_path = pathlib.Path("zephyr_autotune.log")
+ if log_path.exists():
+ log_path.unlink()
+
+ n_trial = 10
+ for task in tasks:
+ print(f"mehrdad: {task}")
Review comment:
remove
##########
File path: tutorials/micro/micro_autotune.py
##########
@@ -0,0 +1,269 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+.. _tutorial-micro-autotune:
+
+Autotuning with micro TVM
+=========================
+**Author**: `Andrew Reusch <https://github.com/areusch>`_, `Mehrdad Hessar
<https://github.com/mehrdadh>`_
+
+This tutorial explains how to autotune a model using the C runtime.
+"""
+
+import argparse
+from tvm.contrib import utils
+
+
+PLATFORMS = {
+ "host": ("host", None),
+ "qemu_x86": ("host", "qemu_x86"),
+ "nrf5340dk": ("nrf5340dk", "nrf5340dk_nrf5340_cpuapp"),
+ "stm32f746xx_disco": ("stm32f746xx", "stm32f746g_disco"),
+ "stm32f746xx_nucleo": ("stm32f746xx", "nucleo_f746zg"),
+ "stm32l4r5zi_nucleo": ("stm32l4r5zi", "nucleo_l4r5zi"),
+}
+
+
+def main(args):
+ ####################
+ # Defining the model
+ ####################
+ #
+ # To begin with, define a model in Keras to be executed on-device. This
shouldn't look any different
+ # from a usual Keras model definition. Let's define a relatively small
model here for efficiency's
+ # sake.
+
+ import tensorflow as tf
+ from tensorflow import keras
+
+ model = keras.models.Sequential()
+ model.add(keras.layers.Conv2D(2, 3, input_shape=(16, 16, 3)))
+ model.build()
+
+ model.summary()
+
+ ####################
+ # Importing into TVM
+ ####################
+ # Now, use `from_keras
<https://tvm.apache.org/docs/api/python/relay/frontend.html#tvm.relay.frontend.from_keras>`_
to import the Keras model into TVM.
+
+ import tvm
+ from tvm import relay
+ import numpy as np
+
+ inputs = {
+ i.name.split(":", 2)[0]: [x if x is not None else 1 for x in
i.shape.as_list()]
+ for i in model.inputs
+ }
+ inputs = {k: [v[0], v[3], v[1], v[2]] for k, v in inputs.items()}
+ tvm_model, params = relay.frontend.from_keras(model, inputs, layout="NCHW")
+ print(tvm_model)
+
+ #######################
+ # Defining the target #
+ #######################
+ # Now we define the TVM target that describes the execution environment.
This looks very similar
+ # to target definitions from other microTVM tutorials.
+ #
+ # When running on physical hardware, choose a target and a board that
+ # describe the hardware. There are multiple hardware targets that could be
selected from
+ # PLATFORM list in this tutorial. You can choose the platform by passing
--platform argument when running
+ # this tutorial.
+ #
+ TARGET = tvm.target.target.micro(PLATFORMS[args.platform][0])
+ BOARD = PLATFORMS[args.platform][1]
Review comment:
```suggestion
target = tvm.target.target.micro(PLATFORMS[args.platform][0])
board = PLATFORMS[args.platform][1]
```
##########
File path: tests/micro/zephyr/test_zephyr.py
##########
@@ -400,5 +396,137 @@ def test_tensors(sess):
test_tensors(sess)
+def _get_conv2d_model(input_shape: tuple):
+ """Build a conv2d operator in Keras and returns an (IRModule,
parameters)"""
+ import tensorflow as tf
+ from tensorflow import keras
+
+ model = keras.models.Sequential()
+ model.add(keras.layers.Conv2D(2, 3, input_shape=input_shape))
+ model.build()
+
+ inputs = {
+ i.name.split(":", 2)[0]: [x if x is not None else 1 for x in
i.shape.as_list()]
+ for i in model.inputs
+ }
+ inputs = {k: [v[0], v[3], v[1], v[2]] for k, v in inputs.items()}
+ mod, params = relay.frontend.from_keras(model, inputs, layout="NCHW")
+ return mod, params
Review comment:
Why are you importing from Keras instead of just using relay directly?
##########
File path: tutorials/micro/micro_autotune.py
##########
@@ -0,0 +1,269 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+.. _tutorial-micro-autotune:
+
+Autotuning with micro TVM
+=========================
+**Author**: `Andrew Reusch <https://github.com/areusch>`_, `Mehrdad Hessar
<https://github.com/mehrdadh>`_
+
+This tutorial explains how to autotune a model using the C runtime.
+"""
+
+import argparse
+from tvm.contrib import utils
+
+
+PLATFORMS = {
+ "host": ("host", None),
+ "qemu_x86": ("host", "qemu_x86"),
+ "nrf5340dk": ("nrf5340dk", "nrf5340dk_nrf5340_cpuapp"),
+ "stm32f746xx_disco": ("stm32f746xx", "stm32f746g_disco"),
+ "stm32f746xx_nucleo": ("stm32f746xx", "nucleo_f746zg"),
+ "stm32l4r5zi_nucleo": ("stm32l4r5zi", "nucleo_l4r5zi"),
+}
+
+
+def main(args):
+ ####################
+ # Defining the model
+ ####################
+ #
+ # To begin with, define a model in Keras to be executed on-device. This
shouldn't look any different
+ # from a usual Keras model definition. Let's define a relatively small
model here for efficiency's
+ # sake.
+
+ import tensorflow as tf
+ from tensorflow import keras
+
+ model = keras.models.Sequential()
+ model.add(keras.layers.Conv2D(2, 3, input_shape=(16, 16, 3)))
+ model.build()
+
+ model.summary()
+
+ ####################
+ # Importing into TVM
+ ####################
+ # Now, use `from_keras
<https://tvm.apache.org/docs/api/python/relay/frontend.html#tvm.relay.frontend.from_keras>`_
to import the Keras model into TVM.
+
+ import tvm
+ from tvm import relay
+ import numpy as np
+
+ inputs = {
+ i.name.split(":", 2)[0]: [x if x is not None else 1 for x in
i.shape.as_list()]
+ for i in model.inputs
+ }
+ inputs = {k: [v[0], v[3], v[1], v[2]] for k, v in inputs.items()}
+ tvm_model, params = relay.frontend.from_keras(model, inputs, layout="NCHW")
+ print(tvm_model)
Review comment:
This seems a little irrelevant to the tutorial. Why not just use a relay
model?
##########
File path: tutorials/micro/micro_autotune.py
##########
@@ -0,0 +1,269 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+.. _tutorial-micro-autotune:
+
+Autotuning with micro TVM
+=========================
+**Author**: `Andrew Reusch <https://github.com/areusch>`_, `Mehrdad Hessar
<https://github.com/mehrdadh>`_
+
+This tutorial explains how to autotune a model using the C runtime.
+"""
+
+import argparse
+from tvm.contrib import utils
+
+
+PLATFORMS = {
+ "host": ("host", None),
+ "qemu_x86": ("host", "qemu_x86"),
+ "nrf5340dk": ("nrf5340dk", "nrf5340dk_nrf5340_cpuapp"),
+ "stm32f746xx_disco": ("stm32f746xx", "stm32f746g_disco"),
+ "stm32f746xx_nucleo": ("stm32f746xx", "nucleo_f746zg"),
+ "stm32l4r5zi_nucleo": ("stm32l4r5zi", "nucleo_l4r5zi"),
+}
+
+
+def main(args):
+ ####################
+ # Defining the model
+ ####################
+ #
+ # To begin with, define a model in Keras to be executed on-device. This
shouldn't look any different
+ # from a usual Keras model definition. Let's define a relatively small
model here for efficiency's
+ # sake.
+
+ import tensorflow as tf
+ from tensorflow import keras
+
+ model = keras.models.Sequential()
+ model.add(keras.layers.Conv2D(2, 3, input_shape=(16, 16, 3)))
+ model.build()
+
+ model.summary()
+
+ ####################
+ # Importing into TVM
+ ####################
+ # Now, use `from_keras
<https://tvm.apache.org/docs/api/python/relay/frontend.html#tvm.relay.frontend.from_keras>`_
to import the Keras model into TVM.
+
+ import tvm
+ from tvm import relay
+ import numpy as np
+
+ inputs = {
+ i.name.split(":", 2)[0]: [x if x is not None else 1 for x in
i.shape.as_list()]
+ for i in model.inputs
+ }
+ inputs = {k: [v[0], v[3], v[1], v[2]] for k, v in inputs.items()}
+ tvm_model, params = relay.frontend.from_keras(model, inputs, layout="NCHW")
+ print(tvm_model)
+
+ #######################
+ # Defining the target #
+ #######################
+ # Now we define the TVM target that describes the execution environment.
This looks very similar
+ # to target definitions from other microTVM tutorials.
+ #
+ # When running on physical hardware, choose a target and a board that
+ # describe the hardware. There are multiple hardware targets that could be
selected from
+ # PLATFORM list in this tutorial. You can choose the platform by passing
--platform argument when running
+ # this tutorial.
+ #
+ TARGET = tvm.target.target.micro(PLATFORMS[args.platform][0])
+ BOARD = PLATFORMS[args.platform][1]
+
+ #########################
+ # Extracting tuning tasks
+ #########################
+ # Not all operators in the Relay program printed above can be tuned. Some
are so trivial that only
+ # a single implementation is defined; others don't make sense as tuning
tasks. Using
+ # `extract_from_program`, you can produce a list of tunable tasks.
+ #
+ # Because task extraction involves running the compiler, we first
configure the compiler's
+ # transformation passes; we'll apply the same configuration later on
during autotuning.
+
+ pass_context = tvm.transform.PassContext(opt_level=3,
config={"tir.disable_vectorize": True})
+ with pass_context:
+ # with tvm.transform.PassContext(opt_level=3):
Review comment:
remove or uncomment
##########
File path: tutorials/micro/micro_autotune.py
##########
@@ -0,0 +1,269 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+.. _tutorial-micro-autotune:
+
+Autotuning with micro TVM
+=========================
+**Author**: `Andrew Reusch <https://github.com/areusch>`_, `Mehrdad Hessar
<https://github.com/mehrdadh>`_
+
+This tutorial explains how to autotune a model using the C runtime.
+"""
+
+import argparse
+from tvm.contrib import utils
+
+
+PLATFORMS = {
+ "host": ("host", None),
+ "qemu_x86": ("host", "qemu_x86"),
+ "nrf5340dk": ("nrf5340dk", "nrf5340dk_nrf5340_cpuapp"),
+ "stm32f746xx_disco": ("stm32f746xx", "stm32f746g_disco"),
+ "stm32f746xx_nucleo": ("stm32f746xx", "nucleo_f746zg"),
+ "stm32l4r5zi_nucleo": ("stm32l4r5zi", "nucleo_l4r5zi"),
+}
+
+
+def main(args):
+ ####################
+ # Defining the model
+ ####################
+ #
+ # To begin with, define a model in Keras to be executed on-device. This
shouldn't look any different
+ # from a usual Keras model definition. Let's define a relatively small
model here for efficiency's
+ # sake.
+
+ import tensorflow as tf
+ from tensorflow import keras
+
+ model = keras.models.Sequential()
+ model.add(keras.layers.Conv2D(2, 3, input_shape=(16, 16, 3)))
+ model.build()
+
+ model.summary()
+
+ ####################
+ # Importing into TVM
+ ####################
+ # Now, use `from_keras
<https://tvm.apache.org/docs/api/python/relay/frontend.html#tvm.relay.frontend.from_keras>`_
to import the Keras model into TVM.
+
+ import tvm
+ from tvm import relay
+ import numpy as np
+
+ inputs = {
+ i.name.split(":", 2)[0]: [x if x is not None else 1 for x in
i.shape.as_list()]
+ for i in model.inputs
+ }
+ inputs = {k: [v[0], v[3], v[1], v[2]] for k, v in inputs.items()}
+ tvm_model, params = relay.frontend.from_keras(model, inputs, layout="NCHW")
+ print(tvm_model)
+
+ #######################
+ # Defining the target #
+ #######################
+ # Now we define the TVM target that describes the execution environment.
This looks very similar
+ # to target definitions from other microTVM tutorials.
+ #
+ # When running on physical hardware, choose a target and a board that
+ # describe the hardware. There are multiple hardware targets that could be
selected from
+ # PLATFORM list in this tutorial. You can choose the platform by passing
--platform argument when running
+ # this tutorial.
+ #
+ TARGET = tvm.target.target.micro(PLATFORMS[args.platform][0])
+ BOARD = PLATFORMS[args.platform][1]
+
+ #########################
+ # Extracting tuning tasks
+ #########################
+ # Not all operators in the Relay program printed above can be tuned. Some
are so trivial that only
+ # a single implementation is defined; others don't make sense as tuning
tasks. Using
+ # `extract_from_program`, you can produce a list of tunable tasks.
+ #
+ # Because task extraction involves running the compiler, we first
configure the compiler's
+ # transformation passes; we'll apply the same configuration later on
during autotuning.
+
+ pass_context = tvm.transform.PassContext(opt_level=3,
config={"tir.disable_vectorize": True})
+ with pass_context:
+ # with tvm.transform.PassContext(opt_level=3):
+ tasks = tvm.autotvm.task.extract_from_program(tvm_model["main"], {},
TARGET)
+ assert len(tasks) > 0
+
+ ######################
+ # Configuring microTVM
+ ######################
+ # Before autotuning, we need to define a module loader and then pass that
to
+ # a `tvm.autotvm.LocalBuilder`. Then we create a `tvm.autotvm.LocalRunner`
and use
+ # both builder and runner to generate multiple measurements for the auto
tuner.
+ #
+ # In this tutorial, we have the option to use x86 host as an example or
use different targets
+ # from Zephyr RTOS. If you pass `--platform=host` to this tutorial
it will use x86. You can
+ # choose other options by choosing from `PLATFORM` list.
+ #
+
+ import subprocess
+ import pathlib
+ import tvm.micro
+
+ repo_root = pathlib.Path(
+ subprocess.check_output(["git", "rev-parse", "--show-toplevel"],
encoding="utf-8").strip()
+ )
+
+ if args.platform == "host":
+ module_loader = tvm.micro.autotvm_module_loader(
+ template_project_dir=repo_root / "src" / "runtime" / "crt" /
"host",
+ project_options={},
+ )
+ builder = tvm.autotvm.LocalBuilder(
+ n_parallel=1,
+ build_kwargs={"build_option": {"tir.disable_vectorize": True}},
+ do_fork=False,
+ build_func=tvm.micro.autotvm_build_func,
+ ) # do_fork=False needed to persist stateful builder.
+ runner = tvm.autotvm.LocalRunner(number=1, repeat=1, timeout=0,
module_loader=module_loader)
+
+ measure_option = tvm.autotvm.measure_option(builder=builder,
runner=runner)
+
+ else:
+ module_loader = tvm.micro.autotvm_module_loader(
+ template_project_dir=repo_root / "apps" / "microtvm" / "zephyr" /
"template_project",
+ project_options={
+ "zephyr_board": BOARD,
+ "west_cmd": "west",
+ "verbose": 1,
+ "project_type": "host_driven",
+ },
+ )
+ builder = tvm.autotvm.LocalBuilder(
+ n_parallel=1,
+ build_kwargs={"build_option": {"tir.disable_vectorize": True}},
+ do_fork=False,
+ build_func=tvm.micro.autotvm_build_func,
+ ) # do_fork=False needed to persist stateful builder.
+ runner = tvm.autotvm.LocalRunner(number=1, repeat=1, timeout=0,
module_loader=module_loader)
+
+ measure_option = tvm.autotvm.measure_option(builder=builder,
runner=runner)
+
+ ################
+ # Run Autotuning
+ ################
+ # Now we can run autotuning separately on each extracted task.
+ NUM_TRIALS = 1
Review comment:
```suggestion
num_trials = 1
```
##########
File path: tests/micro/zephyr/test_zephyr.py
##########
@@ -400,5 +396,137 @@ def test_tensors(sess):
test_tensors(sess)
+def _get_conv2d_model(input_shape: tuple):
+ """Build a conv2d operator in Keras and returns an (IRModule,
parameters)"""
+ import tensorflow as tf
+ from tensorflow import keras
+
+ model = keras.models.Sequential()
+ model.add(keras.layers.Conv2D(2, 3, input_shape=input_shape))
+ model.build()
+
+ inputs = {
+ i.name.split(":", 2)[0]: [x if x is not None else 1 for x in
i.shape.as_list()]
+ for i in model.inputs
+ }
+ inputs = {k: [v[0], v[3], v[1], v[2]] for k, v in inputs.items()}
+ mod, params = relay.frontend.from_keras(model, inputs, layout="NCHW")
+ return mod, params
+
+
[email protected]_micro
+def test_autotune_conv2d(temp_dir, platform, west_cmd, tvm_debug):
+ """Test AutoTune for microTVM Zephyr"""
+ model, zephyr_board = PLATFORMS[platform]
+
+ sample = np.random.rand(1, 3, 16, 16)
+ mod, params = _get_conv2d_model((16, 16, 3))
+
+ target = tvm.target.target.micro(model)
+ pass_context = tvm.transform.PassContext(opt_level=3,
config={"tir.disable_vectorize": True})
+ with pass_context:
+ tasks = tvm.autotvm.task.extract_from_program(mod["main"], {}, target)
+ assert len(tasks) > 0
+
+ repo_root = pathlib.Path(
+ subprocess.check_output(["git", "rev-parse", "--show-toplevel"],
encoding="utf-8").strip()
+ )
+ module_loader = tvm.micro.autotvm_module_loader(
+ template_project_dir=repo_root / "apps" / "microtvm" / "zephyr" /
"template_project",
+ project_options={
+ "zephyr_board": zephyr_board,
+ "west_cmd": west_cmd,
+ "verbose": 1,
+ "project_type": "host_driven",
+ },
+ )
+ builder = tvm.autotvm.LocalBuilder(
+ n_parallel=1,
+ build_kwargs={"build_option": {"tir.disable_vectorize": True}},
+ do_fork=False,
Review comment:
Can we test with `do_fork=True`? I'm a little worried that the parallel
stuff might break.
##########
File path: tests/python/unittest/test_crt.py
##########
@@ -219,5 +219,109 @@ def test_platform_timer():
assert len(result.results) == 3
[email protected]_micro
+def test_autotune():
+ """Verify that autotune works with micro."""
+
+ RELAY_MODEL = """
+#[version = "0.0.5"]
+def @main(%data : Tensor[(1, 3, 64, 64), uint8], %weight : Tensor[(8, 3, 5,
5), int8]) {
+ %1 = nn.conv2d(
+ %data,
+ %weight,
+ padding=[2, 2],
+ channels=8,
+ kernel_size=[5, 5],
+ data_layout="NCHW",
+ kernel_layout="OIHW",
+ out_dtype="int32");
+ %1
+}
+"""
+ mod = tvm.parser.fromtext(RELAY_MODEL)
Review comment:
Why parse a string model instead of just writing the relay directly?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]