tkonolige commented on a change in pull request #8715:
URL: https://github.com/apache/tvm/pull/8715#discussion_r688689500
##########
File path: python/tvm/support.py
##########
@@ -29,7 +33,11 @@ def libinfo():
info: Dict[str, str]
The dictionary of compile-time info.
"""
- return {k: v for k, v in GetLibInfo().items()} # pylint:
disable=unnecessary-comprehension
+ get_lib_info = get_global_func("support.GetLibInfo")()
+ if not get_lib_info:
Review comment:
Is this checking for None? If so, use `is None` (the branch is taken when the value is None).
##########
File path: tutorials/micro/micro_autotune.py
##########
@@ -0,0 +1,271 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+.. _tutorial-micro-autotune:
+
+Autotuning with micro TVM
+=========================
+**Author**: `Andrew Reusch <https://github.com/areusch>`_, `Mehrdad Hessar
<https://github.com/mehrdadh>`_
+
+This tutorial explains how to autotune a model using the C runtime.
+"""
+
+import argparse
+
+# A mapping of a microTVM device to its target and board.
+PLATFORMS = {
+ "host": ("host", None),
+ "qemu_x86": ("host", "qemu_x86"),
+ "nrf5340dk": ("nrf5340dk", "nrf5340dk_nrf5340_cpuapp"),
+ "stm32f746xx_disco": ("stm32f746xx", "stm32f746g_disco"),
+ "stm32f746xx_nucleo": ("stm32f746xx", "nucleo_f746zg"),
+ "stm32l4r5zi_nucleo": ("stm32l4r5zi", "nucleo_l4r5zi"),
+}
+
+
+def main(args):
+ ####################
+ # Defining the model
+ ####################
+ #
+ # To begin with, define a model in Relay to be executed on-device. Then
create an IRModule from the Relay model and
+ # fill parameters with random numbers.
+ #
+
+ import tvm
+
+ data_shape = (1, 3, 10, 10)
+ weight_shape = (6, 3, 5, 5)
+
+ data = tvm.relay.var("data", tvm.relay.TensorType(data_shape, "float32"))
+ weight = tvm.relay.var("weight", tvm.relay.TensorType(weight_shape,
"float32"))
+
+ y = tvm.relay.nn.conv2d(
+ data,
+ weight,
+ padding=(2, 2),
+ kernel_size=(5, 5),
+ kernel_layout="OIHW",
+ out_dtype="float32",
+ )
+ f = tvm.relay.Function([data, weight], y)
+
+ relay_mod = tvm.IRModule.from_expr(f)
+ relay_mod = tvm.relay.transform.InferType()(relay_mod)
+
+ import numpy as np
+
+ weight_sample = np.random.rand(
+ weight_shape[0], weight_shape[1], weight_shape[2], weight_shape[3]
+ ).astype("float32")
+ params = {"weight": weight_sample}
+
+ #######################
+ # Defining the target #
+ #######################
+ # Now we define the TVM target that describes the execution environment.
This looks very similar
+ # to target definitions from other microTVM tutorials.
+ #
+ # When running on physical hardware, choose a target and a board that
+ # describe the hardware. There are multiple hardware targets that could be
selected from
+ # the `PLATFORMS` list in this tutorial. You can choose the platform by
passing the --platform argument when running
+ # this tutorial.
+ #
+ target = tvm.target.target.micro(PLATFORMS[args.platform][0])
+ board = PLATFORMS[args.platform][1]
+
+ #########################
+ # Extracting tuning tasks
+ #########################
+ # Not all operators in the Relay program printed above can be tuned. Some
are so trivial that only
+ # a single implementation is defined; others don't make sense as tuning
tasks. Using
+ # `extract_from_program`, you can produce a list of tunable tasks.
+ #
+ # Because task extraction involves running the compiler, we first
configure the compiler's
+ # transformation passes; we'll apply the same configuration later on
during autotuning.
+
+ pass_context = tvm.transform.PassContext(opt_level=3,
config={"tir.disable_vectorize": True})
+ with pass_context:
+ tasks = tvm.autotvm.task.extract_from_program(relay_mod["main"], {},
target)
+ assert len(tasks) > 0
+
+ ######################
+ # Configuring microTVM
+ ######################
+ # Before autotuning, we need to define a module loader and then pass that
to
+ # a `tvm.autotvm.LocalBuilder`. Then we create a `tvm.autotvm.LocalRunner`
and use
+ # both builder and runner to generate multiple measurements for the auto
tuner.
+ #
+ # In this tutorial, we have the option to use x86 host as an example or
use different targets
+ # from Zephyr RTOS. If you choose to pass `--platform=host` to this
tutorial, it will use x86. You can
+ # choose other options from the `PLATFORMS` list.
+ #
+
+ import subprocess
+ import pathlib
Review comment:
ditto above
##########
File path: python/tvm/support.py
##########
@@ -21,6 +21,10 @@
from . import get_global_func
+class LibInfoUnavailableError(Exception):
+ """Raised when libinfo is not available e.g. because libtvm_runtime.so is
used."""
+
+
Review comment:
This is unnecessary now.
##########
File path: tutorials/micro/micro_autotune.py
##########
@@ -0,0 +1,271 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+.. _tutorial-micro-autotune:
+
+Autotuning with micro TVM
+=========================
+**Author**: `Andrew Reusch <https://github.com/areusch>`_, `Mehrdad Hessar
<https://github.com/mehrdadh>`_
+
+This tutorial explains how to autotune a model using the C runtime.
+"""
+
+import argparse
+
+# A mapping of a microTVM device to its target and board.
+PLATFORMS = {
+ "host": ("host", None),
+ "qemu_x86": ("host", "qemu_x86"),
+ "nrf5340dk": ("nrf5340dk", "nrf5340dk_nrf5340_cpuapp"),
+ "stm32f746xx_disco": ("stm32f746xx", "stm32f746g_disco"),
+ "stm32f746xx_nucleo": ("stm32f746xx", "nucleo_f746zg"),
+ "stm32l4r5zi_nucleo": ("stm32l4r5zi", "nucleo_l4r5zi"),
+}
+
+
+def main(args):
+ ####################
+ # Defining the model
+ ####################
+ #
+ # To begin with, define a model in Relay to be executed on-device. Then
create an IRModule from the Relay model and
+ # fill parameters with random numbers.
+ #
+
+ import tvm
Review comment:
Is there a reason to import tvm in the function vs at the top of the
module? If so, could you comment it?
##########
File path: tutorials/micro/micro_autotune.py
##########
@@ -0,0 +1,271 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+.. _tutorial-micro-autotune:
+
+Autotuning with micro TVM
+=========================
+**Author**: `Andrew Reusch <https://github.com/areusch>`_, `Mehrdad Hessar
<https://github.com/mehrdadh>`_
+
+This tutorial explains how to autotune a model using the C runtime.
+"""
+
+import argparse
+
+# A mapping of a microTVM device to its target and board.
+PLATFORMS = {
+ "host": ("host", None),
+ "qemu_x86": ("host", "qemu_x86"),
+ "nrf5340dk": ("nrf5340dk", "nrf5340dk_nrf5340_cpuapp"),
+ "stm32f746xx_disco": ("stm32f746xx", "stm32f746g_disco"),
+ "stm32f746xx_nucleo": ("stm32f746xx", "nucleo_f746zg"),
+ "stm32l4r5zi_nucleo": ("stm32l4r5zi", "nucleo_l4r5zi"),
+}
+
+
+def main(args):
+ ####################
+ # Defining the model
+ ####################
+ #
+ # To begin with, define a model in Relay to be executed on-device. Then
create an IRModule from the Relay model and
+ # fill parameters with random numbers.
+ #
+
+ import tvm
+
+ data_shape = (1, 3, 10, 10)
+ weight_shape = (6, 3, 5, 5)
+
+ data = tvm.relay.var("data", tvm.relay.TensorType(data_shape, "float32"))
+ weight = tvm.relay.var("weight", tvm.relay.TensorType(weight_shape,
"float32"))
+
+ y = tvm.relay.nn.conv2d(
+ data,
+ weight,
+ padding=(2, 2),
+ kernel_size=(5, 5),
+ kernel_layout="OIHW",
+ out_dtype="float32",
+ )
+ f = tvm.relay.Function([data, weight], y)
+
+ relay_mod = tvm.IRModule.from_expr(f)
+ relay_mod = tvm.relay.transform.InferType()(relay_mod)
+
+ import numpy as np
Review comment:
Is there a reason to import numpy in the function vs at the top of the
module? If so, could you comment it?
##########
File path: python/tvm/__init__.py
##########
@@ -68,6 +68,13 @@
# Contrib initializers
from .contrib import rocm as _rocm, nvcc as _nvcc, sdaccel as _sdaccel
+try:
+ if support.libinfo()["USE_MICRO"] != "NOT-FOUND":
+ from . import micro
+except support.LibInfoUnavailableError:
+ pass
+
Review comment:
Libinfo will now always return a dictionary, so update this to check
that the dictionary contains USE_MICRO.
##########
File path: python/tvm/micro/session.py
##########
@@ -234,3 +237,69 @@ def create_local_debug_executor(graph_json_str, mod,
device, dump_root=None):
graph_json_str,
dump_root=dump_root,
)
+
+
+RPC_SESSION = None
+
+
+@register_func("tvm.micro.compile_and_create_micro_session")
+def compile_and_create_micro_session(
+ mod_src_bytes: bytes,
+ template_project_dir: str,
+ project_options: dict = None,
+):
+ """Compile the given libraries and sources into a MicroBinary, then invoke
create_micro_session.
+
+ Parameters
+ ----------
+ mod_src_bytes : bytes
+ The content of a tarfile which contains the TVM-generated sources
which together form the
+ SystemLib. This tar is expected to be created by export_library. The
tar will be extracted
+ into a directory and the sources compiled into a MicroLibrary using
the Compiler.
+
+ template_project_dir: str
+ The path to a template microTVM Project API project which is used to
generate the embedded
+ project that is built and flashed onto the target device.
+
+ project_options: dict
+ Options for the microTVM API Server contained in template_project_dir.
+ """
+ global RPC_SESSION
+
+ temp_dir = utils.tempdir()
+ # Keep temp directory for generate project
+ temp_dir.set_keep_for_debug(True)
+ model_library_format_path = temp_dir / "model.tar.gz"
+ with open(model_library_format_path, "wb") as mlf_f:
+ mlf_f.write(mod_src_bytes)
+
+ try:
+ template_project = project.TemplateProject.from_directory(
+ template_project_dir, options=json.loads(project_options)
+ )
+ generated_project = template_project.generate_project_from_mlf(
+ model_library_format_path, temp_dir / "generated-project"
+ )
+ except Exception as exception:
+ logging.error("Project Generate Error: %s", str(exception))
+ raise exception
+
+ generated_project.build()
+ generated_project.flash()
+ transport = generated_project.transport()
+
+ RPC_SESSION = Session(transport_context_manager=transport)
+ RPC_SESSION.__enter__()
+ return RPC_SESSION._rpc._sess
+
+
+@register_func
+def destroy_micro_session():
+ global RPC_SESSION
+ if RPC_SESSION is not None:
+ exc_type, exc_value, traceback = RPC_SESSION.__exit__(None, None, None)
+ RPC_SESSION = None
+ if (exc_type, exc_value, traceback) != (None, None, None):
+ exc = exc_type(exc_value) # See PEP 3109
+ exc.__traceback__ = traceback
Review comment:
Where is this called? I can't seem to find another reference to it in
this PR or the codebase.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]