This is an automated email from the ASF dual-hosted git repository.

mehrdadh pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new c2cc01910c [microTVM] Update tutorials (#13845)
c2cc01910c is described below

commit c2cc01910c1b88ad2b593a138c8b984385d47db8
Author: Mehrdad Hessar <[email protected]>
AuthorDate: Fri Jan 27 14:13:21 2023 -0800

    [microTVM] Update tutorials (#13845)
    
    This PR updates microTVM tutorials to use updated APIs.
    It also adds an ordering to the tutorials that is useful for first-time
users.
    RVM tutorial is also removed as it is not supported anymore.
---
 docs/conf.py                                       |  12 +-
 docs/topic/microtvm/index.rst                      |  11 +-
 gallery/how_to/work_with_microtvm/micro_aot.py     |  17 +--
 .../how_to/work_with_microtvm/micro_autotune.py    |  13 +-
 gallery/how_to/work_with_microtvm/micro_ethosu.py  |   6 +-
 .../how_to/work_with_microtvm/micro_mlperftiny.py  |   7 +-
 gallery/how_to/work_with_microtvm/micro_pytorch.py |  18 +--
 .../work_with_microtvm/micro_reference_vm.py       | 159 ---------------------
 gallery/how_to/work_with_microtvm/micro_tflite.py  |  72 ++++------
 gallery/how_to/work_with_microtvm/micro_train.py   |   9 +-
 gallery/how_to/work_with_microtvm/micro_tvmc.sh    |  43 +++---
 python/tvm/micro/testing/utils.py                  |   8 +-
 tests/scripts/request_hook/request_hook.py         |   2 +-
 13 files changed, 105 insertions(+), 272 deletions(-)

diff --git a/docs/conf.py b/docs/conf.py
index eb2b39d4b1..8d24f05b9b 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -511,15 +511,15 @@ within_subsection_order = {
         "use_pass_instrument.py",
         "bring_your_own_datatypes.py",
     ],
-    "micro": [
-        "micro_train.py",
-        "micro_autotune.py",
-        "micro_reference_vm.py",
-        "micro_tflite.py",
-        "micro_ethosu.py",
+    "work_with_microtvm": [
         "micro_tvmc.py",
+        "micro_tflite.py",
         "micro_aot.py",
         "micro_pytorch.py",
+        "micro_train.py",
+        "micro_autotune.py",
+        "micro_ethosu.py",
+        "micro_mlperftiny.py",
     ],
 }
 
diff --git a/docs/topic/microtvm/index.rst b/docs/topic/microtvm/index.rst
index ebcadb3442..4dd4ab5d51 100644
--- a/docs/topic/microtvm/index.rst
+++ b/docs/topic/microtvm/index.rst
@@ -50,13 +50,12 @@ Getting Started with microTVM
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Before working with microTVM, we recommend you have a supported development 
board. Then, follow these
-tutorials to get started with microTVM:
+tutorials to get started with microTVM. The tutorials are ordered so that
developers can learn
+more as they follow along. Here is a list of tutorials that you can
start with:
 
-1. :ref:`Start the microTVM Reference VM <tutorial-micro-reference-vm>`. The 
microTVM tutorials
-   depend on Zephyr and on a compiler toolchain for your hardware. The 
reference VM is a convenient
-   way to install those dependencies.
-2. Try the :ref:`microTVM with TFLite Tutorial <microTVM-with-TFLite>`.
-3. Try running a more complex `CIFAR10-CNN model 
<https://github.com/areusch/microtvm-blogpost-eval>`_.
+1. Try :ref:`microTVM CLI Tool <tutorial-micro-cli-tool>`.
+2. Try the :ref:`microTVM TFLite Tutorial <tutorial_micro_tflite>`.
+3. Try running a more complex tutorial: :ref:`Creating Your MLPerfTiny 
Submission with microTVM <tutorial-micro-mlperftiny>`.
 
 
 How microTVM Works
diff --git a/gallery/how_to/work_with_microtvm/micro_aot.py 
b/gallery/how_to/work_with_microtvm/micro_aot.py
index c1b29ba5c5..f31ffa1570 100644
--- a/gallery/how_to/work_with_microtvm/micro_aot.py
+++ b/gallery/how_to/work_with_microtvm/micro_aot.py
@@ -15,10 +15,10 @@
 # specific language governing permissions and limitations
 # under the License.
 """
-.. _tutorial-micro-AoT:
+.. _tutorial-micro-aot:
 
-microTVM Host-Driven AoT
-===========================
+3. microTVM Ahead-of-Time (AOT) Compilation
+===========================================
 **Authors**:
 `Mehrdad Hessar <https://github.com/mehrdadh>`_,
 `Alan MacDonald <https://github.com/alanmacd>`_
@@ -59,6 +59,7 @@ import json
 
 import tvm
 from tvm import relay
+import tvm.micro.testing
 from tvm.relay.backend import Executor, Runtime
 from tvm.contrib.download import download_testdata
 
@@ -102,8 +103,7 @@ relay_mod, params = relay.frontend.from_tflite(
 # using AOT host driven executor. We use the host micro target which is for 
running a model
 # on x86 CPU using CRT runtime or running a model with Zephyr platform on 
qemu_x86 simulator
 # board. In the case of a physical microcontroller, we get the target model 
for the physical
-# board (E.g. nucleo_l4r5zi) and pass it to `tvm.target.target.micro` to 
create a full
-# micro target.
+# board (E.g. nucleo_l4r5zi) and change `BOARD` to supported Zephyr board.
 #
 
 # Use the C runtime (crt) and enable static linking by setting system-lib to 
True
@@ -111,18 +111,15 @@ RUNTIME = Runtime("crt", {"system-lib": True})
 
 # Simulate a microcontroller on the host machine. Uses the main() from 
`src/runtime/crt/host/main.cc`.
 # To use physical hardware, replace "host" with something matching your 
hardware.
-TARGET = tvm.target.target.micro("host")
+TARGET = tvm.micro.testing.get_target("crt")
 
 # Use the AOT executor rather than graph or vm executors. Don't use unpacked 
API or C calling style.
 EXECUTOR = Executor("aot")
 
 if use_physical_hw:
-    boards_file = 
pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) / "boards.json"
-    with open(boards_file) as f:
-        boards = json.load(f)
     BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_l4r5zi")
     SERIAL = os.getenv("TVM_MICRO_SERIAL", default=None)
-    TARGET = tvm.target.target.micro(boards[BOARD]["model"])
+    TARGET = tvm.micro.testing.get_target("zephyr", BOARD)
 
 ######################################################################
 # Compile the model
diff --git a/gallery/how_to/work_with_microtvm/micro_autotune.py 
b/gallery/how_to/work_with_microtvm/micro_autotune.py
index 9be257a57a..e8c032b70e 100644
--- a/gallery/how_to/work_with_microtvm/micro_autotune.py
+++ b/gallery/how_to/work_with_microtvm/micro_autotune.py
@@ -18,8 +18,8 @@
 """
 .. _tutorial-micro-autotune:
 
-Autotuning with microTVM
-=========================
+6. Model Tuning with microTVM
+=============================
 **Authors**:
 `Andrew Reusch <https://github.com/areusch>`_,
 `Mehrdad Hessar <https://github.com/mehrdadh>`_
@@ -55,6 +55,7 @@ import pathlib
 
 import tvm
 from tvm.relay.backend import Runtime
+import tvm.micro.testing
 
 ####################
 # Defining the model
@@ -102,20 +103,16 @@ params = {"weight": weight_sample}
 #
 
 RUNTIME = Runtime("crt", {"system-lib": True})
-TARGET = tvm.target.target.micro("host")
+TARGET = tvm.micro.testing.get_target("crt")
 
 # Compiling for physical hardware
 # --------------------------------------------------------------------------
 #  When running on physical hardware, choose a TARGET and a BOARD that 
describe the hardware. The
 #  STM32L4R5ZI Nucleo target and board is chosen in the example below.
 if use_physical_hw:
-    boards_file = 
pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) / "boards.json"
-    with open(boards_file) as f:
-        boards = json.load(f)
-
     BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_l4r5zi")
     SERIAL = os.getenv("TVM_MICRO_SERIAL", default=None)
-    TARGET = tvm.target.target.micro(boards[BOARD]["model"])
+    TARGET = tvm.micro.testing.get_target("zephyr", BOARD)
 
 
 #########################
diff --git a/gallery/how_to/work_with_microtvm/micro_ethosu.py 
b/gallery/how_to/work_with_microtvm/micro_ethosu.py
index 74a9d59d77..e6f47321c8 100644
--- a/gallery/how_to/work_with_microtvm/micro_ethosu.py
+++ b/gallery/how_to/work_with_microtvm/micro_ethosu.py
@@ -15,8 +15,10 @@
 # specific language governing permissions and limitations
 # under the License.
 """
-Running TVM on bare metal Arm(R) Cortex(R)-M55 CPU and Ethos(TM)-U55 NPU with 
CMSIS-NN
-======================================================================================
+.. _tutorial-micro-ethosu:
+
+7. Running TVM on bare metal Arm(R) Cortex(R)-M55 CPU and Ethos(TM)-U55 NPU 
with CMSIS-NN
+=========================================================================================
 **Author**:
 `Grant Watson <https://github.com/grant-arm>`_
 
diff --git a/gallery/how_to/work_with_microtvm/micro_mlperftiny.py 
b/gallery/how_to/work_with_microtvm/micro_mlperftiny.py
index 79308e0723..e8c6a253ad 100644
--- a/gallery/how_to/work_with_microtvm/micro_mlperftiny.py
+++ b/gallery/how_to/work_with_microtvm/micro_mlperftiny.py
@@ -15,10 +15,10 @@
 # specific language governing permissions and limitations
 # under the License.
 """
-.. _tutorial-micro-MLPerfTiny:
+.. _tutorial-micro-mlperftiny:
 
-Creating Your MLPerfTiny Submission with microTVM
-=================================================
+8. Creating Your MLPerfTiny Submission with microTVM
+====================================================
 **Authors**:
 `Mehrdad Hessar <https://github.com/mehrdadh>`_
 
@@ -69,6 +69,7 @@ from tvm.relay.backend import Executor, Runtime
 from tvm.contrib.download import download_testdata
 from tvm.micro import export_model_library_format
 from tvm.micro.model_library_format import generate_c_interface_header
+import tvm.micro.testing
 from tvm.micro.testing.utils import (
     create_header_file,
     mlf_extract_workspace_size_bytes,
diff --git a/gallery/how_to/work_with_microtvm/micro_pytorch.py 
b/gallery/how_to/work_with_microtvm/micro_pytorch.py
index 370e4d7e80..a7f5f10280 100644
--- a/gallery/how_to/work_with_microtvm/micro_pytorch.py
+++ b/gallery/how_to/work_with_microtvm/micro_pytorch.py
@@ -15,10 +15,10 @@
 # specific language governing permissions and limitations
 # under the License.
 """
-.. _tutorial-micro-Pytorch:
+.. _tutorial-micro-pytorch:
 
-microTVM PyTorch Tutorial
-===========================
+4. microTVM PyTorch Tutorial
+============================
 **Authors**:
 `Mehrdad Hessar <https://github.com/mehrdadh>`_
 
@@ -46,6 +46,7 @@ import tvm
 from tvm import relay
 from tvm.contrib.download import download_testdata
 from tvm.relay.backend import Executor
+import tvm.micro.testing
 
 ##################################
 # Load a pre-trained PyTorch model
@@ -91,13 +92,14 @@ relay_mod, params = 
relay.frontend.from_pytorch(scripted_model, shape_list)
 # and we use `host` micro target. Using this setup, TVM compiles the model
 # for C runtime which can run on a x86 CPU machine with the same flow that
 # would run on a physical microcontroller.
+# CRT uses the main() from `src/runtime/crt/host/main.cc`
+# To use physical hardware, replace `board` with another physical micro 
target, e.g. `nrf5340dk_nrf5340_cpuapp`
+# or `mps2_an521` and change the platform type to Zephyr.
+# See more target examples in :ref:`Training Vision Models for microTVM on 
Arduino <tutorial-micro-train-arduino>`
+# and :ref:`microTVM TFLite Tutorial<tutorial_micro_tflite>`.
 #
 
-
-# Simulate a microcontroller on the host machine. Uses the main() from 
`src/runtime/crt/host/main.cc`
-# To use physical hardware, replace "host" with another physical micro target, 
e.g. `nrf52840`
-# or `mps2_an521`. See more more target examples in micro_train.py and 
micro_tflite.py tutorials.
-target = tvm.target.target.micro("host")
+target = tvm.micro.testing.get_target(platform="crt", board=None)
 
 # Use the C runtime (crt) and enable static linking by setting system-lib to 
True
 runtime = tvm.relay.backend.Runtime("crt", {"system-lib": True})
diff --git a/gallery/how_to/work_with_microtvm/micro_reference_vm.py 
b/gallery/how_to/work_with_microtvm/micro_reference_vm.py
deleted file mode 100644
index 3121bca353..0000000000
--- a/gallery/how_to/work_with_microtvm/micro_reference_vm.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-"""
-.. _tutorial-micro-reference-vm:
-
-===================================
-microTVM Reference Virtual Machines
-===================================
-**Author**: `Andrew Reusch <[email protected]>`_
-
-This tutorial explains how to launch microTVM Reference Virtual Machines. You 
can use these to
-develop on real physical hardware without needing to individually install the 
microTVM
-dependencies. These are also particularly useful when trying to reproduce 
behavior with
-microTVM, such as when filing bug reports.
-
-microTVM is the effort to allow TVM to build and execute models on bare-metal 
microcontrollers.
-microTVM aims to be compatible with a wide variety of SoCs and runtime 
environments (i.e. bare metal,
-RTOS, etc). However, some stable software environment is needed to allow 
developers to share and
-reproduce bugs and results. The microTVM Reference Virtual Machines are 
intended to provide that
-environment.
-
-How it works
-============
-
-No Virtual Machines are stored in the TVM repository--instead, the files 
stored in
-``apps/microtvm/reference-vm`` describe how to build VMs to the Vagrant_ VM 
builder tool.
-
-The Reference VMs are split into two parts:
-
-1. A Vagrant Base Box, which contains all of the stable dependencies for that 
platform. Build
-   scripts are stored in ``apps/microtvm/reference-vm/<platform>/base-box``. 
TVM committers run
-   these when a platform's "stable" dependencies change, and the generated 
base boxes are stored in
-   `Vagrant Cloud`_.
-2. A per-workspace VM, which users normally build using the Base Box as a 
starting point. Build
-   scripts are stored in ``apps/microtvm/reference-vm/<platform>`` (everything 
except ``base-box``).
-
-.. _Vagrant: https://vagrantup.com
-.. _Vagrant Cloud: https://app.vagrantup.com/tlcpack
-
-Setting up the VM
-=================
-
-Installing prerequisites
-------------------------
-
-A minimal set of prerequisites are needed:
-
-1. `Vagrant <https://vagrantup.com>`__
-2. A supported Virtual Machine hypervisor (**VirtualBox**, **Parallels**, or 
**VMWare Fusion/Workstation**).
-   `VirtualBox <https://www.virtualbox.org>`__ is a suggested free hypervisor, 
but please note
-   that the `VirtualBox Extension Pack`_ is required for proper USB 
forwarding. If using VirtualBox,
-   also consider installing the `vbguest 
<https://github.com/dotless-de/vagrant-vbguest>`_ plugin.
-
-.. _VirtualBox Extension Pack: 
https://www.virtualbox.org/wiki/Downloads#VirtualBox6.1.16OracleVMVirtualBoxExtensionPack
-
-3. If required for your hypervisor, the
-   `Vagrant provider plugin 
<https://github.com/hashicorp/vagrant/wiki/Available-Vagrant-Plugins#providers>`__
 (or see `here <https://www.vagrantup.com/vmware>`__ for VMWare).
-
-First boot
-----------
-
-The first time you use a reference VM, you need to create the box locally and 
then provision it.
-
-.. code-block:: bash
-
-    # Replace zephyr with the name of a different platform, if you are not 
using Zephyr.
-    ~/.../tvm $ cd apps/microtvm/reference-vm/zephyr
-    # Replace <provider_name> with the name of the hypervisor you wish to use 
(i.e. virtualbox, parallels, vmware_desktop).
-    ~/.../tvm/apps/microtvm/reference-vm/zephyr $ vagrant up 
--provider=<provider_name>
-
-
-This command will take a couple of minutes to run and will require 4 to 5GB of 
storage on your
-machine. It does the following:
-
-1. Downloads the `microTVM base box`_ and clones it to form a new VM specific 
to this TVM directory.
-2. Mounts your TVM directory (and, if using ``git-subtree``, the original 
``.git`` repo) into the
-   VM.
-3. Builds TVM and installs a Python virtualenv with the dependencies 
corresponding with your TVM
-   build.
-
-.. _microTVM base box: https://app.vagrantup.com/tlcpack/boxes/microtvm
-
-Connect Hardware to the VM
---------------------------
-
-Next, you need to configure USB passthrough to attach your physical 
development board to the virtual
-machine (rather than directly to your laptop's host OS).
-
-It's suggested you setup a device filter, rather than doing a one-time 
forward, because often the
-device may reboot during the programming process and you may, at that time, 
need to enable
-forwarding again. It may not be obvious to the end user when this occurs. 
Instructions to do that:
-
- * `VirtualBox <https://www.virtualbox.org/manual/ch03.html#usb-support>`__
- * `Parallels <https://kb.parallels.com/122993>`__
- * `VMWare Workstation 
<https://docs.vmware.com/en/VMware-Workstation-Pro/15.0/com.vmware.ws.using.doc/GUID-E003456F-EB94-4B53-9082-293D9617CB5A.html>`__
-
-Rebuilding TVM inside the Reference VM
---------------------------------------
-
-After the first boot, you'll need to ensure you keep the build, in 
``$TVM_HOME/build-microtvm-zephyr``,
-up-to-date when you modify the C++ runtime or checkout a different revision. 
You can either
-re-provision the machine (``vagrant provision`` in the same directory you ran 
``vagrant up`` before)
-or manually rebuild TVM yourself.
-
-Remember: the TVM ``.so`` built inside the VM is different from the one you 
may use on your host
-machine. This is why it's built inside the special directory 
``build-microtvm-zephyr``.
-
-Logging in to the VM
---------------------
-
-The VM should be available to your host only with the hostname ``microtvm``. 
You can SSH to the VM
-as follows:
-
-.. code-block:: bash
-
-    $ vagrant ssh
-
-Then ``cd`` to the same path used on your host machine for TVM. For example, 
on Mac:
-
-.. code-block:: bash
-
-    $ cd /Users/yourusername/path/to/tvm
-
-Running tests
-=============
-
-Once the VM has been provisioned, tests can be executed using ``poetry``:
-
-.. code-block:: bash
-
-    $ cd apps/microtvm/reference-vm/zephyr
-    $ poetry run python3 ../../../../tests/micro/zephyr/test_zephyr.py 
--board=stm32f746g_disco
-
-If you do not have physical hardware attached, but wish to run the tests using 
the
-local QEMU emulator running within the VM, run the following commands instead:
-
-.. code-block:: bash
-
-    $ cd /Users/yourusername/path/to/tvm
-    $ cd apps/microtvm/reference-vm/zephyr/
-    $ poetry run pytest ../../../../tests/micro/zephyr/test_zephyr.py 
--board=qemu_x86
-
-
-
-"""
diff --git a/gallery/how_to/work_with_microtvm/micro_tflite.py 
b/gallery/how_to/work_with_microtvm/micro_tflite.py
index 0770d472c9..67b3e66e33 100644
--- a/gallery/how_to/work_with_microtvm/micro_tflite.py
+++ b/gallery/how_to/work_with_microtvm/micro_tflite.py
@@ -15,9 +15,9 @@
 # specific language governing permissions and limitations
 # under the License.
 """
-.. _microTVM-with-TFLite:
+.. _tutorial_micro_tflite:
 
-microTVM with TFLite Models
+2. microTVM TFLite Tutorial
 ===========================
 **Author**: `Tom Gall <https://github.com/tom-gall>`_
 
@@ -55,11 +55,16 @@ import tempfile
 import numpy as np
 
 import tvm
+import tvm.micro
+import tvm.micro.testing
 from tvm import relay
 import tvm.contrib.utils
+from tvm.micro import export_model_library_format
 from tvm.contrib.download import download_testdata
 
-model_url = "https://people.linaro.org/~tom.gall/sine_model.tflite";
+model_url = (
+    
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/model/sine_model.tflite";
+)
 model_file = "sine_model.tflite"
 model_path = download_testdata(model_url, model_file, module="data")
 
@@ -105,55 +110,44 @@ mod, params = relay.frontend.from_tflite(
 #
 # Now we create a build config for relay, turning off two options and then 
calling relay.build which
 # will result in a C source file for the selected TARGET. When running on a 
simulated target of the
-# same architecture as the host (where this Python script is executed) choose 
"host" below for the
+# same architecture as the host (where this Python script is executed) choose 
"crt" below for the
 # TARGET, the C Runtime as the RUNTIME and a proper board/VM to run it (Zephyr 
will create the right
 # QEMU VM based on BOARD. In the example below the x86 arch is selected and a 
x86 VM is picked up accordingly:
 #
 RUNTIME = tvm.relay.backend.Runtime("crt", {"system-lib": True})
-TARGET = tvm.target.target.micro("host")
+TARGET = tvm.micro.testing.get_target("crt")
 
-#
-# Compiling for physical hardware
-#  When running on physical hardware, choose a TARGET and a BOARD that 
describe the hardware. The
-#  STM32F746 Nucleo target and board is chosen in the example below. Another 
option would be to
-#  choose the STM32F746 Discovery board instead. Since that board has the same 
MCU as the Nucleo
-#  board but a couple of wirings and configs differ, it's necessary to select 
the "stm32f746g_disco"
-#  board to generated the right firmware image.
-#
+# When running on physical hardware, choose a TARGET and a BOARD that describe 
the hardware. The
+# STM32L4R5ZI Nucleo target and board is chosen in the example below. You 
could change the testing
+# board by simply exporting the `TVM_MICRO_BOARD` environment variable with a
different Zephyr-supported board.
 
 if use_physical_hw:
-    boards_file = 
pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) / "boards.json"
-    with open(boards_file) as f:
-        boards = json.load(f)
     BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_l4r5zi")
     SERIAL = os.getenv("TVM_MICRO_SERIAL", default=None)
-    TARGET = tvm.target.target.micro(boards[BOARD]["model"])
+    TARGET = tvm.micro.testing.get_target("zephyr", BOARD)
 
+# For some boards, Zephyr runs them emulated by default, using QEMU. For 
example, below is the
+# TARGET and BOARD used to build a microTVM firmware for the mps2-an521 board.
 #
-#  For some boards, Zephyr runs them emulated by default, using QEMU. For 
example, below is the
-#  TARGET and BOARD used to build a microTVM firmware for the mps2-an521 
board. Since that board
-#  runs emulated by default on Zephyr the suffix "-qemu" is added to the board 
name to inform
-#  microTVM that the QEMU transporter must be used to communicate with the 
board. If the board name
-#  already has the prefix "qemu_", like "qemu_x86", then it's not necessary to 
add that suffix.
-#
-#  TARGET = tvm.target.target.micro("mps2_an521")
-#  BOARD = "mps2_an521-qemu"
+# `mps2_an521 = "mps2_an521"`
+# `TARGET = tvm.micro.testing.get_target("zephyr", BOARD)`
 
 ######################################################################
-# Now, compile the model for the target:
+# Now, compile the model for the target. If you do not specify Executor,
+# by default it uses GraphExecutor.
 
-with tvm.transform.PassContext(
-    opt_level=3, config={"tir.disable_vectorize": True}, 
disabled_pass=["AlterOpLayout"]
-):
+with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": 
True}):
     module = relay.build(mod, target=TARGET, runtime=RUNTIME, params=params)
 
 
+######################################################################
 # Inspecting the compilation output
 # ---------------------------------
 #
 # The compilation process has produced some C code implementing the operators 
in this graph. We
 # can inspect it by printing the CSourceModule contents (for the purposes of 
this tutorial, let's
 # just print the first 10 lines):
+#
 
 c_source_module = module.get_lib().imported_modules[0]
 assert c_source_module.type_key == "c", "tutorial is broken"
@@ -166,27 +160,23 @@ assert any(
 print("\n".join(first_few_lines))
 
 
+######################################################################
 # Compiling the generated code
 # ----------------------------
 #
 # Now we need to incorporate the generated C code into a project that allows 
us to run inference on the
 # device. The simplest way to do this is to integrate it yourself, using 
microTVM's standard output format
-# (:doc:`Model Library Format` </dev/model_library_format>`). This is a 
tarball with a standard layout:
+# model library format. This is a tarball with a standard layout.
 
 # Get a temporary path where we can store the tarball (since this is running 
as a tutorial).
 
-fd, model_library_format_tar_path = tempfile.mkstemp()
-os.close(fd)
-os.unlink(model_library_format_tar_path)
-tvm.micro.export_model_library_format(module, model_library_format_tar_path)
+temp_dir = tvm.contrib.utils.tempdir()
+model_tar_path = temp_dir / "model.tar"
+export_model_library_format(module, model_tar_path)
 
-with tarfile.open(model_library_format_tar_path, "r:*") as tar_f:
+with tarfile.open(model_tar_path, "r:*") as tar_f:
     print("\n".join(f" - {m.name}" for m in tar_f.getmembers()))
 
-# Cleanup for tutorial:
-os.unlink(model_library_format_tar_path)
-
-
 # TVM also provides a standard way for embedded platforms to automatically 
generate a standalone
 # project, compile and flash it to a target, and communicate with it using the 
standard TVM RPC
 # protocol. The Model Library Format serves as the model input to this 
process. When embedded
@@ -201,11 +191,8 @@ os.unlink(model_library_format_tar_path)
 template_project_path = 
pathlib.Path(tvm.micro.get_microtvm_template_projects("crt"))
 project_options = {}  # You can use options to provide platform-specific 
options through TVM.
 
-# Compiling for physical hardware (or an emulated board, like the mps_an521)
-# --------------------------------------------------------------------------
 #  For physical hardware, you can try out the Zephyr platform by using a 
different template project
 #  and options:
-#
 
 if use_physical_hw:
     template_project_path = 
pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr"))
@@ -218,7 +205,6 @@ if use_physical_hw:
     }
 
 # Create a temporary directory
-
 temp_dir = tvm.contrib.utils.tempdir()
 generated_project_dir = temp_dir / "generated-project"
 generated_project = tvm.micro.generate_project(
diff --git a/gallery/how_to/work_with_microtvm/micro_train.py 
b/gallery/how_to/work_with_microtvm/micro_train.py
index 9b8a9a68dd..56ff54616f 100644
--- a/gallery/how_to/work_with_microtvm/micro_train.py
+++ b/gallery/how_to/work_with_microtvm/micro_train.py
@@ -15,10 +15,10 @@
 # specific language governing permissions and limitations
 # under the License.
 """
-.. _microtvm-train-arduino:
+.. _tutorial-micro-train-arduino:
 
-Training Vision Models for microTVM on Arduino
-==============================================
+5. Training Vision Models for microTVM on Arduino
+=================================================
 **Author**: `Gavin Uberti <https://github.com/guberti>`_
 
 This tutorial shows how MobileNetV1 models can be trained
@@ -441,6 +441,7 @@ with open(QUANTIZED_MODEL_PATH, "wb") as f:
 import shutil
 import tflite
 import tvm
+import tvm.micro.testing
 
 # Method to load model is different in TFLite 1 vs 2
 try:  # TFLite 2.1 and above
@@ -452,7 +453,7 @@ except AttributeError:  # Fall back to TFLite 1.14 method
 mod, params = tvm.relay.frontend.from_tflite(tflite_model)
 
 # Set configuration flags to improve performance
-target = tvm.target.target.micro("nrf52840")
+target = tvm.micro.testing.get_target("zephyr", "nrf5340dk_nrf5340_cpuapp")
 runtime = tvm.relay.backend.Runtime("crt")
 executor = tvm.relay.backend.Executor("aot", {"unpacked-api": True})
 
diff --git a/gallery/how_to/work_with_microtvm/micro_tvmc.sh 
b/gallery/how_to/work_with_microtvm/micro_tvmc.sh
index 0eaef9c6a8..f7f27ed182 100755
--- a/gallery/how_to/work_with_microtvm/micro_tvmc.sh
+++ b/gallery/how_to/work_with_microtvm/micro_tvmc.sh
@@ -16,30 +16,26 @@
 # under the License.
 
 : '
-.. _tutorial-micro-tvmc:
+.. _tutorial-micro-cli-tool:
 
-Executing a Tiny Model with TVMC Micro
-======================================
+1. microTVM CLI Tool
+====================
 **Author**: `Mehrdad Hessar <https://github.com/mehrdadh>`_
 
 This tutorial explains how to compile a tiny model for a micro device,
 build a program on Zephyr platform to execute this model, flash the program
 and run the model all using `tvmc micro` command.
+You need to install Python and Zephyr dependencies before proceeding with this
tutorial.
 '
 
 ######################################################################
-# .. note::
-#     This tutorial is explaining using TVMC Mirco on Zephyr platform. You need
-#     to install Zephyr dependencies before processing with this tutorial. 
Alternatively,
-#     you can run this tutorial in one of the following ways which has Zephyr 
depencencies already installed.
 #
-#     * Use `microTVM Reference Virtual Machines 
<https://tvm.apache.org/docs/how_to/work_with_microtvm/micro_reference_vm.html#sphx-glr-how-to-work-with-microtvm-micro-reference-vm-py>`_.
-#     * Use QEMU docker image provided by TVM. Following these you will 
download and login to the docker image:
+#     .. include:: 
../../../../gallery/how_to/work_with_microtvm/install_dependencies.rst
 #
-#     .. code-block:: bash
+
+######################################################################
 #
-#       cd tvm
-#       ./docker/bash.sh tlcpack/ci-qemu
+#     .. include:: 
../../../../gallery/how_to/work_with_microtvm/install_zephyr.rst
 #
 
 # bash-ignore
@@ -93,7 +89,7 @@ wget 
https://github.com/tensorflow/tflite-micro/raw/main/tensorflow/lite/micro/e
 #
 # Model Library Format (MLF) is an output format that TVM provides for micro 
targets. MLF is a tarball
 # containing a file for each piece of the TVM compiler output which can be 
used on micro targets outside
-# TVM environment. Read more about `Model Library Format 
<https://tvm.apache.org/docs//arch/model_library_format.html>`_.
+# TVM environment. Read more about :ref:`Model Library Format 
<model_library_format>`.
 #
 # Here, we generate a MLF file for ``qemu_x86`` Zephyr board. To generate MLF 
output for the ``magic_wand`` tflite model:
 #
@@ -183,12 +179,17 @@ tvmc run \
     --fill-mode ones \
     --print-top 4
 # bash
-#     # Output:
-#     #
-#     # INFO:__main__:b'[100%] [QEMU] CPU: qemu32,+nx,+pae\n'
-#     # remote: microTVM Zephyr runtime - running
-#     # INFO:__main__:b'[100%] Built target run\n'
-#     # [[3.         1.         2.         0.        ]
-#     # [0.47213247 0.41364592 0.07525456 0.03896701]]
+
+############################################################
+# Specifically, this command sets the input of the model
+# to all ones and shows the four values of the output with their indices.
+#
+# .. code-block:: bash
+#
+#      # Output:
+#      # INFO:__main__:b'[100%] [QEMU] CPU: qemu32,+nx,+pae\n'
+#      # remote: microTVM Zephyr runtime - running
+#      # INFO:__main__:b'[100%] Built target run\n'
+#      # [[3.         1.         2.         0.        ]
+#      # [0.47213247 0.41364592 0.07525456 0.03896701]]
 #
-# Specifically, this command sets the input of the model to all ones and shows 
the four values of the output with their indices.
diff --git a/python/tvm/micro/testing/utils.py 
b/python/tvm/micro/testing/utils.py
index 170c576314..43cad78507 100644
--- a/python/tvm/micro/testing/utils.py
+++ b/python/tvm/micro/testing/utils.py
@@ -47,10 +47,16 @@ def get_supported_boards(platform: str):
         return json.load(f)
 
 
-def get_target(platform: str, board: str) -> tvm.target.Target:
+def get_target(platform: str, board: str = None) -> tvm.target.Target:
     """Intentionally simple function for making Targets for microcontrollers.
     If you need more complex arguments, one should call target.micro directly. 
Note
     that almost all, but not all, supported microcontrollers are Arm-based."""
+    if platform == "crt":
+        return tvm.target.target.micro("host")
+
+    if not board:
+        raise ValueError(f"`board` type is required for {platform} platform.")
+
     model = get_supported_boards(platform)[board]["model"]
     return tvm.target.target.micro(model, options=["-device=arm_cpu"])
 
diff --git a/tests/scripts/request_hook/request_hook.py 
b/tests/scripts/request_hook/request_hook.py
index b033f1ca84..f093ab789b 100644
--- a/tests/scripts/request_hook/request_hook.py
+++ b/tests/scripts/request_hook/request_hook.py
@@ -152,7 +152,7 @@ URL_MAP = {
     "https://homes.cs.washington.edu/~moreau/media/vta/cat.jpg": 
f"{BASE}/vta_cat.jpg",
     
"https://objects.githubusercontent.com/github-production-release-asset-2e65be/130932608/4b196a8a-4e2d-11e8-9a11-be3c41846711?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20221004%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20221004T170456Z&X-Amz-Expires=300&X-Amz-Signature=0602b68e8864b9b01c9142eee22aed3543fe98a5482686eec33d98e2617a2295&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=130932608&response-content-disposition=attachment%3B%20filename%3Dmob
 [...]
     
"https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ResNet/resnet18.zip":
 f"{BASE}/oneflow/resnet18.zip",
-    "https://people.linaro.org/~tom.gall/sine_model.tflite": 
f"{BASE}/sine_model.tflite",
+    
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/model/sine_model.tflite":
 f"{BASE}/tlc-pack/web-data/testdata/microTVM/model/sine_model.tflite",
     "https://pjreddie.com/media/files/yolov3-tiny.weights?raw=true": 
f"{BASE}/yolov3-tiny.weights",
     "https://pjreddie.com/media/files/yolov3.weights": 
f"{BASE}/yolov3.weights",
     
"https://raw.githubusercontent.com/Cadene/pretrained-models.pytorch/master/data/imagenet_classes.txt":
 f"{BASE}/2022-10-05/imagenet_classes.txt",

Reply via email to