This is an automated email from the ASF dual-hosted git repository.

junrushao pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 12440895e4 [MetaSchedule] Add Testing Script with ONNX Support (#11587)
12440895e4 is described below

commit 12440895e4baad1de494f0a3876edee3e1df06ee
Author: Xiyou Zhou <[email protected]>
AuthorDate: Tue Jun 7 11:08:32 2022 -0700

    [MetaSchedule] Add Testing Script with ONNX Support (#11587)
    
    This PR introduces two tuning scripts for MetaSchedule and AutoScheduler
tuning support with ONNX files. Now we can easily benchmark ONNX models
with command-line scripts. A sample tuning call looks similar to the
following script:
    
    For Meta Schedule ONNX tuning:
    ```
    python3 -m tvm.meta_schedule.testing.tune_onnx_meta_schedule \
        --model-name   "$MODEL_NAME"                             \
        --onnx-path    "$ONNX_PATH"                              \
        --input-shape  "$INPUT_SHAPE"                            \
        --target       "$TARGET"                                 \
        --num-trials   $NUM_TRIALS                               \
        --rpc-host     $RPC_HOST                                 \
        --rpc-port     $RPC_PORT                                 \
        --rpc-key      $RPC_KEY                                  \
        --rpc-workers  $RPC_WORKERS                              \
        --work-dir     $WORK_DIR                                 \
        |& tee         "$WORK_DIR/$MODEL_NAME.log"
    ```
    
    For AutoScheduler ONNX tuning:
    ```
    python3 -m tvm.meta_schedule.testing.tune_onnx_auto_scheduler \
        --model-name   "$MODEL_NAME"                              \
        --onnx-path    "$ONNX_PATH"                               \
        --input-shape  "$INPUT_SHAPE"                             \
        --target       "$TARGET"                                  \
        --num-trials   $NUM_TRIALS                                \
        --rpc-host     $RPC_HOST                                  \
        --rpc-port     $RPC_PORT                                  \
        --rpc-key      $RPC_KEY                                   \
        --rpc-workers  $RPC_WORKERS                               \
        --log-dir      $WORK_DIR                                  \
        |& tee         "$WORK_DIR/$MODEL_NAME.log"
    ```
---
 ...to_scheduler.py => tune_onnx_auto_scheduler.py} |  46 +++----
 ...uto_scheduler.py => tune_onnx_meta_schedule.py} | 137 ++++++++-------------
 .../testing/tune_relay_auto_scheduler.py           |   4 +-
 3 files changed, 76 insertions(+), 111 deletions(-)

diff --git a/python/tvm/meta_schedule/testing/tune_relay_auto_scheduler.py 
b/python/tvm/meta_schedule/testing/tune_onnx_auto_scheduler.py
similarity index 88%
copy from python/tvm/meta_schedule/testing/tune_relay_auto_scheduler.py
copy to python/tvm/meta_schedule/testing/tune_onnx_auto_scheduler.py
index abac49c50c..e916f5ace3 100644
--- a/python/tvm/meta_schedule/testing/tune_relay_auto_scheduler.py
+++ b/python/tvm/meta_schedule/testing/tune_onnx_auto_scheduler.py
@@ -20,18 +20,24 @@ import json
 import os
 
 import numpy as np  # type: ignore
+import onnx  # type: ignore
 import tvm
+from tvm.relay.frontend import from_onnx
 from tvm import auto_scheduler
 from tvm import meta_schedule as ms
 from tvm import relay
 from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc
-from tvm.meta_schedule.testing.relay_workload import get_network
 
 
 def _parse_args():
     args = argparse.ArgumentParser()
     args.add_argument(
-        "--workload",
+        "--model-name",
+        type=str,
+        required=True,
+    )
+    args.add_argument(
+        "--onnx-path",
         type=str,
         required=True,
     )
@@ -39,6 +45,7 @@ def _parse_args():
         "--input-shape",
         type=str,
         required=True,
+        help='example: `[{"name": "input1", "dtype": "int64", "shape": [1, 1, 
8]}]',
     )
     args.add_argument(
         "--target",
@@ -71,15 +78,10 @@ def _parse_args():
         required=True,
     )
     args.add_argument(
-        "--log-dir",
+        "--work-dir",
         type=str,
         required=True,
     )
-    args.add_argument(
-        "--cache-dir",
-        type=str,
-        default=None,
-    )
     parsed = args.parse_args()
     parsed.target = tvm.target.Target(parsed.target)
     parsed.input_shape = json.loads(parsed.input_shape)
@@ -96,7 +98,7 @@ ARGS = _parse_args()
 
 
 def main():
-    log_file = os.path.join(ARGS.log_dir, f"{ARGS.workload}.json")
+    log_file = os.path.join(ARGS.work_dir, f"{ARGS.model_name}.json")
 
     runner = auto_scheduler.RPCRunner(
         key=ARGS.rpc_key,
@@ -129,18 +131,16 @@ def main():
         )
     else:
         raise NotImplementedError(f"Unsupported target {ARGS.target}")
-    mod, params, (input_name, input_shape, input_dtype) = get_network(
-        ARGS.workload,
-        ARGS.input_shape,
-        cache_dir=ARGS.cache_dir,
-    )
-    input_info = {input_name: input_shape}
-    input_data = {}
-    print(f"Workload: {ARGS.workload}")
-    for input_name, input_shape in input_info.items():
-        print(f"  input_name: {input_name}")
-        print(f"  input_shape: {input_shape}")
-        print(f"  input_dtype: {input_dtype}")
+
+    print(f"Workload: {ARGS.model_name}")
+    onnx_model = onnx.load(ARGS.onnx_path)
+    shape_dict = {}
+    for item in ARGS.input_shape:
+        print(f"  input_name: {item['name']}")
+        print(f"  input_shape: {item['shape']}")
+        print(f"  input_dtype: {item['dtype']}")
+        shape_dict[item["name"]] = item["shape"]
+    mod, params = from_onnx(onnx_model, shape_dict, freeze_params=True)
     tasks, task_weights = auto_scheduler.extract_tasks(
         mod["main"],
         params,
@@ -173,7 +173,9 @@ def main():
                 params=params,
             )
     graph, rt_mod, params = lib.graph_json, lib.lib, lib.params
-    for input_name, input_shape in input_info.items():
+    input_data = {}
+    for item in ARGS.input_shape:
+        input_name, input_shape, input_dtype = item["name"], item["shape"], 
item["dtype"]
         if input_dtype.startswith("float"):
             input_data[input_name] = 
np.random.uniform(size=input_shape).astype(input_dtype)
         else:
diff --git a/python/tvm/meta_schedule/testing/tune_relay_auto_scheduler.py 
b/python/tvm/meta_schedule/testing/tune_onnx_meta_schedule.py
similarity index 60%
copy from python/tvm/meta_schedule/testing/tune_relay_auto_scheduler.py
copy to python/tvm/meta_schedule/testing/tune_onnx_meta_schedule.py
index abac49c50c..f5c7d1cde8 100644
--- a/python/tvm/meta_schedule/testing/tune_relay_auto_scheduler.py
+++ b/python/tvm/meta_schedule/testing/tune_onnx_meta_schedule.py
@@ -17,21 +17,24 @@
 # pylint: disable=missing-docstring
 import argparse
 import json
-import os
-
+import logging
 import numpy as np  # type: ignore
+import onnx  # type: ignore
 import tvm
-from tvm import auto_scheduler
+from tvm.relay.frontend import from_onnx
 from tvm import meta_schedule as ms
-from tvm import relay
 from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc
-from tvm.meta_schedule.testing.relay_workload import get_network
 
 
 def _parse_args():
     args = argparse.ArgumentParser()
     args.add_argument(
-        "--workload",
+        "--model-name",
+        type=str,
+        required=True,
+    )
+    args.add_argument(
+        "--onnx-path",
         type=str,
         required=True,
     )
@@ -39,6 +42,7 @@ def _parse_args():
         "--input-shape",
         type=str,
         required=True,
+        help='example: `[{"name": "input1", "dtype": "int64", "shape": [1, 1, 
8]}]',
     )
     args.add_argument(
         "--target",
@@ -71,15 +75,10 @@ def _parse_args():
         required=True,
     )
     args.add_argument(
-        "--log-dir",
+        "--work-dir",
         type=str,
         required=True,
     )
-    args.add_argument(
-        "--cache-dir",
-        type=str,
-        default=None,
-    )
     parsed = args.parse_args()
     parsed.target = tvm.target.Target(parsed.target)
     parsed.input_shape = json.loads(parsed.input_shape)
@@ -92,88 +91,52 @@ def _parse_args():
     return parsed
 
 
+logging.basicConfig(
+    format="%(asctime)s.%(msecs)03d %(levelname)s %(message)s", 
datefmt="%Y-%m-%d %H:%M:%S"
+)
+logging.getLogger("tvm.meta_schedule").setLevel(logging.INFO)
 ARGS = _parse_args()
 
 
 def main():
-    log_file = os.path.join(ARGS.log_dir, f"{ARGS.workload}.json")
-
-    runner = auto_scheduler.RPCRunner(
-        key=ARGS.rpc_key,
-        host=ARGS.rpc_host,
-        port=ARGS.rpc_port,
-        n_parallel=ARGS.rpc_workers,
-        number=3,
-        repeat=1,
-        min_repeat_ms=100,  # TODO
-        enable_cpu_cache_flush=False,  # TODO
-    )
-
-    if ARGS.target.kind.name == "llvm":
-        hardware_params = auto_scheduler.HardwareParams(
-            num_cores=int(ARGS.target.attrs["num-cores"]),
-            target=ARGS.target,
-        )
-    elif ARGS.target.kind.name == "cuda":
-        hardware_params = auto_scheduler.HardwareParams(
-            num_cores=-1,
-            vector_unit_bytes=16,
-            cache_line_bytes=64,
-            
max_shared_memory_per_block=int(ARGS.target.attrs["max_shared_memory_per_block"]),
-            
max_threads_per_block=int(ARGS.target.attrs["max_threads_per_block"]),
-            # The value `max_local_memory_per_block` is not used in 
AutoScheduler,
-            # but is required by the API.
-            max_local_memory_per_block=12345678,
-            max_vthread_extent=8,
-            warp_size=32,
-        )
-    else:
-        raise NotImplementedError(f"Unsupported target {ARGS.target}")
-    mod, params, (input_name, input_shape, input_dtype) = get_network(
-        ARGS.workload,
-        ARGS.input_shape,
-        cache_dir=ARGS.cache_dir,
-    )
-    input_info = {input_name: input_shape}
-    input_data = {}
-    print(f"Workload: {ARGS.workload}")
-    for input_name, input_shape in input_info.items():
-        print(f"  input_name: {input_name}")
-        print(f"  input_shape: {input_shape}")
-        print(f"  input_dtype: {input_dtype}")
-    tasks, task_weights = auto_scheduler.extract_tasks(
-        mod["main"],
-        params,
+    print(f"Workload: {ARGS.model_name}")
+    onnx_model = onnx.load(ARGS.onnx_path)
+    shape_dict = {}
+    for item in ARGS.input_shape:
+        print(f"  input_name: {item['name']}")
+        print(f"  input_shape: {item['shape']}")
+        print(f"  input_dtype: {item['dtype']}")
+        shape_dict[item["name"]] = item["shape"]
+    mod, params = from_onnx(onnx_model, shape_dict, freeze_params=True)
+    alloc_repeat = 1
+    runner = ms.runner.RPCRunner(
+        rpc_config=ARGS.rpc_config,
+        evaluator_config=ms.runner.EvaluatorConfig(
+            number=3,
+            repeat=1,
+            min_repeat_ms=100,
+            enable_cpu_cache_flush=False,
+        ),
+        alloc_repeat=alloc_repeat,
+        max_workers=ARGS.rpc_workers,
+    )
+    lib = ms.tune_relay(
+        mod=mod,
         target=ARGS.target,
-        hardware_params=hardware_params,
+        config=ms.TuneConfig(
+            strategy="evolutionary",
+            num_trials_per_iter=64,
+            max_trials_per_task=ARGS.num_trials,
+            max_trials_global=ARGS.num_trials,
+        ),
+        runner=runner,  # type: ignore
+        work_dir=ARGS.work_dir,
+        params=params,
     )
-    for idx, (task, task_weight) in enumerate(zip(tasks, task_weights)):
-        print(f"==== Task {idx}: {task.desc} (weight {task_weight} key: 
{task.workload_key}) =====")
-        print(task.compute_dag)
-
-    tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
-    tuner.tune(
-        auto_scheduler.TuningOptions(
-            num_measure_trials=ARGS.num_trials,
-            runner=runner,
-            measure_callbacks=[
-                auto_scheduler.RecordToFile(log_file),
-            ],
-        )
-    )
-
-    with auto_scheduler.ApplyHistoryBest(log_file):
-        with tvm.transform.PassContext(
-            opt_level=3,
-            config={"relay.backend.use_auto_scheduler": True},
-        ):
-            lib = relay.build(
-                mod,
-                target=ARGS.target,
-                params=params,
-            )
     graph, rt_mod, params = lib.graph_json, lib.lib, lib.params
-    for input_name, input_shape in input_info.items():
+    input_data = {}
+    for item in ARGS.input_shape:
+        input_name, input_shape, input_dtype = item["name"], item["shape"], 
item["dtype"]
         if input_dtype.startswith("float"):
             input_data[input_name] = 
np.random.uniform(size=input_shape).astype(input_dtype)
         else:
diff --git a/python/tvm/meta_schedule/testing/tune_relay_auto_scheduler.py 
b/python/tvm/meta_schedule/testing/tune_relay_auto_scheduler.py
index abac49c50c..ff4f931347 100644
--- a/python/tvm/meta_schedule/testing/tune_relay_auto_scheduler.py
+++ b/python/tvm/meta_schedule/testing/tune_relay_auto_scheduler.py
@@ -71,7 +71,7 @@ def _parse_args():
         required=True,
     )
     args.add_argument(
-        "--log-dir",
+        "--work-dir",
         type=str,
         required=True,
     )
@@ -96,7 +96,7 @@ ARGS = _parse_args()
 
 
 def main():
-    log_file = os.path.join(ARGS.log_dir, f"{ARGS.workload}.json")
+    log_file = os.path.join(ARGS.work_dir, f"{ARGS.workload}.json")
 
     runner = auto_scheduler.RPCRunner(
         key=ARGS.rpc_key,

Reply via email to