This is an automated email from the ASF dual-hosted git repository.
lmzheng pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new 750e7f4  [AutoScheduler] Add tips on resuming the search from a log file (#7039)
750e7f4 is described below
commit 750e7f456e492c392c5f42676d20d042ec945d41
Author: Lianmin Zheng <[email protected]>
AuthorDate: Sat Dec 5 15:38:41 2020 -0800
[AutoScheduler] Add tips on resuming the search from a log file (#7039)
* [AutoScheduler] Add tips on resuming the search from a log file
* Trigger CI
---
python/tvm/auto_scheduler/search_policy.py | 2 +-
python/tvm/auto_scheduler/task_scheduler.py | 15 +++++++++++++--
tutorials/auto_scheduler/tune_network_cuda.py | 10 +++++++---
tutorials/auto_scheduler/tune_network_x86.py | 10 +++++++---
4 files changed, 28 insertions(+), 9 deletions(-)
diff --git a/python/tvm/auto_scheduler/search_policy.py b/python/tvm/auto_scheduler/search_policy.py
index 6f565ed..5b15a48 100644
--- a/python/tvm/auto_scheduler/search_policy.py
+++ b/python/tvm/auto_scheduler/search_policy.py
@@ -57,7 +57,7 @@ class PreloadMeasuredStates(SearchCallback):
The name of the record file.
"""
- def __init__(self, filename="auto_scheduler_tuning.json"):
+ def __init__(self, filename):
self.__init_handle_by_constructor__(_ffi_api.PreloadMeasuredStates, filename)
diff --git a/python/tvm/auto_scheduler/task_scheduler.py b/python/tvm/auto_scheduler/task_scheduler.py
index a3dbcae..ab83ff4 100644
--- a/python/tvm/auto_scheduler/task_scheduler.py
+++ b/python/tvm/auto_scheduler/task_scheduler.py
@@ -29,7 +29,7 @@ import logging
import numpy as np
-from .search_policy import SearchPolicy, SketchPolicy
+from .search_policy import SearchPolicy, SketchPolicy, PreloadMeasuredStates
from .cost_model import RandomModel, XGBModel
from .utils import array_mean
from .measure import ProgramMeasurer
@@ -94,8 +94,19 @@ def make_search_policies(
raise ValueError("Invalid search policy: " + search_policy)
if policy_type == "sketch":
+ if load_log_file:
+ # use the log file to restore the status of search policies.
+ init_search_callbacks = [PreloadMeasuredStates(load_log_file)]
+ else:
+ init_search_callbacks = None
search_policies = [
- SketchPolicy(task, cost_model, params=search_policy_params, verbose=verbose)
+ SketchPolicy(
+ task,
+ cost_model,
+ params=search_policy_params,
+ verbose=verbose,
+ init_search_callbacks=init_search_callbacks,
+ )
for task in tasks
]
else:
diff --git a/tutorials/auto_scheduler/tune_network_cuda.py b/tutorials/auto_scheduler/tune_network_cuda.py
index 03be05a..3da9f3f 100644
--- a/tutorials/auto_scheduler/tune_network_cuda.py
+++ b/tutorials/auto_scheduler/tune_network_cuda.py
@@ -299,10 +299,14 @@ print("Mean inference time (std dev): %.2f ms (%.2f ms)" % (np.mean(prof_res), n
# 1. During the tuning, the auto-scheduler needs to compile many programs and
# extract feature from them. This part is CPU-intensive,
# so a high-performance CPU with many cores is recommended for faster search.
-# 2. If you have multiple target GPUs, you can use all of them for measurements to
+# 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill --i log.json`
+# to distill the large log file and only save the best useful records.
+# 3. You can resume a search from the previous log file. You just need to
+# add a new argument :code:`load_log_file` when creating the task scheduler
+# in function :code:`run_tuning`. Say,
+# :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)`
+# 4. If you have multiple target GPUs, you can use all of them for measurements to
# parallelize the measurements. Check this :ref:`section <tutorials-autotvm-rpc-tracker>`
# to learn how to use the RPC Tracker and RPC Server.
# To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions`
# with :any:`auto_scheduler.RPCRunner`.
-# 3. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill --i log.json`
-# to distill the large log file and only save the best useful records.
diff --git a/tutorials/auto_scheduler/tune_network_x86.py b/tutorials/auto_scheduler/tune_network_x86.py
index aba75b2..a491759 100644
--- a/tutorials/auto_scheduler/tune_network_x86.py
+++ b/tutorials/auto_scheduler/tune_network_x86.py
@@ -298,10 +298,14 @@ print("Mean inference time (std dev): %.2f ms (%.2f ms)" % (np.mean(prof_res), n
# 1. During the tuning, the auto-scheduler needs to compile many programs and
# extract feature from them. This part is CPU-intensive,
# so a high-performance CPU with many cores is recommended for faster search.
-# 2. If you have multiple target CPUs, you can use all of them for measurements to
+# 2. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill --i log.json`
+# to distill the large log file and only save the best useful records.
+# 3. You can resume a search from the previous log file. You just need to
+# add a new argument :code:`load_log_file` when creating the task scheduler
+# in function :code:`run_tuning`. Say,
+# :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)`
+# 4. If you have multiple target CPUs, you can use all of them for measurements to
# parallelize the measurements. Check this :ref:`section <tutorials-autotvm-rpc-tracker>`
# to learn how to use the RPC Tracker and RPC Server.
# To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions`
# with :any:`auto_scheduler.RPCRunner`.
-# 3. You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill --i log.json`
-# to distill the large log file and only save the best useful records.