jwfromm commented on a change in pull request #7823:
URL: https://github.com/apache/tvm/pull/7823#discussion_r616953691
##########
File path: python/tvm/driver/tvmc/autotuner.py
##########
@@ -228,24 +240,137 @@ def drive_tune(args):
args: argparse.Namespace
Arguments from command line parser.
"""
- # extra arguments validation before importing the model, so that obvious errors
- # are pointed in advance.
- if args.rpc_tracker:
- parsed_url = urlparse("//%s" % args.rpc_tracker)
+ tvmc_model = frontends.load_model(args.FILE, args.model_format, shape_dict=args.input_shapes)
+ tvmc_model.tuning_records = args.tuning_records
+ # Specify hardware parameters, although they'll only be used if autoscheduling.
+ hardware_params = auto_scheduler.HardwareParams(
+ args.num_cores,
+ args.vector_unit_bytes,
+ args.cache_line_bytes,
+ args.max_shared_memory_per_block,
+ args.max_local_memory_per_block,
+ args.max_threads_per_block,
+ args.max_vthread_extent,
+ args.warp_size,
+ args.target,
+ args.target_host,
+ )
+
+ tune_model(
+ tvmc_model,
+ args.target,
+ args.output,
+ args.enable_autoscheduler,
+ args.rpc_key,
+ args.rpc_tracker,
+ args.trials,
+ args.target_host,
+ args.tuner,
+ args.min_repeat_ms,
+ args.early_stopping,
+ args.desired_layout,
+ args.timeout,
+ args.number,
+ args.repeat,
+ args.parallel,
+ hardware_params,
+ args.include_simple_tasks,
+ args.log_estimated_latency,
+ )
+
+
+def tune_model(
+ tvmc_model: TVMCModel,
+ target: str,
+ tuning_records: Optional[str] = None,
+ enable_autoscheduler: bool = False,
+ rpc_key: Optional[str] = None,
+ rpc_tracker: Optional[str] = None,
+ trials: Optional[int] = None,
+ target_host: str = "llvm",
+ tuner: str = "xgb",
+ min_repeat_ms: Optional[int] = None,
+ early_stopping: Optional[int] = None,
+ desired_layout: Optional[str] = None,
+ timeout: int = 10,
+ number: int = 10,
+ repeat: int = 1,
+ parallel: int = 4,
+ hardware_params: Optional[HardwareParams] = None,
+ include_simple_tasks: bool = False,
+ log_estimated_latency: bool = False,
+):
+ """Use tuning to automatically optimize the functions in a model.
+
+ Parameters
+ ----------
+ tvmc_model : TVMCModel
+ The model to be optimized.
+ target : str
+ Compilation target as plain string, inline JSON or path to a JSON file.
+ tuning_records: str, optional
+ The path to a file that tuning results will be saved to. If not specified,
+ a temporary file will be used.
Review comment:
Having `tuning_records` be optional allows very new users to not
interact with files at all. A lot of first time TVM users will just run a
simple script like this:
```
model = tvmc.load('my_model.onnx')
tuning_record = tvmc.tune(model)
package = tvmc.compile(model, tuning_record=tuning_record)
print(tvmc.run(package))
```
If you're just trying TVM out, not having to worry about where to save the
tuning_records or what extension they should have is quite nice. If you guys
don't think this specific case matters we can make the output path required.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]