junrushao commented on code in PR #12895:
URL: https://github.com/apache/tvm/pull/12895#discussion_r988320000
##########
python/tvm/contrib/torch/optimize_torch.py:
##########
@@ -62,75 +51,32 @@ def forward(self, *torch_inputs: Tuple[torch.Tensor]):
return ret
-def llvm_target():
- return "llvm -num-cores"
-
-
@register_func("script_torch.save_to_base64")
def save_to_base64(obj) -> bytes:
with tempfile.NamedTemporaryFile(suffix=".so") as tmpfile:
obj.export_library(tmpfile.name)
- with open(tmpfile.name, "rb") as tfile:
- return base64.b64encode(tfile.read())
-
-
-def tune_relay_auto(
- mod: IRModule,
- target: Union[str, Target],
- config: TuneConfig,
- work_dir: str,
- backend: str = "graph",
- params: Optional[Dict[str, NDArray]] = None,
-) -> Union[Module, vm.Executable]:
- """A wrapper of `tune_relay` but provide a default setting for the config.
-
- Parameters
- ----------
- mod : IRModule
- The module to tune.
- target : Union[str, Target]
- The target to tune for.
- config : TuneConfig
- The search strategy config.
- params : Optional[Dict[str, tvm.runtime.NDArray]]
- The associated parameters of the program
- work_dir : Optional[str]
- The working directory to save intermediate results.
- backend : str = "graph"
- The backend to use for relay compilation(graph / vm).
-
- Returns
- -------
- lib : Union[Module, tvm.runtime.vm.Executable]
- The built runtime module or vm Executable for the given relay workload.
- """
- target = default_config.target(target)
- extracted_tasks = extract_task_from_relay(mod, target, params)
- if config is None:
- config = TuneConfig(
- num_trials_per_iter=16,
- max_trials_global=16 * len(extracted_tasks),
- )
- database = tune_extracted_tasks(extracted_tasks, config, work_dir)
- relay_build = {"graph": relay.build, "vm": relay.vm.compile}[backend]
- with target, autotvm_silencer(), database:
- with PassContext(
- opt_level=3,
- config={
- "relay.backend.use_meta_schedule": True,
- "relay.backend.use_meta_schedule_dispatch": target.kind.name
!= "cuda",
- "relay.backend.tir_converter": "default",
- },
- ):
- return relay_build(mod, target=target, params=params)
+ with open(tmpfile.name, "rb") as temp_file:
+ return base64.b64encode(temp_file.read())
def optimize_torch(
func,
example_inputs,
- tuning_config=None,
- target=None,
+ *,
work_dir=None,
+ target: Union[str, Target] = "cpu",
+ max_trials_global: int,
+ max_trials_per_task: Optional[int] = None,
+ num_trials_per_iter: int = 64,
+ builder: ms.Builder.BuilderType = "local",
+ runner: ms.Runner.RunnerType = "local",
+ database: ms.Database.DatabaseType = "json",
+ cost_model: ms.CostModel.CostModelType = "xgb",
+ measure_callbacks: ms.MeasureCallback.CallbackListType = "default",
+ task_scheduler: ms.TaskScheduler.TaskSchedulerType = "gradient",
+ space: ms.SpaceGenerator.SpaceGeneratorType = "post-order-apply",
+ strategy: ms.SearchStrategy.SearchStrategyType = "evolutionary",
Review Comment:
Sorry, I forgot! Thanks for pointing it out!
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]