FrozenGene commented on a change in pull request #7053:
URL: https://github.com/apache/tvm/pull/7053#discussion_r538060242
##########
File path: python/tvm/auto_scheduler/auto_schedule.py
##########
@@ -210,6 +253,35 @@ def auto_schedule(task, search_policy=None,
tuning_options=TuningOptions()):
if search_policy is None:
cost_model = XGBModel()
search_policy = SketchPolicy(task, cost_model)
+
+ if tuning_options.check_correctness == True:
+ empty_sch, args = task.compute_dag.apply_steps_from_state(
+ task.compute_dag.get_init_state(), layout_rewrite=True)
+        cpu_func = build_module.build(
+            empty_sch, args, target="llvm", target_host=task.target_host
+        )
+ buffer_path = os.path.join(tuning_options.working_dir, "buffer.pkl")
+ if os.path.exists(buffer_path) is True:
+ with open(buffer_path, "rb") as fi:
+ buffer = pickle.load(fi)
+ if len(buffer) == len(args):
+ # we skip check each arg shape here
+ pass
+ elif len(buffer) == len(args) - 1:
+ # assume only one output
+                np_args = np.zeros(size=get_const_tuple(args[-1].shape)).astype(args[-1].dtype)
+                cpu_args = [v for _, v in buffer.items()] + [ndarray.array(np_args, ctx=tvm.cpu())]
+ cpu_func(*cpu_args)
+ ### save cpu result
+ answer = [x.asnumpy() for x in cpu_args]
+ tuning_options.register_buffer(args[-1].name, answer[-1])
+ else:
+            np_args = [np.random.uniform(-0.1, 0.1, size=get_const_tuple(x.shape)).astype(x.dtype) for x in args]
Review comment:
We should use the `random_fill` function, as it helps us handle different
dtypes. For example, for a quantized uint8 dtype, the [-0.1, 0.1] range of
`np.random.uniform(-0.1, 0.1, size=get_const_tuple(x.shape)).astype(x.dtype)
for x in args` will produce all zeros, which is not what we want.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]