areusch opened a new issue #9084:
URL: https://github.com/apache/tvm/issues/9084


   ### Branch/PR Failing
   
   main
   
   ### Jenkins Link
   
   
https://ci.tlcpack.ai/blue/organizations/jenkins/tvm/detail/main/1759/pipeline
   
   
   ### Flakiness
   
   Have you seen this multiple times in this branch or in other branches?
Unsure; it appears to be flaky on main.
   
   
   ________________________________ test_autotune 
_________________________________
   
   
   
       @tvm.testing.requires_micro
   
       def test_autotune():
   
           """Verify that autotune works with micro."""
   
           import tvm.relay as relay
   
       
   
           data = relay.var("data", relay.TensorType((1, 3, 64, 64), "float32"))
   
           weight = relay.var("weight", relay.TensorType((8, 3, 5, 5), 
"float32"))
   
           y = relay.nn.conv2d(
   
               data,
   
               weight,
   
               padding=(2, 2),
   
               kernel_size=(5, 5),
   
               kernel_layout="OIHW",
   
               out_dtype="float32",
   
           )
   
           f = relay.Function([data, weight], y)
   
           mod = tvm.IRModule.from_expr(f)
   
           mod = relay.transform.InferType()(mod)
   
       
   
           main_func = mod["main"]
   
           shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in 
main_func.params}
   
           type_dict = {p.name_hint: p.checked_type.dtype for p in 
main_func.params}
   
       
   
           weight_data = 
np.ones(shape_dict["weight"]).astype(type_dict["weight"])
   
           input_data = np.ones(shape_dict["data"]).astype(type_dict["data"])
   
           params = {"weight": weight_data}
   
           inputs = {"data": input_data}
   
       
   
           target = tvm.target.target.micro("host")
   
           template_project_dir = 
pathlib.Path(tvm.micro.get_standalone_crt_dir()) / "template" / "host"
   
       
   
           pass_context = tvm.transform.PassContext(opt_level=3, 
config={"tir.disable_vectorize": True})
   
           with pass_context:
   
               tasks = tvm.autotvm.task.extract_from_program(mod["main"], {}, 
target)
   
           assert len(tasks) > 0
   
       
   
           module_loader = tvm.micro.AutoTvmModuleLoader(
   
               template_project_dir=template_project_dir,
   
               project_options={},
   
           )
   
           builder = tvm.autotvm.LocalBuilder(
   
               n_parallel=1,
   
               build_kwargs={"build_option": {"tir.disable_vectorize": True}},
   
               do_fork=True,
   
               build_func=tvm.micro.autotvm_build_func,
   
           )
   
           runner = tvm.autotvm.LocalRunner(number=1, repeat=1, 
module_loader=module_loader)
   
       
   
           measure_option = tvm.autotvm.measure_option(builder=builder, 
runner=runner)
   
       
   
           tune_log_file = pathlib.Path("crt_autotune.log")
   
           if tune_log_file.exists():
   
               tune_log_file.unlink()
   
       
   
           num_trials = 10
   
           for task in tasks:
   
               tuner = tvm.autotvm.tuner.GATuner(task)
   
               tuner.tune(
   
                   n_trial=num_trials,
   
                   measure_option=measure_option,
   
                   callbacks=[
   
                       tvm.autotvm.callback.log_to_file(str(tune_log_file)),
   
                       tvm.autotvm.callback.progress_bar(num_trials, 
si_prefix="M"),
   
                   ],
   
                   si_prefix="M",
   
               )
   
               assert tuner.best_flops > 0
   
       
   
   >       check_tune_log(tune_log_file)
   
   
   
   tests/python/unittest/test_crt.py:288: 
   
   _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
_ _ 
   
   
   
   log_path = PosixPath('crt_autotune.log')
   
   
   
       def check_tune_log(log_path: Union[pathlib.Path, str]):
   
           """Read the tuning log and check each result."""
   
           with open(log_path, "r") as f:
   
               lines = f.readlines()
   
       
   
           for line in lines:
   
               if len(line) > 0:
   
                   tune_result = json.loads(line)
   
   >               assert tune_result["result"][0][0] < 1000000000.0
   
   E               AssertionError
   
   
   
   


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to