This is an automated email from the ASF dual-hosted git repository.

lmzheng pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 28b1577  [AutoScheduler] Fix task extraction (#6965)
28b1577 is described below

commit 28b157727206d05bfdfb5571656ee3239360b59d
Author: Lianmin Zheng <[email protected]>
AuthorDate: Tue Nov 24 03:22:13 2020 -0800

    [AutoScheduler] Fix task extraction (#6965)
    
    * [AutoScheduler] Fix task extraction
    
    * fix
    
    * fix
    
    * trigger CI
---
 python/tvm/relay/backend/compile_engine.py | 10 ++++++----
 python/tvm/relay/op/strategy/cuda.py       |  2 +-
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/python/tvm/relay/backend/compile_engine.py b/python/tvm/relay/backend/compile_engine.py
index a3108a7..32affe7 100644
--- a/python/tvm/relay/backend/compile_engine.py
+++ b/python/tvm/relay/backend/compile_engine.py
@@ -186,6 +186,11 @@ def select_implementation(op, attrs, inputs, out_type, target, use_autotvm=True)
     all_impls = get_valid_implementations(op, attrs, inputs, out_type, target)
     best_plevel_impl = max(all_impls, key=lambda x: x.plevel)
 
+    # Disable autotvm if auto_scheduler is enabled.
+    # (i.e., always return the implementation with the highest priority for auto-scheduler).
+    if PassContext.current().config.get("relay.backend.use_auto_scheduler", False):
+        use_autotvm = False
+
     # If not use autotvm, always return the implementation with the highest priority
     if not use_autotvm:
         logger.info(
@@ -288,10 +293,7 @@ def lower_call(call, inputs, target):
             env.tracing = False
             reenable_tracing = True
 
-    # check if auto_scheduler is enabled, and use pevel to select the implementation if so
-    use_auto_scheduler = PassContext.current().config.get("relay.backend.use_auto_scheduler", False)
-
-    if not is_dyn and not use_auto_scheduler:
+    if not is_dyn:
         best_impl, outputs = select_implementation(op, call.attrs, inputs, ret_type, target)
     else:
         # TODO(@icemelon9): Allow tvm to generate multiple kernels for dynamic shapes.
diff --git a/python/tvm/relay/op/strategy/cuda.py b/python/tvm/relay/op/strategy/cuda.py
index ceaf9dd..f37fc2a 100644
--- a/python/tvm/relay/op/strategy/cuda.py
+++ b/python/tvm/relay/op/strategy/cuda.py
@@ -107,7 +107,7 @@ def naive_schedule(_, outs, target):
         # For GPU, we at least need thread binding to make a valid schedule.
         # So the naive schedule cannot be compiled.
         raise RuntimeError(
-            "Cannot compile for GPU targets if no tuned schedule is found."
+            "Cannot compile for GPU targets if no tuned schedule is found. "
             "Please see the warning messages above for more information about the failed workloads."
         )
     return tvm.te.create_schedule(outs[-1].op)

Reply via email to