masahi commented on code in PR #12113:
URL: https://github.com/apache/tvm/pull/12113#discussion_r922605734


##########
python/tvm/meta_schedule/testing/schedule_rule.py:
##########
@@ -114,18 +115,29 @@ def multi_level_tiling(target: Target) -> ScheduleRule:
 
 def multi_level_tiling_tensor_core(
     target: Target,
-    write_reuse_scope="shared",
-    in_dtype="float16",
-    out_dtype="float32",
-    trans_b=False,
+    write_reuse_scope: str = "shared",
+    in_dtype: Union[str, List[str]] = "float16",
+    out_dtype: Union[str, List[str]] = "float32",
+    trans_b: Union[bool, List[bool]] = False,
 ) -> ScheduleRule:
     """Default schedule rules for with multi-level tiling reuse for tensor 
core"""
     assert write_reuse_scope in ["shared", "global"]
+    if not isinstance(in_dtype, list):
+        in_dtype = [in_dtype]
+    if not isinstance(out_dtype, list):
+        out_dtype = [out_dtype]
+    if not isinstance(trans_b, list):
+        trans_b = [trans_b]
+
     if target.kind.name == "cuda":
+        intrin_groups = [
+            tensor_intrin.get_wmma_intrin_group(write_reuse_scope, _in_dtype, _out_dtype, _trans_b)
+            for _in_dtype in in_dtype
+            for _out_dtype in out_dtype
+            for _trans_b in trans_b
+        ]

Review Comment:
   Given a compute, isn't there only one group that's valid? 

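For context, here is a minimal standalone sketch of the enumeration the diff performs, which is what the question above is getting at. It is illustrative only: `enumerate_combos` is a hypothetical helper, not TVM API. Scalar arguments are normalized to single-element lists, and every (in_dtype, out_dtype, trans_b) combination yields one candidate intrin group, even though at most one combination can match a given compute's actual dtypes and layout:

    from itertools import product
    from typing import List, Tuple, Union

    def enumerate_combos(
        in_dtype: Union[str, List[str]] = "float16",
        out_dtype: Union[str, List[str]] = "float32",
        trans_b: Union[bool, List[bool]] = False,
    ) -> List[Tuple[str, str, bool]]:
        # Normalize scalars to single-element lists, mirroring the diff.
        in_dtypes = in_dtype if isinstance(in_dtype, list) else [in_dtype]
        out_dtypes = out_dtype if isinstance(out_dtype, list) else [out_dtype]
        trans_bs = trans_b if isinstance(trans_b, list) else [trans_b]
        # One tuple per get_wmma_intrin_group call in the diff's comprehension.
        return list(product(in_dtypes, out_dtypes, trans_bs))

    # 1 in_dtype x 2 out_dtypes x 2 trans_b settings -> 4 candidate groups,
    # though only the combination matching the workload's dtypes can apply.
    print(enumerate_combos(out_dtype=["float16", "float32"], trans_b=[False, True]))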

