Wheest commented on a change in pull request #6137:
URL: https://github.com/apache/tvm/pull/6137#discussion_r596998966
##########
File path: python/tvm/relay/op/strategy/x86.py
##########
@@ -205,22 +205,20 @@ def conv2d_strategy_cpu(attrs, inputs, out_type, target):
     else:  # group_conv2d
         if layout == "NCHW":
            assert kernel_layout == "OIHW"
-            if not is_auto_scheduler_enabled():
-                logger.warning("group_conv2d is not optimized for x86 with autotvm.")
            strategy.add_implementation(
-                wrap_compute_conv2d(topi.nn.group_conv2d_nchw, has_groups=True),
-                wrap_topi_schedule(topi.generic.schedule_group_conv2d_nchw),
-                name="group_conv2d_nchw.generic",
+                wrap_compute_conv2d(topi.x86.group_conv2d_nchw, has_groups=True),
+                wrap_topi_schedule(topi.x86.schedule_group_conv2d_nchw),
+                name="group_conv2d_nchw.x86",
            )
        elif layout == "NHWC":
            assert kernel_layout == "HWIO"
            if not is_auto_scheduler_enabled():
                logger.warning("group_conv2d is not optimized for x86 with autotvm.")
-                strategy.add_implementation(
-                    wrap_compute_conv2d(topi.nn.group_conv2d_nhwc, has_groups=True),
-                    wrap_topi_schedule(topi.generic.schedule_group_conv2d_nhwc),
-                    name="group_conv2d_nhwc.generic",
-                )
+            strategy.add_implementation(
+                wrap_compute_conv2d(topi.nn.group_conv2d_nhwc, has_groups=True),
+                wrap_topi_schedule(topi.generic.schedule_group_conv2d_nhwc),
+                name="group_conv2d_nhwc.generic",
+            )
Review comment:
The bug has been fixed by removing the extra indentation, so the NHWC `strategy.add_implementation(...)` call is no longer nested inside the `if not is_auto_scheduler_enabled():` block and now runs unconditionally.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]