icemelon9 commented on a change in pull request #4644: [Relay][AutoTVM] Relay op strategy
URL: https://github.com/apache/incubator-tvm/pull/4644#discussion_r380302988
##########
File path: topi/python/topi/cuda/conv3d.py
##########
@@ -126,24 +78,55 @@ def schedule_conv3d_ncdhw_cuda(cfg, outs):
     s: Schedule
         The computation schedule for conv3d.
     """
-    target = tvm.target.Target.current()
-    if 'cudnn' in target.libs:
-        return generic.schedule_extern(outs)
-
     outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
     s = tvm.create_schedule([x.op for x in outs])

     def _callback(op):
         if op.tag == 'conv3d_ncdhw':
-            schedule_direct_3d_cuda(cfg, s, op.output(0))
+            schedule_direct_conv3d_cuda(cfg, s, op.output(0), "NCDHW",
+                                        "conv3d_ncdhw.cuda")

     traverse_inline(s, outs[0].op, _callback)
     return s

-@autotvm.register_topi_schedule(generic.schedule_conv3d_ndhwc, ["cuda", "gpu"],
-                                ["direct"])
-def schedule_conv3d_ndhwc_cuda(cfg, outs):
+@autotvm.register_topi_compute("conv3d_ndhwc.cuda")
+def conv3d_ndhwc(cfg, data, kernel, strides, padding, dilation,
+                 out_dtype='float32'):
+ """Conv3D operator for cuda backend.
+
+ Parameters
+ ----------
+ cfg: ConfigEntity
+ The config for this template
+
+ data : tvm.Tensor
+ 5-D with shape [batch, in_channel, in_depth, in_height, in_width]
Review comment:
fixed.
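For context, the diff above moves conv3d from the old generic-dispatch decorators to the new named-task registration this PR introduces. A minimal sketch of a compute/schedule pair under the new scheme, using the task name and decorators shown in the diff; the function bodies below are placeholders, not the PR's actual CUDA template, and the fallback to topi.nn.conv3d_ndhwc assumes its default argument order:

    import tvm
    import topi
    from tvm import autotvm

    # New style: compute and schedule are registered under one task name,
    # and relay op strategy selects an implementation by that name rather
    # than dispatching through topi.generic.
    @autotvm.register_topi_compute("conv3d_ndhwc.cuda")
    def conv3d_ndhwc(cfg, data, kernel, strides, padding, dilation,
                     out_dtype='float32'):
        # Placeholder body: a real template declares tuning knobs on `cfg`
        # and emits the conv3d compute; here we fall back to the default
        # topi compute (assumed signature).
        return topi.nn.conv3d_ndhwc(data, kernel, strides, padding,
                                    dilation, out_dtype)

    @autotvm.register_topi_schedule("conv3d_ndhwc.cuda")
    def schedule_conv3d_ndhwc(cfg, outs):
        # Placeholder body: a trivial schedule; a real template applies
        # cfg-driven splits, reorders, and thread binds here.
        outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
        return tvm.create_schedule([x.op for x in outs])

Pairing both registrations under one key lets relay's op strategy pick an implementation, along with its AutoTVM tuning space, per target and by name.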