masahi commented on a change in pull request #4418: [RUNTIME] Add cudnn conv3d
URL: https://github.com/apache/incubator-tvm/pull/4418#discussion_r351924652
##########
File path: tests/python/contrib/test_cudnn.py
##########
@@ -37,52 +38,127 @@ def verify_conv2d(data_dtype, conv_dtype,
                    tensor_format=0):
     if not tvm.module.enabled("cuda"):
         print("skip because cuda is not enabled...")
         return
-    if not tvm.get_global_func("tvm.contrib.cudnn.conv2d.output_shape", True):
+    if not tvm.get_global_func("tvm.contrib.cudnn.conv.output_shape", True):
         print("skip because cudnn is not enabled...")
         return
-
-    xshape = [batch, in_channel, height, weight]
-    wshape = cudnn.conv2d_w_shape(in_channel,
-                                  out_channel,
-                                  filter_h,
-                                  filter_w)
+    if tensor_format == 0:
+        xshape = [batch, in_channel, height, weight]
+        wshape = [out_channel, in_channel, filter_h, filter_w]
+    else:
+        xshape = [batch, height, weight, in_channel]
+        wshape = [out_channel, filter_h, filter_w, in_channel]
     X = tvm.placeholder(xshape, name='X', dtype=data_dtype)
     W = tvm.placeholder(wshape, name='W', dtype=data_dtype)
-    Y = cudnn.conv2d_forward(X,
-                             W,
-                             stride_h,
-                             stride_w,
-                             pad_h,
-                             pad_w,
-                             dilation_h,
-                             dilation_w,
-                             conv_mode=1,
-                             tensor_format=tensor_format,
-                             conv_dtype=conv_dtype,
-                             algo=-1)
+    Y = cudnn.conv_forward(X,
+                           W,
+                           2,
+                           [pad_h, pad_w],
+                           [stride_h, stride_w],
+                           [dilation_h, dilation_w],
+                           conv_mode=1,
+                           tensor_format=tensor_format,
+                           conv_dtype=conv_dtype,
+                           algo=-1)
     yshape = [x.value for x in Y.shape]
     s = tvm.create_schedule(Y.op)
     def verify():
         ctx = tvm.gpu(0)
         f = tvm.build(s, [X, W, Y], "cuda", target_host="llvm", name="conv2d")
-        x = tvm.nd.array(np.random.uniform(-1, 1, xshape).astype(data_dtype),
-                         ctx)
-        w = tvm.nd.array(np.random.uniform(-1, 1, wshape).astype(data_dtype),
-                         ctx)
-        y = tvm.nd.array(np.random.uniform(-1, 1, yshape).astype(data_dtype),
-                         ctx)
+        x_np = np.random.uniform(-1, 1, xshape).astype(data_dtype)
+        w_np = np.random.uniform(-1, 1, wshape).astype(data_dtype)
+        y_np = np.zeros(yshape).astype(data_dtype)
+        x = tvm.nd.array(x_np, ctx)
+        w = tvm.nd.array(w_np, ctx)
+        y = tvm.nd.array(y_np, ctx)
+        if tensor_format == 0:
+            c_np = topi.testing.conv2d_nchw_python(x_np, w_np, 1, 1)
+        elif tensor_format == 1:
+            wt = w_np.transpose((1, 2, 3, 0))  # OHWI => HWIO
+            c_np = topi.testing.conv2d_nhwc_python(x_np, wt, 1, 1)
+
         f(x, w, y)
+        tvm.testing.assert_allclose(y.asnumpy(), c_np, atol=1e-5, rtol=1e-3)
     verify()
 def test_conv2d():
     verify_conv2d("float32", "float32", tensor_format=0)
     verify_conv2d("float16", "float32", tensor_format=1)
-    verify_conv2d("float16", "float16", tensor_format=0)
+    # Does not pass the accuracy test yet; needs investigation
+    # verify_conv2d("float16", "float16", tensor_format=0)
     verify_conv2d("int8", "int32", tensor_format=1)
+def verify_conv3d(data_dtype, conv_dtype, tensor_format=0):
+    in_channel = 4
+    out_channel = 16
+    filter_d = 3
+    filter_h = 3
+    filter_w = 3
+    pad_d = 1
+    pad_h = 1
+    pad_w = 1
+    stride_d = 1
+    stride_h = 1
+    stride_w = 1
+    dilation_d = 1
+    dilation_h = 1
+    dilation_w = 1
+    batch = 3
+    depth = 32
+    height = 32
+    weight = 32
Review comment:
Clean these up using lists.
Also do the same for the conv2d test.
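
A minimal sketch of the cleanup being asked for, assuming the values from the diff above. The NCDHW/OIDHW layouts and the `dims=3` argument are assumptions extrapolated from the 2-D call shown in the diff, not confirmed by the PR:

import tvm
from tvm.contrib import cudnn

# Per-dimension scalars grouped into lists; values mirror the diff above.
batch, in_channel, out_channel = 3, 4, 16
filters   = [3, 3, 3]   # filter_d, filter_h, filter_w
pads      = [1, 1, 1]   # pad_d, pad_h, pad_w
strides   = [1, 1, 1]   # stride_d, stride_h, stride_w
dilations = [1, 1, 1]   # dilation_d, dilation_h, dilation_w
depth, height, width = 32, 32, 32

# NCDHW input and OIDHW filter, matching tensor_format=0 (assumed layout).
xshape = [batch, in_channel, depth, height, width]
wshape = [out_channel, in_channel] + filters

X = tvm.placeholder(xshape, name='X', dtype="float32")
W = tvm.placeholder(wshape, name='W', dtype="float32")
# Assumes conv_forward keeps the positional layout of the 2-D call above:
# (X, W, dims, pad, stride, dilation, ...); dims=3 here is an assumption.
Y = cudnn.conv_forward(X, W, 3, pads, strides, dilations,
                       conv_mode=1, tensor_format=0,
                       conv_dtype="float32", algo=-1)

The same grouping would shrink the conv2d test's parameter block in the same way, and the lists can be passed straight through to `cudnn.conv_forward`.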