This is an automated email from the ASF dual-hosted git repository.
zhasheng pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/master by this push:
new dd954b4 fix flaky test (ran 50000 times locally) (#11595)
dd954b4 is described below
commit dd954b45a35d5eb9e8cdb6c6e19dd872a0a3d84d
Author: Haibin Lin <[email protected]>
AuthorDate: Fri Jul 6 18:48:57 2018 -0700
fix flaky test (ran 50000 times locally) (#11595)
---
src/operator/contrib/deformable_convolution-inl.h | 2 +-
tests/python/gpu/test_operator_gpu.py | 20 +++++---------------
2 files changed, 6 insertions(+), 16 deletions(-)
diff --git a/src/operator/contrib/deformable_convolution-inl.h b/src/operator/contrib/deformable_convolution-inl.h
index 20386d4..480f675 100644
--- a/src/operator/contrib/deformable_convolution-inl.h
+++ b/src/operator/contrib/deformable_convolution-inl.h
@@ -231,7 +231,7 @@ class DeformableConvolutionOp : public Operator {
in_grad[conv::kData].shape_, col_buffer.shape_,
param_.kernel, param_.pad, param_.stride, param_.dilate,
param_.num_deformable_group,
in_grad[conv::kOffset].dptr<DType>() + n*input_offset_dim_,
- req[conv::kData]);
+ req[conv::kOffset]);
// gradient w.r.t. input data
deformable_col2im(s, col_buffer.dptr<DType>(),
diff --git a/tests/python/gpu/test_operator_gpu.py b/tests/python/gpu/test_operator_gpu.py
index 5622bee..f8930e1 100644
--- a/tests/python/gpu/test_operator_gpu.py
+++ b/tests/python/gpu/test_operator_gpu.py
@@ -1483,6 +1483,8 @@ def test_deformable_convolution_with_type():
@with_seed()
def test_deformable_convolution_options():
+ tol = {np.dtype(np.float32): 1e-1,
+ np.dtype(np.float64): 1e-3}
# 2D convolution
# Pad > 0
@@ -1495,13 +1497,9 @@ def test_deformable_convolution_options():
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32,
'deformable_conv_offset': np.float32}},
- # {'ctx': mx.gpu(0),
- # 'deformable_conv_data': (2, 2, 7, 7),
- # 'deformable_offset': (2, 18, 7, 7),
- # 'type_dict': {'deformable_conv_data': np.float16,
- # 'deformable_offset': np.float16}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')
- check_consistency(sym, ctx_list)
+ check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Stride > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
@@ -1513,13 +1511,9 @@ def test_deformable_convolution_options():
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32,
'deformable_conv_offset': np.float32}},
- # {'ctx': mx.gpu(0),
- # 'deformable_conv_data': (2, 2, 7, 7),
- # 'deformable_conv_offset': (2, 18, 3, 3),
- # 'type_dict': {'deformable_conv_data': np.float16,
- # 'deformable_offset': np.float16}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')
- check_consistency(sym, ctx_list)
+ check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Dilate > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
@@ -1531,13 +1525,9 @@ def test_deformable_convolution_options():
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32,
'deformable_conv_offset': np.float32}},
- # {'ctx': mx.gpu(0),
- # 'deformable_conv_data': (2, 2, 7, 7),
- # 'deformable_conv_offset': (2, 18, 3, 3),
- # 'type_dict': {'deformable_conv_data': np.float16,
- # 'deformable_offset': np.float16}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')
- check_consistency(sym, ctx_list)
+ check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Deformable group > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here