szha closed pull request #11595: Fix deformable_convolution req and increase tolerance
URL: https://github.com/apache/incubator-mxnet/pull/11595

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

diff --git a/src/operator/contrib/deformable_convolution-inl.h b/src/operator/contrib/deformable_convolution-inl.h
index 20386d47bc6..480f675bdbf 100644
--- a/src/operator/contrib/deformable_convolution-inl.h
+++ b/src/operator/contrib/deformable_convolution-inl.h
@@ -231,7 +231,7 @@ class DeformableConvolutionOp : public Operator {
         in_grad[conv::kData].shape_, col_buffer.shape_,
        param_.kernel, param_.pad, param_.stride, param_.dilate, param_.num_deformable_group,
         in_grad[conv::kOffset].dptr<DType>() + n*input_offset_dim_,
-        req[conv::kData]);
+        req[conv::kOffset]);
 
       // gradient w.r.t. input data
       deformable_col2im(s, col_buffer.dptr<DType>(),
diff --git a/tests/python/gpu/test_operator_gpu.py b/tests/python/gpu/test_operator_gpu.py
index 45f52b32a34..d3a0547f647 100644
--- a/tests/python/gpu/test_operator_gpu.py
+++ b/tests/python/gpu/test_operator_gpu.py
@@ -1480,6 +1480,8 @@ def test_deformable_convolution_with_type():
 
 @with_seed()
 def test_deformable_convolution_options():
+    tol = {np.dtype(np.float32): 1e-1,
+           np.dtype(np.float64): 1e-3}
     # 2D convolution
 
     # Pad > 0
@@ -1492,13 +1494,9 @@ def test_deformable_convolution_options():
                  'deformable_conv_data': (2, 2, 7, 7),
                  'deformable_conv_offset': (2, 18, 7, 7),
                 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
-                # {'ctx': mx.gpu(0),
-                #  'deformable_conv_data': (2, 2, 7, 7),
-                #  'deformable_offset': (2, 18, 7, 7),
-                #  'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
                 ]
    sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')
-    check_consistency(sym, ctx_list)
+    check_consistency(sym, ctx_list, scale=0.1, tol=tol)
 
     # Stride > 1
    # since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
@@ -1510,13 +1508,9 @@ def test_deformable_convolution_options():
                  'deformable_conv_data': (2, 2, 7, 7),
                  'deformable_conv_offset': (2, 18, 3, 3),
                 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
-                # {'ctx': mx.gpu(0),
-                #  'deformable_conv_data': (2, 2, 7, 7),
-                # 'deformable_conv_offset': (2, 18, 3, 3),
-                #  'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
                 ]
    sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')
-    check_consistency(sym, ctx_list)
+    check_consistency(sym, ctx_list, scale=0.1, tol=tol)
 
     # Dilate > 1
    # since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
@@ -1528,13 +1522,9 @@ def test_deformable_convolution_options():
                  'deformable_conv_data': (2, 2, 7, 7),
                  'deformable_conv_offset': (2, 18, 3, 3),
                 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
-                # {'ctx': mx.gpu(0),
-                #  'deformable_conv_data': (2, 2, 7, 7),
-                # 'deformable_conv_offset': (2, 18, 3, 3),
-                #  'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
                 ]
    sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')
-    check_consistency(sym, ctx_list)
+    check_consistency(sym, ctx_list, scale=0.1, tol=tol)
 
     # Deformable group > 1
    # since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
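
For context on the one-token C++ change above: the backward pass was writing
the offset gradient, in_grad[conv::kOffset], under the write request of the
data input, req[conv::kData]. Below is a minimal plain-Python model of the
OpReqType dispatch involved; the enum values and helper are illustrative,
not MXNet internals.

KNULL_OP, KWRITE_TO, KADD_TO = 0, 1, 2   # mirrors mxnet::OpReqType
KDATA, KOFFSET = 0, 1                    # mirrors conv::kData / conv::kOffset

def write_grad(grads, idx, val, req):
    """Apply one write request, as a backward kernel would."""
    if req == KNULL_OP:
        return                  # this gradient was requested to be skipped
    if req == KWRITE_TO:
        grads[idx] = val        # overwrite
    elif req == KADD_TO:
        grads[idx] += val       # accumulate

req = [KNULL_OP, KWRITE_TO]     # e.g. data grad skipped, offset grad wanted
grads = [0.0, 0.0]
write_grad(grads, KOFFSET, 1.5, req[KDATA])    # buggy: silently skipped
write_grad(grads, KOFFSET, 1.5, req[KOFFSET])  # fixed: offset grad written
print(grads)                                   # [0.0, 1.5]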

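On the test side, check_consistency accepts a dtype-keyed tolerance dict,
and scale shrinks the random inputs so that fp32 error from the atomicAdd
accumulation stays inside the loosened bound. A minimal sketch of the
pattern follows; the FullyConnected symbol, shapes, and contexts are
illustrative stand-ins, not taken from the PR.

import numpy as np
import mxnet as mx
from mxnet.test_utils import check_consistency

# dtype-keyed tolerances, as in the patch: coarse for fp32, finer for fp64
tol = {np.dtype(np.float32): 1e-1,
       np.dtype(np.float64): 1e-3}

# check_consistency binds the symbol once per spec in ctx_list, then
# compares outputs and gradients across specs, selecting the tolerance
# by each spec's dtype.
sym = mx.sym.FullyConnected(num_hidden=4, name='fc')
ctx_list = [{'ctx': mx.cpu(0), 'fc_data': (2, 8),
             'type_dict': {'fc_data': np.float64}},
            {'ctx': mx.cpu(0), 'fc_data': (2, 8),
             'type_dict': {'fc_data': np.float32}}]
check_consistency(sym, ctx_list, scale=0.1, tol=tol)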
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services
