wkcn commented on issue #9939: add multi proposal operator (cpu version) and 
fix the bug in proposal op (gpu version)
URL: https://github.com/apache/incubator-mxnet/pull/9939#issuecomment-369797499
 
 
   I wrote a cpu/gpu consistency test for Proposal and MultiProposal.
   I found there is difference between the CPU output and the GPU output for 
mx.nd.contrib.Proposal.
   
   It seems that the index order of the Non-Maximum-Suppression result may be 
different between the CPU implementation and the GPU implementation.
   And another problem is that it may need to add the condition `num_to_keep < 
rpn_post_nms_top_n`
   
https://github.com/apache/incubator-mxnet/blob/master/src/operator/contrib/proposal.cu#L341
   reference: 
https://github.com/apache/incubator-mxnet/blob/master/src/operator/contrib/proposal.cc#L235
   
   Here is the cpu/gpu consistency test.
   ```python
   import mxnet as mx
   import numpy as np
   
   # @with_seed()
   def test_multi_proposal_op():
       # paramters
       feature_stride = 16
       scales = (8, 16, 32)
       ratios = (0.5, 1, 2)
       rpn_pre_nms_top_n = 12000
       rpn_post_nms_top_n = 2000
       threshold = 0.7
       rpn_min_size = 16
   
       feat_len = 14
       H, W = feat_len, feat_len
       num_anchors = len(scales) * len(ratios)
       count_anchors = H * W * num_anchors
   
       def get_new_data(batch_size, ctx):
           '''
           cls_prob: (batch_size, 2 * num_anchors, H, W)
           bbox_pred: (batch_size, 4 * num_anchors, H, W)
           im_info: (batch_size, 3)
           '''
   
           cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = 
np.float32, ctx = ctx)
           bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = 
np.float32, ctx = ctx)
           im_info = mx.nd.empty((batch_size, 3), dtype = np.float32, ctx = ctx)
   
           cls_prob = mx.nd.array(np.random.random(cls_prob.shape), ctx = ctx)
           bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape), ctx = ctx)
   
           for i in range(batch_size):
               im_size = np.random.randint(100, feat_len * feature_stride, size 
= (2,))
               im_scale = np.random.randint(70, 100) / 100.0
               im_info[i, :] = [im_size[0], im_size[1], im_scale]
           return cls_prob, bbox_pred, im_info
   
       def check_proposal_consistency(op, batch_size):
           '''
               op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal
           '''
           cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0))
           rois_cpu, score_cpu = op(
                   cls_score = cls_prob,
                   bbox_pred = bbox_pred,
                   im_info = im_info,
                   feature_stride = feature_stride,
                   scales = scales,
                   ratios = ratios,
                   rpn_pre_nms_top_n = rpn_pre_nms_top_n,
                   rpn_post_nms_top_n = rpn_post_nms_top_n,
                   threshold = threshold,
                   rpn_min_size = rpn_min_size, output_score = True)
   
           gpu_ctx = mx.gpu(0)
   
           # copy data to gpu from cpu
           cls_prob_gpu = cls_prob.as_in_context(gpu_ctx)
           bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx)
           im_info_gpu = im_info.as_in_context(gpu_ctx)
   
           rois_gpu, score_gpu = op(
                   cls_score = cls_prob_gpu,
                   bbox_pred = bbox_pred_gpu,
                   im_info = im_info_gpu,
                   feature_stride = feature_stride,
                   scales = scales,
                   ratios = ratios,
                   rpn_pre_nms_top_n = rpn_pre_nms_top_n,
                   rpn_post_nms_top_n = rpn_post_nms_top_n,
                   threshold = threshold,
                   rpn_min_size = rpn_min_size, output_score = True)
   
           print (rois_cpu.asnumpy(), rois_gpu.asnumpy())
           assert np.allclose(rois_cpu.asnumpy(), rois_gpu.asnumpy())
           assert np.allclose(score_cpu.asnumpy(), score_gpu.asnumpy())
   
       check_proposal_consistency(mx.nd.contrib.Proposal, 1)
       check_proposal_consistency(mx.nd.contrib.MultiProposal, 20)
   
   test_multi_proposal_op()
   print ("test ok")
   ```
   

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

Reply via email to