pengzhao-intel commented on a change in pull request #9939: add multi proposal 
operator (cpu version) and fix the bug in proposal op (gpu version)
URL: https://github.com/apache/incubator-mxnet/pull/9939#discussion_r171791480
 
 

 ##########
 File path: src/operator/contrib/multi_proposal.cc
 ##########
 @@ -22,11 +22,253 @@
  * Licensed under The Apache-2.0 License [see LICENSE for details]
  * \file multi_proposal.cc
  * \brief
- * \author Xizhou Zhu
+ * \author Xizhou Zhu, Kan Wu
 */
 
 #include "./multi_proposal-inl.h"
 
+//============================
+// Bounding Box Transform Utils
+//============================
+namespace mxnet {
+namespace op {
+namespace utils {
+
+// bbox prediction and clip to the image borders
+inline void BBoxTransformInv(const mshadow::Tensor<cpu, 2>& boxes,
+                             const mshadow::Tensor<cpu, 3>& deltas,
+                             const float im_height,
+                             const float im_width,
+                             const int real_height,
+                             const int real_width,
+                             mshadow::Tensor<cpu, 2> *out_pred_boxes) {
+  CHECK_GE(boxes.size(1), 4);
+  CHECK_GE(out_pred_boxes->size(1), 4);
+  int anchors = deltas.size(0) / 4;
+  int heights = deltas.size(1);
+  int widths = deltas.size(2);
+
+  for (int a = 0; a < anchors; ++a) {
+    for (int h = 0; h < heights; ++h) {
+      for (int w = 0; w < widths; ++w) {
+        index_t index = h * (widths * anchors) + w * (anchors) + a;
+        float width = boxes[index][2] - boxes[index][0] + 1.0;
+        float height = boxes[index][3] - boxes[index][1] + 1.0;
+        float ctr_x = boxes[index][0] + 0.5 * (width - 1.0);
+        float ctr_y = boxes[index][1] + 0.5 * (height - 1.0);
+
+        float dx = deltas[a*4 + 0][h][w];
+        float dy = deltas[a*4 + 1][h][w];
+        float dw = deltas[a*4 + 2][h][w];
+        float dh = deltas[a*4 + 3][h][w];
+
+        float pred_ctr_x = dx * width + ctr_x;
+        float pred_ctr_y = dy * height + ctr_y;
+        float pred_w = exp(dw) * width;
+        float pred_h = exp(dh) * height;
+
+        float pred_x1 = pred_ctr_x - 0.5 * (pred_w - 1.0);
+        float pred_y1 = pred_ctr_y - 0.5 * (pred_h - 1.0);
+        float pred_x2 = pred_ctr_x + 0.5 * (pred_w - 1.0);
+        float pred_y2 = pred_ctr_y + 0.5 * (pred_h - 1.0);
+
+        pred_x1 = std::max(std::min(pred_x1, im_width - 1.0f), 0.0f);
+        pred_y1 = std::max(std::min(pred_y1, im_height - 1.0f), 0.0f);
+        pred_x2 = std::max(std::min(pred_x2, im_width - 1.0f), 0.0f);
+        pred_y2 = std::max(std::min(pred_y2, im_height - 1.0f), 0.0f);
+
+        (*out_pred_boxes)[index][0] = pred_x1;
+        (*out_pred_boxes)[index][1] = pred_y1;
+        (*out_pred_boxes)[index][2] = pred_x2;
+        (*out_pred_boxes)[index][3] = pred_y2;
+
+        if (h >= real_height || w >= real_width) {
+          (*out_pred_boxes)[index][4] = -1.0;
+        }
+      }
+    }
+  }
+}
+
+// iou prediction and clip to the image border
+inline void IoUTransformInv(const mshadow::Tensor<cpu, 2>& boxes,
+                            const mshadow::Tensor<cpu, 3>& deltas,
+                            const float im_height,
+                            const float im_width,
+                            const int real_height,
+                            const int real_width,
+                            mshadow::Tensor<cpu, 2> *out_pred_boxes) {
+  CHECK_GE(boxes.size(1), 4);
+  CHECK_GE(out_pred_boxes->size(1), 4);
+  int anchors = deltas.size(0) / 4;
+  int heights = deltas.size(1);
+  int widths = deltas.size(2);
+
+  for (int a = 0; a < anchors; ++a) {
+    for (int h = 0; h < heights; ++h) {
+      for (int w = 0; w < widths; ++w) {
+        index_t index = h * (widths * anchors) + w * (anchors) + a;
 
 Review comment:
   @cjolivier01 In general, we prefer to parallelize the outer loop because 
more tasks can then run simultaneously, similar to task-level parallelization. 
When we parallelize the inner loop (not the case here), it is usually because 
we want OMP to vectorize it (simd).
   

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

Reply via email to