TaoLv commented on a change in pull request #16735: Use single-bit for mask in dropout operator
URL: https://github.com/apache/incubator-mxnet/pull/16735#discussion_r360939746
 
 

 ##########
 File path: src/operator/nn/dropout-inl.h
 ##########
 @@ -187,28 +188,101 @@ class DropoutOp {
                                     const index_t N,
                                     const index_t step,
                                     DType *dropout_out,
-                                    DType *mask_out,
+                                    uint8_t *mask_out,
                                     const DType *input_data,
                                     const real_t pkeep) {
       RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
         const real_t rand_num = static_cast<real_t>(genImpl.uniform());
-        mask_out[i] = mshadow_op::threshold_eq::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
-        dropout_out[i] = input_data[i] * mask_out[i];
-      });
+        // mask_out is set per bit position,
+        // so a bitwise shift needs to be performed here
+        auto maskIdx = i / 8;
+        auto maskOffset = i % 8;
+        bool maskVal = mshadow_op::threshold_eq::Map<real_t>(rand_num, pkeep);
+        if (maskVal) {
+          // set bit
+          mask_out[maskIdx] |= 1U << maskOffset;
 
 Review comment:
   Is this for loop parallelized?
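
   Context for the question: with the bit-packed mask, eight consecutive
   element indices share one mask_out byte, so if RNG_KERNEL_LOOP executes
   its iterations concurrently, the plain |= above is a read-modify-write
   race on that shared byte. Below is a minimal standalone sketch of one way
   to make the bit writes safe, assuming one element index per thread; the
   helper name pack_mask_bit and the use of std::atomic are illustrative
   assumptions, not the PR's actual approach.

       #include <atomic>
       #include <cstdint>
       #include <thread>
       #include <vector>

       // Hypothetical helper: set bit i of a bit-packed keep-mask.
       // fetch_or makes the read-modify-write on the shared byte atomic,
       // so concurrent writers cannot lose each other's bits.
       void pack_mask_bit(std::atomic<uint8_t>* mask, size_t i, bool keep) {
         if (keep) {
           mask[i / 8].fetch_or(static_cast<uint8_t>(1u << (i % 8)),
                                std::memory_order_relaxed);
         }
       }

       int main() {
         constexpr size_t N = 16;
         std::vector<std::atomic<uint8_t>> mask((N + 7) / 8);
         for (auto& b : mask) b.store(0);
         // Two threads set interleaved bits that live in the same bytes;
         // without the atomic OR, some updates could be lost.
         std::thread t0([&] {
           for (size_t i = 0; i < N; i += 2) pack_mask_bit(mask.data(), i, true);
         });
         std::thread t1([&] {
           for (size_t i = 1; i < N; i += 2) pack_mask_bit(mask.data(), i, true);
         });
         t0.join();
         t1.join();
       }

   On a GPU the analogue would be CUDA's atomicOr, which operates on 32-bit
   words rather than bytes; another option is to let each thread own a whole
   mask byte (process eight elements per iteration) and avoid atomics
   entirely.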

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services
