haojin2 commented on a change in pull request #17254: [numpy] change unary 
infer type
URL: https://github.com/apache/incubator-mxnet/pull/17254#discussion_r366778186
 
 

 ##########
 File path: src/operator/tensor/elemwise_binary_op.h
 ##########
 @@ -525,6 +525,67 @@ class ElemwiseBinaryOp : public OpBase {
     });
   }
 
+  template<typename xpu, typename OP>
+  static void MixedUnaryBackwardUseInCompute(const nnvm::NodeAttrs &attrs,
+                                             const OpContext &ctx,
+                                             const std::vector<TBlob> &inputs,
+                                             const std::vector<OpReqType> &req,
+                                             const std::vector<TBlob> 
&outputs) {
+    using namespace mxnet_op;
+    if (req[0] == kNullOp) return;
+    Stream<xpu> *s = ctx.get_stream<xpu>();
+    CHECK_EQ(inputs.size(), 2U);
+    CHECK_EQ(outputs.size(), 1U);
+    if (!mxnet::common::is_float(inputs[1].type_flag_)) {
+      LOG(FATAL) << "Operator " << attrs.op->name <<
+                    " does not support type " << inputs[1].type_flag_;
+    }
+    if (outputs[0].type_flag_ == mshadow::kBool) {
+      LOG(FATAL) << "Operator " << attrs.op->name << " does not support 
boolean type";
+    }
+    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
+      MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
+        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), 
inputs[1].Size())
+        + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
+        if (size != 0) {
+          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
+          outputs[0].dptr<DType>(),
+          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
+        }
+      });
+    });
+  }
+
+  template<typename xpu, typename OP>
+  static void MixedUnaryBackwardUseInOutCompute(const nnvm::NodeAttrs &attrs,
+                                    const OpContext &ctx,
+                                    const std::vector<TBlob> &inputs,
+                                    const std::vector<OpReqType> &req,
+                                    const std::vector<TBlob> &outputs) {
+    using namespace mxnet_op;
+    if (req[0] == kNullOp) return;
+    Stream<xpu> *s = ctx.get_stream<xpu>();
+    CHECK_EQ(inputs.size(), 3U);
+    CHECK_EQ(outputs.size(), 1U);
+    if (mxnet::common::is_int(outputs[0].type_flag_)) {
+      LOG(FATAL) << "Operator " << attrs.op->name << " does not support int 
type";
+    }
+    if (outputs[0].type_flag_ == mshadow::kBool) {
 
 Review comment:
   this case and the above case could be merged.
   Plus, the error message is not meaningful enough.
   `mshadow::dtype_string(outputs[0].type_flag_)` will give you the 
corresponding string that represents the data type.
   Also, the problem with showing the op's name is that it will be something in 
the form of `_backward_npi_xxx`. I think it is better to simply say "gradient 
computation for xxx type is not supported", where "xxx" is the string returned 
by the call to `dtype_string`.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

Reply via email to