juliusshufan commented on a change in pull request #14893: Integrating the MKL
VML functions to MXNET to speed-up the (element-wised) mathematic computation
URL: https://github.com/apache/incubator-mxnet/pull/14893#discussion_r285436688
##########
File path: src/operator/tensor/elemwise_unary_op.h
##########
@@ -554,14 +560,48 @@ struct ReshapeLikeParam : public
dmlc::Parameter<ReshapeLikeParam> {
NNVM_REGISTER_OP(__name$) \
.set_num_inputs(1) \
.set_num_outputs(1) \
- .set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \
+ .set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}}; \
}) \
.add_argument("data", "NDArray-or-Symbol", "The input array.")
+#if MSHADOW_USE_MKL == 1
+ /*! \bried MKL Unary compute.
+ * * With this macro means mxnet compile with MKL to accelerate math
function with mkl.
+ * * Will Register FCompute with UnaryOp::MKL_Compute() to compelet the
math function.
+ */
+ #define MXNET_MKL_OPERATOR_REGISTER_UNARY_WITH_RSP_CSR(__name$, __xpu$,
\
+ __kernel$,
__mkl_kernel$) \
+ MXNET_OPERATOR_REGISTER_UNARY(__name$)
\
+ MXNET_ADD_SPARSE_OP_ALIAS(__name$)
\
+ .set_attr<FInferStorageType>("FInferStorageType", ElemwiseStorageType<1,
1, \
+ false, true, true>)
\
+ .set_attr<FCompute>("FCompute<" #__xpu$ ">",
UnaryOp::MKL_Compute<__kernel$, __mkl_kernel$>) \
+ .set_attr<FComputeEx>("FComputeEx<" #__xpu$ ">",
UnaryOp::MKL_ComputeEx<__kernel$, \
 Review comment:
   @eric-haibin-lin @TaoLv Friendly ping — may I know your decision on the
 sparse part?
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services