[GitHub] eric-haibin-lin commented on a change in pull request #10550: [MXNET-320] Support elemwise_add/sub/max/min/hypot between dense and csr tensors
eric-haibin-lin commented on a change in pull request #10550: [MXNET-320] Support elemwise_add/sub/max/min/hypot between dense and csr tensors URL: https://github.com/apache/incubator-mxnet/pull/10550#discussion_r182811309 ## File path: src/operator/tensor/elemwise_binary_op-inl.h ## @@ -374,6 +374,72 @@ void ElemwiseBinaryOp::CsrCsrOp(mshadow::Stream *s, } } +template +struct ElemwiseDnsZeroKernel { + template + static void inline Map(int i, const OpReqType req, DType* out, const DType* dns_data, + const nnvm::dim_t num_rows, const nnvm::dim_t num_cols) { +if (i < num_rows*num_cols) { + KERNEL_ASSIGN(out[i], req, OP::Map(dns_data[i], DType(0.0f))); +} + } +}; + +template +struct ElemwiseDnsCsrDnsKernel { + template + static void inline Map(int i, const OpReqType req, DType* out, DType* dns_data, + const DType* csr_data, const IType* csr_indices, const CType* csr_indptr, + const nnvm::dim_t num_rows, const nnvm::dim_t num_cols) { +if (i < num_rows) { + for (int j = csr_indptr[i]; j < csr_indptr[i+1]; ++j) { +KERNEL_ASSIGN(out[i * num_cols + csr_indices[j]], req, + OP::Map(dns_data[i * num_cols + csr_indices[j]], csr_data[j])); + } +} + } +}; + +/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */ Review comment: Better to move/add description in elemwise_binary_op.h This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] eric-haibin-lin commented on a change in pull request #10550: [MXNET-320] Support elemwise_add/sub/max/min/hypot between dense and csr tensors
eric-haibin-lin commented on a change in pull request #10550: [MXNET-320] Support elemwise_add/sub/max/min/hypot between dense and csr tensors URL: https://github.com/apache/incubator-mxnet/pull/10550#discussion_r182810306 ## File path: src/operator/elemwise_op_common.h ## @@ -102,6 +102,48 @@ inline bool ElemwiseStorageType(const nnvm::NodeAttrs& attrs, in_attrs, out_attrs); } +template +inline bool ElemwisePreferDenseStorageType(const nnvm::NodeAttrs& attrs, + const int dev_mask, + DispatchMode* dispatch_mode, + std::vector *in_attrs, + std::vector *out_attrs) { + using namespace common; + CHECK_EQ(in_attrs->size(), 2); + CHECK_EQ(out_attrs->size(), 1); + const auto lhs_stype = (*in_attrs)[0]; + const auto rhs_stype = (*in_attrs)[1]; + bool dispatched = false; + const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask; + const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback : + DispatchMode::kFComputeEx; + if (!dispatched && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)) { +// dns, dns ... -> dns +dispatched = storage_type_assign(out_attrs, kDefaultStorage, + dispatch_mode, DispatchMode::kFCompute); + } + if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) { +// rsp, rsp, ... -> rsp +dispatched = storage_type_assign(out_attrs, kRowSparseStorage, + dispatch_mode, dispatch_ex); + } + if (!dispatched && csr && common::ContainsOnlyStorage(*in_attrs, kCSRStorage)) { Review comment: nit: common::ContainsOnlyStorage -> ContainsOnlyStorage This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] eric-haibin-lin commented on a change in pull request #10550: [MXNET-320] Support elemwise_add/sub/max/min/hypot between dense and csr tensors
eric-haibin-lin commented on a change in pull request #10550: [MXNET-320] Support elemwise_add/sub/max/min/hypot between dense and csr tensors URL: https://github.com/apache/incubator-mxnet/pull/10550#discussion_r182813227 ## File path: src/operator/tensor/elemwise_binary_op-inl.h ## @@ -374,6 +374,72 @@ void ElemwiseBinaryOp::CsrCsrOp(mshadow::Stream *s, } } +template +struct ElemwiseDnsZeroKernel { + template + static void inline Map(int i, const OpReqType req, DType* out, const DType* dns_data, + const nnvm::dim_t num_rows, const nnvm::dim_t num_cols) { +if (i < num_rows*num_cols) { + KERNEL_ASSIGN(out[i], req, OP::Map(dns_data[i], DType(0.0f))); +} + } +}; + +template +struct ElemwiseDnsCsrDnsKernel { + template + static void inline Map(int i, const OpReqType req, DType* out, DType* dns_data, + const DType* csr_data, const IType* csr_indices, const CType* csr_indptr, + const nnvm::dim_t num_rows, const nnvm::dim_t num_cols) { +if (i < num_rows) { + for (int j = csr_indptr[i]; j < csr_indptr[i+1]; ++j) { +KERNEL_ASSIGN(out[i * num_cols + csr_indices[j]], req, + OP::Map(dns_data[i * num_cols + csr_indices[j]], csr_data[j])); + } +} + } +}; + +/*! 
\brief DNS -op- CSR binary operator for non-canonical NDArray */ +template +void ElemwiseBinaryOp::DnsCsrDnsOp(mshadow::Stream *s, + const nnvm::NodeAttrs , + const OpContext , + const NDArray , + const NDArray , + const OpReqType req, + const NDArray , + const bool reverse) { + using namespace mshadow; + using namespace mxnet_op; + CHECK_EQ(dns.storage_type(), kDefaultStorage); + CHECK_EQ(csr.storage_type(), kCSRStorage); + const nnvm::dim_t num_csr_rows = csr.shape()[0]; + const nnvm::dim_t num_csr_cols = csr.shape()[1]; + mxnet_op::Kernel::Launch( +s, output.data().Size(), req, output.data().dptr(), dns.data().dptr(), +num_csr_rows, num_csr_cols); + TBlob csr_data = csr.data(); + TBlob csr_indices = csr.aux_data(csr::kIdx); + TBlob csr_indptr = csr.aux_data(csr::kIndPtr); + MSHADOW_SGL_DBL_TYPE_SWITCH(csr_data.type_flag_, DataType, { Review comment: The function is already templated. DType === DataType, right? I think the part of the template "typename DType, typename IType, typename CType" can be removed, which makes it easier for other operators to call this function. I noticed that RspRspOp is also templated. We probably can move the DType switches inside the implementation to make the code cleaner, but that will be out of the scope of this PR. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] eric-haibin-lin commented on a change in pull request #10550: [MXNET-320] Support elemwise_add/sub/max/min/hypot between dense and csr tensors
eric-haibin-lin commented on a change in pull request #10550: [MXNET-320] Support elemwise_add/sub/max/min/hypot between dense and csr tensors URL: https://github.com/apache/incubator-mxnet/pull/10550#discussion_r182810971 ## File path: src/operator/elemwise_op_common.h ## @@ -102,6 +102,48 @@ inline bool ElemwiseStorageType(const nnvm::NodeAttrs& attrs, in_attrs, out_attrs); } +template +inline bool ElemwisePreferDenseStorageType(const nnvm::NodeAttrs& attrs, + const int dev_mask, + DispatchMode* dispatch_mode, + std::vector *in_attrs, + std::vector *out_attrs) { + using namespace common; + CHECK_EQ(in_attrs->size(), 2); + CHECK_EQ(out_attrs->size(), 1); + const auto lhs_stype = (*in_attrs)[0]; + const auto rhs_stype = (*in_attrs)[1]; + bool dispatched = false; + const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask; + const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback : + DispatchMode::kFComputeEx; + if (!dispatched && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)) { +// dns, dns ... -> dns +dispatched = storage_type_assign(out_attrs, kDefaultStorage, + dispatch_mode, DispatchMode::kFCompute); + } + if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) { +// rsp, rsp, ... -> rsp +dispatched = storage_type_assign(out_attrs, kRowSparseStorage, + dispatch_mode, dispatch_ex); + } + if (!dispatched && csr && common::ContainsOnlyStorage(*in_attrs, kCSRStorage)) { +// csr, csr, ... -> csr +dispatched = storage_type_assign(out_attrs, kCSRStorage, + dispatch_mode, dispatch_ex); + } + if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) || + (lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) { +// dense, csr -> csr / csr, dense -> csr Review comment: I'm not sure what this comment means. Output should be dense?? This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. 
For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] eric-haibin-lin commented on a change in pull request #10550: [MXNET-320] Support elemwise_add/sub/max/min/hypot between dense and csr tensors
eric-haibin-lin commented on a change in pull request #10550: [MXNET-320] Support elemwise_add/sub/max/min/hypot between dense and csr tensors URL: https://github.com/apache/incubator-mxnet/pull/10550#discussion_r182810260 ## File path: src/operator/elemwise_op_common.h ## @@ -102,6 +102,48 @@ inline bool ElemwiseStorageType(const nnvm::NodeAttrs& attrs, in_attrs, out_attrs); } +template +inline bool ElemwisePreferDenseStorageType(const nnvm::NodeAttrs& attrs, + const int dev_mask, + DispatchMode* dispatch_mode, + std::vector *in_attrs, + std::vector *out_attrs) { + using namespace common; + CHECK_EQ(in_attrs->size(), 2); + CHECK_EQ(out_attrs->size(), 1); + const auto lhs_stype = (*in_attrs)[0]; + const auto rhs_stype = (*in_attrs)[1]; + bool dispatched = false; + const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask; + const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback : + DispatchMode::kFComputeEx; + if (!dispatched && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)) { Review comment: nit: common::ContainsOnlyStorage -> ContainsOnlyStorage This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] eric-haibin-lin commented on a change in pull request #10550: [MXNET-320] Support elemwise_add/sub/max/min/hypot between dense and csr tensors
eric-haibin-lin commented on a change in pull request #10550: [MXNET-320] Support elemwise_add/sub/max/min/hypot between dense and csr tensors URL: https://github.com/apache/incubator-mxnet/pull/10550#discussion_r182810702 ## File path: src/operator/elemwise_op_common.h ## @@ -102,6 +102,48 @@ inline bool ElemwiseStorageType(const nnvm::NodeAttrs& attrs, in_attrs, out_attrs); } +template +inline bool ElemwisePreferDenseStorageType(const nnvm::NodeAttrs& attrs, Review comment: This function should be moved to binary_op instead of elemwise_op_common.h This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] eric-haibin-lin commented on a change in pull request #10550: [MXNET-320] Support elemwise_add/sub/max/min/hypot between dense and csr tensors
eric-haibin-lin commented on a change in pull request #10550: [MXNET-320] Support elemwise_add/sub/max/min/hypot between dense and csr tensors URL: https://github.com/apache/incubator-mxnet/pull/10550#discussion_r181541710 ## File path: src/operator/elemwise_op_common.h ## @@ -73,6 +73,12 @@ inline bool ElemwiseStorageAttr(const nnvm::NodeAttrs& attrs, dispatched = storage_type_assign(out_attrs, kCSRStorage, dispatch_mode, dispatch_ex); } + if (!dispatched && (((*in_attrs)[0] == kDefaultStorage && (*in_attrs)[1] == kCSRStorage) || + ((*in_attrs)[0] == kCSRStorage && (*in_attrs)[1] == kDefaultStorage))) { +// dense, csr -> csr +dispatched = storage_type_assign(out_attrs, kDefaultStorage, Review comment: 1. Changing `ElemwiseStorageAttr` will affect ALL operators using this function. 2. Unary op also uses this function. There's no guarantee len(in_attr) > 1. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] eric-haibin-lin commented on a change in pull request #10550: [MXNET-320] Support elemwise_add/sub/max/min/hypot between dense and csr tensors
eric-haibin-lin commented on a change in pull request #10550: [MXNET-320] Support elemwise_add/sub/max/min/hypot between dense and csr tensors URL: https://github.com/apache/incubator-mxnet/pull/10550#discussion_r181541724 ## File path: src/operator/elemwise_op_common.h ## @@ -73,6 +73,12 @@ inline bool ElemwiseStorageAttr(const nnvm::NodeAttrs& attrs, dispatched = storage_type_assign(out_attrs, kCSRStorage, dispatch_mode, dispatch_ex); } Review comment: As usual, please add unit tests This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] eric-haibin-lin commented on a change in pull request #10550: [MXNET-320] Support elemwise_add/sub/max/min/hypot between dense and csr tensors
eric-haibin-lin commented on a change in pull request #10550: [MXNET-320] Support elemwise_add/sub/max/min/hypot between dense and csr tensors URL: https://github.com/apache/incubator-mxnet/pull/10550#discussion_r181541551 ## File path: src/operator/elemwise_op_common.h ## @@ -73,6 +73,12 @@ inline bool ElemwiseStorageAttr(const nnvm::NodeAttrs& attrs, dispatched = storage_type_assign(out_attrs, kCSRStorage, dispatch_mode, dispatch_ex); } Review comment: As usual, please update the documentation for the sparse operators during registration This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services