jasonyu1996 commented on a change in pull request #12430: [MXNET-882] Support
for N-d arrays added to diag op.
URL: https://github.com/apache/incubator-mxnet/pull/12430#discussion_r217239292
##########
File path: src/operator/tensor/diag_op-inl.h
##########
@@ -104,42 +143,132 @@ inline bool DiagOpType(const nnvm::NodeAttrs& attrs,
return (*out_attrs)[0] != -1;
}
-template<int req>
+template<int ndim, int req, bool back>
struct diag {
template<typename DType>
- MSHADOW_XINLINE static void Map(int i, DType* out, const DType* a,
- mshadow::Shape<2> ishape, int k) {
+ MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* a,
+ mshadow::Shape<ndim> oshape,
+ mshadow::Shape<ndim> ishape,
+ index_t stride, index_t offset,
+ index_t base) {
using namespace mxnet_op;
- int j = 0;
- if (k > 0) {
- j = ravel(mshadow::Shape2(i, i + k), ishape);
- } else if (k < 0) {
- j = ravel(mshadow::Shape2(i - k, i), ishape);
+ index_t idx = i / base;
+    index_t j = ravel(unravel(idx, oshape), ishape) + offset + stride * (i - idx * base);
+ if (back) {
+ KERNEL_ASSIGN(out[j], req, a[i]);
} else {
- j = ravel(mshadow::Shape2(i, i), ishape);
+ KERNEL_ASSIGN(out[i], req, a[j]);
}
-
- KERNEL_ASSIGN(out[i], req, a[j]);
}
};
-template<int req>
+template<int req, bool back>
struct diag_gen {
template<typename DType>
- MSHADOW_XINLINE static void Map(int i, DType* out, const DType* a,
+ MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* a,
mshadow::Shape<2> oshape, int k) {
using namespace mxnet_op;
auto j = unravel(i, oshape);
if (j[1] == (j[0] + k)) {
auto l = j[0] < j[1] ? j[0] : j[1];
- KERNEL_ASSIGN(out[i], req, a[l]);
- } else {
+ if (back) {
+ KERNEL_ASSIGN(out[l], req, a[i]);
+ } else {
+ KERNEL_ASSIGN(out[i], req, a[l]);
+ }
+ } else if (!back) {
KERNEL_ASSIGN(out[i], req, static_cast<DType>(0));
}
}
};
+template<typename xpu, bool back>
+void DiagOpProcess(const TBlob& in_data,
+ const TBlob& out_data,
+ const TShape& ishape,
+ const TShape& oshape,
+ index_t dsize,
+ const DiagParam& param,
+ mxnet_op::Stream<xpu> *s,
+ const std::vector<OpReqType>& req) {
+ using namespace mxnet_op;
+ using namespace mshadow;
+ if (ishape.ndim() > 1) {
+ // input : (leading + i, body + i, trailing)
+ int x1 = CheckAxis(param.axis1.value(), ishape.ndim());
+ int x2 = CheckAxis(param.axis2.value(), ishape.ndim());
+
+ int idim = ishape.ndim(), odim = oshape.ndim();
+
+ int minx = x1, maxx = x2;
+ if (minx > maxx) {
+ std::swap(minx, maxx);
+ }
+
+ index_t oleading = 1,
+ obody = 1,
+ otrailing = 1;
+
+ for (int i = 0; i < minx; ++i) {
+ oleading *= ishape[i];
+ }
+ for (int i = minx + 1; i < maxx; ++i) {
+ obody *= ishape[i];
+ }
+ for (int i = maxx + 1; i < idim; ++i) {
+ otrailing *= ishape[i];
+ }
+
+ index_t ileading = oleading,
+ ibody = obody * ishape[minx],
+ itrailing = otrailing * ishape[maxx];
+
+ index_t stride1 = itrailing * obody,
+ stride2 = otrailing;
+
+ if (x1 == maxx) {
+ std::swap(stride1, stride2);
+ }
+ index_t offset;
+ int k = param.k.value();
+ if (k > 0) {
+ offset = stride2 * k;
+ } else if (k < 0) {
+ offset = stride1 * -k;
+ } else {
+ offset = 0;
+ }
+
+ MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
+ MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
+ if (back && req[0] != kAddTo && req[0] != kNullOp) {
+ out_data.FlatTo1D<xpu, DType>(s) = 0;
+ }
+ if (ileading == 1) {
+          Kernel<diag<2, req_type, back>, xpu>::Launch(s, dsize, out_data.dptr<DType>(),
+ in_data.dptr<DType>(), Shape2(obody, otrailing),
+ Shape2(ibody, itrailing),
+ stride1 + stride2, offset, oshape[odim - 1]);
+ } else {
+          Kernel<diag<3, req_type, back>, xpu>::Launch(s, dsize, out_data.dptr<DType>(),
Review comment:
That is because for efficiency here I have merged some contiguous axes
together, and the resulting shape cannot have more than 3 axes. In the original
shape, there is no need to distinguish two axes if they are not separated by
`axis1` or `axis2`.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services