This is an automated email from the ASF dual-hosted git repository.

samskalicky pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new b6ab781  Enable large tensor support for numpy cross (#19312)
b6ab781 is described below

commit b6ab781c2267a026a01aafd29667678dbb7300de
Author: Rohit Kumar Srivastava <[email protected]>
AuthorDate: Thu Oct 8 17:48:34 2020 -0700

    Enable large tensor support for numpy cross (#19312)
    
    Co-authored-by: Rohit Kumar Srivastava <[email protected]>
---
 src/operator/numpy/np_cross-inl.h    | 14 +++++++-------
 tests/nightly/test_np_large_array.py | 19 +++++++++++++++++++
 2 files changed, 26 insertions(+), 7 deletions(-)

diff --git a/src/operator/numpy/np_cross-inl.h b/src/operator/numpy/np_cross-inl.h
index 23a3a33..ab64564 100644
--- a/src/operator/numpy/np_cross-inl.h
+++ b/src/operator/numpy/np_cross-inl.h
@@ -90,8 +90,8 @@ struct NumpyCrossParam : public dmlc::Parameter<NumpyCrossParam> {
 
 struct CrossInAssign {
   template<typename DType>
-  MSHADOW_XINLINE static void Map(int i, const DType *in_ptr, DType *out_ptr,
-                                  const int stride, const int index, const int msize) {
+  MSHADOW_XINLINE static void Map(index_t i, const DType *in_ptr, DType *out_ptr,
+                                  const index_t stride, const index_t index, const size_t msize) {
     if (index < stride && i * stride + index < msize) {
       out_ptr[i] = in_ptr[i * stride + index];
     }
@@ -101,9 +101,9 @@ struct CrossInAssign {
 template<int req>
 struct CrossOutAssign {
   template<typename DType>
-  MSHADOW_XINLINE static void Map(int i, const DType *in_ptr, DType *out_ptr,
-                                  const int positive, const int stride,
-                                  const int index, const int msize) {
+  MSHADOW_XINLINE static void Map(index_t i, const DType *in_ptr, DType *out_ptr,
+                                  const int positive, const index_t stride,
+                                  const index_t index, const size_t msize) {
     if (index < stride && i * stride + index < msize) {
       KERNEL_ASSIGN(out_ptr[i * stride + index], req, positive == 1 ? in_ptr[i] : -in_ptr[i]);
     }
@@ -113,7 +113,7 @@ struct CrossOutAssign {
 template<int req>
 struct ResAssign {
   template<typename DType>
-  MSHADOW_XINLINE static void Map(int i, const DType *in_data, DType *out_data) {
+  MSHADOW_XINLINE static void Map(index_t i, const DType *in_data, DType *out_data) {
     KERNEL_ASSIGN(out_data[i], req, in_data[i]);
   }
 };
@@ -153,7 +153,7 @@ inline mxnet::TShape GetMoveaxisShape(const Tuple<int>& moveaxis_index,
   const int ndim = org_shape.ndim();
   if (ndim == 0) { return mxnet::TShape(0, 0); }
  CHECK_EQ(moveaxis_index.ndim(), org_shape.ndim()) << "moveaxis index dismatch original shape.";
-  std::vector<int> moveaxis_shape_vec(ndim, -1);
+  std::vector<size_t> moveaxis_shape_vec(ndim, -1);
   for (int i = 0; i < ndim; ++i) {
     moveaxis_shape_vec[i] = org_shape[moveaxis_index[i]];
   }
diff --git a/tests/nightly/test_np_large_array.py b/tests/nightly/test_np_large_array.py
index be0ccf3..e567080 100644
--- a/tests/nightly/test_np_large_array.py
+++ b/tests/nightly/test_np_large_array.py
@@ -1855,3 +1855,22 @@ def test_round():
     output = np.round(input)
     assert output.shape == (INT_OVERFLOW, 2)
     assert output[-1][0] == 2
+
+
+@use_np
+def test_cross():
+    inp = np.ones((INT_OVERFLOW, 3))
+    inp2 = np.ones((INT_OVERFLOW, 2))
+    inp[-1] = np.array([1, 2, 3])
+    inp2[-1] = np.array([4, 5])
+    inp.attach_grad()
+    with mx.autograd.record():
+        out = np.cross(inp, inp2)
+        out.backward()
+    assert out.shape == (INT_OVERFLOW, 3)
+    assert out[0, 0] == -1 and out[0, 1] == 1 and out[0, 2] == 0
+    assert out[-1, 0] == -15 and out[-1, 1] == 12 and out[-1, 2] == -3
+    assert inp.grad.shape == inp.shape
+    assert inp.grad[0, 0] == 1 and inp.grad[0, 1] == -1 and inp.grad[0, 2] == 0
+    assert inp.grad[-1, 0] == 5 and inp.grad[-1, 1] == -4 and inp.grad[-1, 2] == -1
+

Reply via email to