This is an automated email from the ASF dual-hosted git repository.

apeforest pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 5486828  [Large Tensor] Fixed SoftmaxActivation op (#17634)
5486828 is described below

commit 5486828ea5b5ddcd5d5441533664820aab937edf
Author: Connor Goggins <cgoggi...@gmail.com>
AuthorDate: Thu Feb 20 10:20:08 2020 -0800

    [Large Tensor] Fixed SoftmaxActivation op (#17634)
    
    * Changed dtype for data & gradient dimensions
    
    * Add nightly test
---
 src/operator/nn/softmax_activation-inl.h | 14 +++++++-------
 tests/nightly/test_large_array.py        | 12 ++++++++++++
 2 files changed, 19 insertions(+), 7 deletions(-)

diff --git a/src/operator/nn/softmax_activation-inl.h b/src/operator/nn/softmax_activation-inl.h
index 1c67380..9f8e581 100644
--- a/src/operator/nn/softmax_activation-inl.h
+++ b/src/operator/nn/softmax_activation-inl.h
@@ -82,9 +82,9 @@ void SoftmaxActivationCompute(const nnvm::NodeAttrs& attrs,
   } else {
     CHECK_GE(in_data.ndim(), 3)
         << "Input need to have a least 3 dimensions when mode=channel";
-    int n = in_data.size(0);
-    int k = in_data.size(1);
-    Shape<3> s3 = Shape3(n, k, static_cast<int>(in_data.Size()/n/k));
+    index_t n = in_data.size(0);
+    index_t k = in_data.size(1);
+    Shape<3> s3 = Shape3(n, k, static_cast<index_t>(in_data.Size()/n/k));
     Tensor<xpu, 3, real_t> data = in_data.get_with_shape<xpu, 3, real_t>(s3, s);
     Tensor<xpu, 3, real_t> out = out_data.get_with_shape<xpu, 3, real_t>(s3, s);
     Softmax(out, data);
@@ -107,10 +107,10 @@ void SoftmaxActivationGradCompute(const nnvm::NodeAttrs& attrs,
   const OpReqType &req = reqs[0];
   const TBlob &in_grad = outputs[0];
   // Use a 3d tensor for both modes -> {instance, channel}. Get shapes
-  int total_size = in_grad.Size();
-  int batch_size = in_grad.shape_[0];
-  int channel_num = in_grad.shape_[1];
-  int rest_size = total_size / (batch_size * channel_num);
+  index_t total_size = in_grad.Size();
+  index_t batch_size = in_grad.shape_[0];
+  index_t channel_num = in_grad.shape_[1];
+  index_t rest_size = total_size / (batch_size * channel_num);
   const Shape<3> data_shape = Shape3(batch_size, channel_num, rest_size);
   // Get tensors
   Stream<xpu> *s = ctx.get_stream<xpu>();
diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py
index 379030a..4e09cfe 100644
--- a/tests/nightly/test_large_array.py
+++ b/tests/nightly/test_large_array.py
@@ -127,6 +127,17 @@ def test_nn():
         expected_grad_out[k] = -1
         assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
 
+    def check_softmax_activation():
+        data = nd.random_normal(shape=(2**29, 2, 2, 2))
+        out = nd.random_normal(shape=(2**29, 2, 2, 2))
+
+        res = nd.SoftmaxActivation(data=data, out=out)
+
+        assert res.shape[0] == 536870912
+        assert res.shape[1] == 2
+        assert res.shape[2] == 2
+        assert res.shape[3] == 2
+
     def np_softmax(x, axis=-1, temperature=1.0):
         x = x - np.max(x, axis=axis, keepdims=True)
         x = np.exp(x/temperature)
@@ -450,6 +461,7 @@ def test_nn():
     check_softmax()
     check_softmax_cross_entropy()
     check_softmax_output()
+    check_softmax_activation()
     check_log_softmax()
     check_leaky_relu()
     check_pooling()

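For context, a minimal sketch (not part of the commit) of the overflow this
patch guards against: the new nightly test uses shape (2**29, 2, 2, 2), i.e.
2**32 elements in total, which exceeds INT32_MAX, so the previous int-typed
sizes would silently truncate; index_t is 64-bit in large-tensor builds. The
sketch below assumes only NumPy; the commented MXNet lines additionally assume
a build with USE_INT64_TENSOR_SIZE=1 and enough memory (~16 GiB of float32
data for this shape).

    # Sketch only (not part of the commit): why shape (2**29, 2, 2, 2)
    # overflows 32-bit sizes such as the old `int` dimensions.
    import numpy as np

    shape = (2**29, 2, 2, 2)
    total = int(np.prod(shape, dtype=np.int64))   # 4294967296 == 2**32 elements

    assert total > np.iinfo(np.int32).max         # exceeds INT32_MAX (2**31 - 1)
    assert np.int64(total).astype(np.int32) == 0  # 32-bit truncation wraps to 0

    # With a large-tensor MXNet build, the fixed op can be exercised directly
    # (memory permitting):
    #   from mxnet import nd
    #   data = nd.random_normal(shape=shape)
    #   res = nd.SoftmaxActivation(data=data)
    #   assert res.shape == shape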