access2rohit commented on a change in pull request #18932:
URL: https://github.com/apache/incubator-mxnet/pull/18932#discussion_r472437632



##########
File path: tests/nightly/test_np_large_array.py
##########
@@ -76,3 +78,459 @@ def test_softmax():
         true_output = np.full((SMALL_Y, LARGE_X), (1 / input_data.shape[axis]))
         output = npx.softmax(input_data, axis=axis)
         assert_almost_equal(output.asnumpy(), true_output, rtol=1e-5, atol=1e-5)
+
+'''
+  _ _ _  _ _ __  _ __ _  _
+ | ' \ || | '  \| '_ \ || |
+ |_||_\_,_|_|_|_| .__/\_, |
+                |_|   |__/
+'''
+
+@use_np
+def test_ones():
+    A = np.ones((INT_OVERFLOW, 2))
+    assert A.shape == (INT_OVERFLOW, 2)
+
+@use_np
+def test_zeros():
+    A = np.zeros((INT_OVERFLOW, 2))
+    assert A.shape == (INT_OVERFLOW, 2)
+
+@use_np
+def test_abs():
+    A = np.ones((INT_OVERFLOW, 2))
+    A.attach_grad()
+    with mx.autograd.record():
+        B = np.abs(A)
+    print(B)
+    assert B.shape == (INT_OVERFLOW, 2)
+    B.backward()
+    assert A.grad.shape == (INT_OVERFLOW, 2)
+
+@use_np
+def test_absolute():
+    A = np.ones((INT_OVERFLOW, 2))
+    A.attach_grad()
+    with mx.autograd.record():
+        B = np.absolute(A)
+    print(B)
+    assert B.shape == (INT_OVERFLOW, 2)
+    B.backward()
+    assert A.grad.shape == (INT_OVERFLOW, 2)
+
+@use_np
+def test_add():
+    A = np.ones((INT_OVERFLOW, 2))
+    B = np.ones((INT_OVERFLOW, 2))
+    A.attach_grad()
+    with mx.autograd.record():
+        C = np.add(A, B)
+    print(C)
+    assert C.shape == (INT_OVERFLOW, 2)
+    C.backward()
+    assert A.grad.shape == (INT_OVERFLOW, 2)
+
+# this will fail; broadcast needs to be fixed
+# TODO add backward test after forward is fixed
+@use_np
+@pytest.mark.skip(reason='Does not support large tensor; to be fixed')
+def test_add_broadcast():
+    A = np.ones((INT_OVERFLOW, 2))
+    B = np.ones((INT_OVERFLOW, 1))
+    C = np.add(A, B)
+    print(C)
+    assert C.shape == (INT_OVERFLOW, 2)
+
+@use_np
+def test_all():
+    A = np.ones((INT_OVERFLOW, 2))
+    A.attach_grad()
+    with mx.autograd.record():
+        B = np.all(A)
+    print(B)
+    assert B.asnumpy() == True
+    B.backward()
+    assert A.grad.shape == (INT_OVERFLOW, 2)

Review comment:
       Please also verify the backward values here, not just the shape of A.grad: a wrong gradient with the right shape would still pass these tests.
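
       For example, a minimal sketch of the kind of check I mean, using test_add since the gradient of A + B w.r.t. A is all ones (it reuses this module's existing imports and INT_OVERFLOW; the same pattern applies to the other tests in this hunk, with the expected gradient values adjusted per op):

           @use_np
           def test_add():
               A = np.ones((INT_OVERFLOW, 2))
               B = np.ones((INT_OVERFLOW, 2))
               A.attach_grad()
               with mx.autograd.record():
                   C = np.add(A, B)
               assert C.shape == (INT_OVERFLOW, 2)
               # forward value check: 1 + 1
               assert C[0][0] == 2
               C.backward()
               assert A.grad.shape == (INT_OVERFLOW, 2)
               # backward value check: d(A + B)/dA is all ones, spot-check one element
               assert A.grad[0][0] == 1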

##########
File path: tests/nightly/test_np_large_array.py
##########
@@ -76,3 +78,459 @@ def test_softmax():
+@use_np
+def test_amin():
+    A = np.ones((INT_OVERFLOW, 2))
+    A[100][1] = -1
+    A.attach_grad()
+    with mx.autograd.record():
+        B = np.amin(A)
+    print(B)
+    assert B.asnumpy() == -1.0
+    B.backward()
+    assert A.grad.shape == (INT_OVERFLOW, 2)

Review comment:
       Backward value check needed here as well: for amin the gradient should land on the minimum element, so the test can assert the actual values of A.grad instead of only its shape.
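
       A minimal sketch of what that could look like, assuming MXNet's amin backward routes the incoming gradient to the arg-min element and leaves the rest zero (the spot-checked indices are illustrative; adjust if the op handles ties differently):

           @use_np
           def test_amin():
               A = np.ones((INT_OVERFLOW, 2))
               A[100][1] = -1
               A.attach_grad()
               with mx.autograd.record():
                   B = np.amin(A)
               assert B.asnumpy() == -1.0
               B.backward()
               assert A.grad.shape == (INT_OVERFLOW, 2)
               # backward value check: gradient concentrates on the min element
               assert A.grad[100][1] == 1
               # and is zero everywhere else, e.g. at the first element
               assert A.grad[0][0] == 0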




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org

