access2rohit commented on a change in pull request #19059:
URL: https://github.com/apache/incubator-mxnet/pull/19059#discussion_r485261190
##########
File path: tests/nightly/test_np_large_array.py
##########
@@ -176,11 +178,10 @@ def test_amin():
@use_np
def test_amax():
A = np.zeros((INT_OVERFLOW, 2))
- A[100][1] = 1
+ A[-1, -1] = 1
A.attach_grad()
with mx.autograd.record():
B = np.amax(A)
- print(B)
assert B == 1.0
B.backward()
Review comment:
Can you move it inside `autograd.record()`?
##########
File path: tests/nightly/test_np_large_array.py
##########
@@ -453,13 +455,14 @@ def batch_check(x1, x2, axises, shapes):
# backward not working https://github.com/apache/incubator-mxnet/issues/18952
def test_copysign():
A = np.ones((INT_OVERFLOW, 2))
- #A.attach_grad()
- #with mx.autograd.record():
- B = np.copysign(A, -1)
- assert B.shape == (INT_OVERFLOW, 2)
- assert B[0][0] == -1
- #B.backward()
- #assert A.grad.shape == (INT_OVERFLOW, 2)
+ A.attach_grad()
+ B = np.array([-1])
+ with mx.autograd.record():
+ C = np.copysign(A, B)
+ assert C.shape == (INT_OVERFLOW, 2)
+ assert C[0][0] == -1
+ C.backward()
Review comment:
Can you move it inside `autograd.record()`?
##########
File path: tests/nightly/test_np_large_array.py
##########
@@ -568,7 +571,686 @@ def test_slice_assign():
B[-1] = 2
assert B[-1, 0] == 2 and B[-1, 1] == 2
+@use_np
[email protected](reason='This test will pass; will fail if the dim along \
+ the axis is large. Need to add dim check')
+def test_cumsum():
+ A = np.ones((INT_OVERFLOW, 3))
+ A.attach_grad()
+ with mx.autograd.record():
+ B = np.cumsum(A, axis=1, dtype='float64')
+ assert B.shape == A.shape
+ assert B[-1, -1] == 3
+ B.backward()
Review comment:
Same as above: can you move it inside `autograd.record()`?
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]