szha commented on a change in pull request #19059:
URL: https://github.com/apache/incubator-mxnet/pull/19059#discussion_r485906490



##########
File path: tests/nightly/test_np_large_array.py
##########
@@ -568,6 +572,682 @@ def test_slice_assign():
     B[-1] = 2
     assert B[-1, 0] == 2 and B[-1, 1] == 2
 
+@use_np
[email protected](reason='This test will pass;  will fail if the dim along \
+    the axis is large. Need to add dim check')
+def test_cumsum():
+    A = np.ones((INT_OVERFLOW, 3))
+    A.attach_grad()
+    with mx.autograd.record():
+        B = np.cumsum(A, axis=1, dtype='float64')
+        B.backward()
+    assert B.shape == A.shape
+    assert B[-1, -1] == 3
+    assert A.grad.shape == A.shape
+    assert A.grad[0, 0] == 3
+    assert A.grad[-1, -1] == 1
+
+@use_np
[email protected](reason='breaks on large tensor')
+def test_cross():
+    A = np.ones((INT_OVERFLOW, 3))
+    B = np.ones((INT_OVERFLOW, 2))
+    A[-1] = np.array([1, 2, 3])
+    B[-1] = np.array([4, 5])
+    A.attach_grad()
+    with mx.autograd.record():
+        C = np.cross(A, B)
+        C.backward()
+    assert C.shape == (INT_OVERFLOW, 3)
+    assert C[0, 0] == -1 and C[0, 1] == 1 and C[0, 2] == 0
+    assert C[-1, 0] == -15 and C[-1, 1] == 12 and C[-1, 2] == -3
+    assert A.grad.shape == A.shape
+    assert A.grad[0, 0] == 1 and A.grad[0, 1] == -1 and A.grad[0, 2] == 0
+    assert A.grad[-1, 0] == 5 and A.grad[-1, 1] == -4 and A.grad[-1, 2] == -1
+
+@use_np
+def test_logical_family():
+    def batch_check(x1, x2, funcs):
+        x1.attach_grad()
+        for f in funcs:
+            with mx.autograd.record():
+                y = f(x1, x2)
+                y.backward()
+            assert y.shape == x1.shape
+            assert y[0] == f(x1[0], x2[0])
+            assert x1.grad.shape == x1.shape
+            assert x1.grad[0] == 0
+
+    A = np.zeros((INT_OVERFLOW), dtype='int32')
+    B = np.ones((INT_OVERFLOW), dtype='int32')
+    batch_check(A, B, [np.logical_and, np.logical_or, np.logical_xor])
+    B.attach_grad()
+    with mx.autograd.record():
+        C = np.logical_not(B)
+        C.backward()
+    assert C.shape == B.shape
+    assert C[0] == 0
+    assert B.grad.shape == B.shape
+    assert B.grad[0] == 0
+
+@use_np
+def test_deg_rad():
+    # deg2rad is the same thing as radians
+    # rad2deg is the same thing as degrees
+    A = np.zeros((INT_OVERFLOW, 2))
+    A[-1, -1] = 180
+    A.attach_grad()
+    with mx.autograd.record():
+        B = np.deg2rad(A)
+        B.backward()
+    assert B.shape == A.shape
+    assert B[0, 0] == 0
+    assert_almost_equal(B[-1, -1], np.array([np.pi]), rtol=1e-5, atol=1e-5)
+    assert A.grad.shape == A.shape
+    assert_almost_equal(A.grad[0, 0], np.array([1.0 / 180 * np.pi]), 
rtol=1e-5, atol=1e-5)
+    B.attach_grad()
+    with mx.autograd.record():
+        C = np.rad2deg(B)
+        C.backward()
+    assert C.shape == B.shape
+    assert C[0, 0] == 0 and C[-1, -1] == 180
+    assert B.grad.shape == B.shape
+    assert_almost_equal(B.grad[0, 0], np.array([180.0 / np.pi]), rtol=1e-5, 
atol=1e-5)
+
+@use_np
[email protected](reason='breaks on large (>=2**31) tensor; times out \
+    on large tensors')
+def test_delete():
+    A = np.zeros((INT_OVERFLOW, 2))
+    A[-1, -1] = 2
+    A[-2, -1] = 1
+    B = np.delete(A, INT_OVERFLOW-1, axis=0)
+    assert B.shape == (INT_OVERFLOW-1, 2)
+    assert B[-1, -1] == 1
+
+@use_np
[email protected](reason='segfault on large tensor ~2**31')
+def test_diff():
+    A = np.zeros((2, INT_OVERFLOW))
+    A[-1, -1] = 100
+    A.attach_grad()
+    with mx.autograd.record():
+        B = np.diff(A)
+        B.backward()
+    assert B.shape == (2, INT_OVERFLOW-1)
+    assert B[-1,-1] == 100
+    assert A.grad.shape == A.shape
+    assert A[-1, -1] == 100
+
+@use_np
+def test_divide():
+    # np.divide and np.true_divide are the same thing
+    A = np.ones((INT_OVERFLOW, 2))
+    A[-1, -1] = 10
+    A.attach_grad()
+    with mx.autograd.record():
+        B = np.divide(A, np.array([2, 3]))
+        B.backward()
+    assert B.shape == A.shape
+    assert_almost_equal(B[-1, -1], np.array([10 / 3]), rtol=1e-5, atol=1e-5)
+    assert A.grad.shape == A.shape
+    assert_almost_equal(A.grad[-1, -1], np.array([1.0 / 3]), rtol=1e-5, 
atol=1e-5)
+
+@use_np
[email protected](reason='errors out on 2**31')
+# TODO test other split operators: dsplit vsplit hsplit
+def test_split():
+    A = np.ones((INT_OVERFLOW, 2))
+    A[INT_OVERFLOW // 2] = 2
+    A.attach_grad()
+    with mx.autograd.record():
+        B = np.split(A, 2, axis = 0)
+        B[1].backward()
+    assert B[0].shape == (INT_OVERFLOW // 2, 2)
+    assert B[1][0, 0] == 2
+    assert A.grad.shape == A.shape
+    assert A.grad[0, 0] == 0 and A.grad[-1, -1] == 1
+
+@use_np
+def test_minimum():
+    A = np.ones((INT_OVERFLOW, 2))
+    A[-1, -1] = -1
+    B = np.zeros((INT_OVERFLOW, 1))
+    A.attach_grad()
+    B.attach_grad()
+    with mx.autograd.record():
+        C = np.minimum(A, B)
+        C.backward()
+    assert C.shape == A.shape
+    assert C[-1, -1] == -1
+    assert A.grad.shape == A.shape
+    assert A.grad[-1, -1] == 1 and A.grad[0, 0] == 0
+    assert B.grad.shape == B.shape
+    assert B.grad[-1] == 1 and B.grad[0] == 2
+
+@use_np
+def test_maximum():
+    A = np.ones((INT_OVERFLOW, 2))
+    A[-1, -1] = -1
+    B = np.zeros((INT_OVERFLOW, 1))
+    A.attach_grad()
+    B.attach_grad()
+    with mx.autograd.record():
+        C = np.maximum(A, B)
+        C.backward()
+    assert C.shape == A.shape
+    assert C[-1, -1] == 0
+    assert A.grad.shape == A.shape
+    assert A.grad[-1, -1] == 0 and A.grad[0, 0] == 1
+    assert B.grad.shape == B.shape
+    assert B.grad[-1] == 1 and B.grad[0] == 0
+
+@use_np
+def test_eye():
+    N = 2**16
+    A = np.eye(N)
+    assert A.shape == (N, N)
+    for i in range(N):
+        assert A[i, i] == 1
+    assert A[-1, -2] == 0 and A[0, 1] == 0
+    B = np.eye(N, M=N-1,  k=-1)
+    assert B.shape == (N, N-1)
+    for i in range(1, N):
+        assert B[i, i-1] == 1
+    assert B[0, 0] == 0 and B[-1, -2] == 0
+
+@use_np
+def test_fix():
+    A = np.ones((2, INT_OVERFLOW))
+    A[-1, -1] = -2.9
+    A[0, 0] = 2.9
+    A.attach_grad()
+    with mx.autograd.record():
+        B = np.fix(A)
+        B.backward()
+    assert B.shape == A.shape
+    assert B[0, 0] == 2 and B[-1, -1] == -2
+    assert A.grad.shape == A.shape
+    assert A.grad[-1, -1] == 0
+
+@use_np
+def test_flip():
+    A = np.zeros((2, INT_OVERFLOW))
+    A[0, 0] = 2
+    A.attach_grad()
+    with mx.autograd.record():
+        B = np.flip(A, axis=0)
+        B.backward()
+    assert B.shape == A.shape
+    assert B[1, 0] == 2
+    assert A.grad.shape == A.shape
+    assert A.grad[0, 0] == 1
+    C = np.flip(A, axis=1)
+    assert C[0, -1] == 2
+
+@use_np
+def test_fliplr():
+    A = np.zeros((1, 2, INT_OVERFLOW))
+    A[0, 0, 0] = 2
+    A.attach_grad()
+    with mx.autograd.record():
+        B = np.fliplr(A)
+        B.backward()
+    assert B.shape == A.shape
+    assert B[0, 1, 0] == 2
+    assert A.grad.shape == A.shape
+    assert A.grad[0, 0, 0] == 1
+
+@use_np
+def test_flipud():
+    A = np.zeros((2, 1, INT_OVERFLOW))
+    A[0, 0, 0] = 2
+    A.attach_grad()
+    with mx.autograd.record():
+        B = np.flipud(A)
+        B.backward()
+    assert B.shape == A.shape
+    assert B[1, 0, 0] == 2
+    assert A.grad.shape == A.shape
+    assert A.grad[0, 0, 0] == 1
+
+@use_np
+def test_full():
+    A = np.full((INT_OVERFLOW, 2), np.array([1, 2]))
+    assert A.shape == (INT_OVERFLOW, 2)
+    assert A[-1, 0] == 1 and A [-1, 1] == 2
+    B = np.full((2, INT_OVERFLOW), 3)
+    assert B.shape == (2, INT_OVERFLOW)
+    assert B[-1, -1] == 3
+
+@use_np
+def test_full_like():
+    A = np.zeros((INT_OVERFLOW, 2))
+    B = np.full_like(A, 2)
+    assert B.shape == A.shape
+    assert B[-1, -1] == 2
+
+@use_np
+def test_comparison_family():
+    def batch_check(funcs, exp):
+        A.attach_grad()
+        for f, e in zip(funcs, exp):
+            with mx.autograd.record():
+                C = f(A, B)
+                C.backward()
+            assert C.shape == A.shape
+            assert (C[0, 0], C[-1, -1]) == e
+            assert A.grad.shape == A.shape
+            assert A.grad[-1, -1] == 0
+    
+    A = np.ones((INT_OVERFLOW, 2))
+    B = np.zeros((INT_OVERFLOW, 2))
+    B[-1, -1] = 1
+    batch_check([np.greater, np.greater_equal, \
+        np.less, np.less_equal, np.equal, np.not_equal], \
+        [(True, False), (True, True), \
+        (False, False), (False, True), (False, True), (True, False)])
+
+@use_np
+def test_lcm():
+    A = np.ones((2, INT_OVERFLOW), dtype='int32')
+    B = np.ones((2, INT_OVERFLOW), dtype='int32')
+    A[-1, -1] = 3
+    B[-1, -1] = 5
+    A.attach_grad()
+    with mx.autograd.record():
+        C = np.lcm(A, B)
+        C.backward()
+    assert C.shape == A.shape
+    assert C[-1, -1] == 15
+    assert A.grad.shape == A.shape
+    assert A.grad[-1, -1] == 0
+
+@use_np
+def test_log_family():
+    def batch_check(funcs, exp):
+        A.attach_grad()
+        for f, e in zip(funcs, exp):
+            with mx.autograd.record():
+                B = f(A)
+                B.backward()
+            assert B.shape == A.shape
+            assert_almost_equal(B[-1, -1], np.array([e[0]]), \
+                rtol=1e-5, atol=1e-5)
+            assert A.grad.shape == A.shape
+            assert_almost_equal(A.grad[-1, -1], np.array([e[1]]), \
+                rtol=1e-5, atol=1e-5)
+
+    A = np.ones((INT_OVERFLOW, 2))
+    A[-1, -1] = 100
+    batch_check([np.log, np.log10, np.log2, np.log1p], \
+        [(4.6051702, 0.01), (2, 0.00434294), \
+        (6.643856, 0.01442695), (4.6151204, 0.00990099)])
+
+@use_np
[email protected](reason='errors out on 2**31')
+def test_empty_like():
+    A = np.zeros((INT_OVERFLOW, 2))
+    B = np.empty_like(A)
+    assert B.shape == A.shape
+    npx.waitall()
+    B[-1, -1] = 1
+    assert B[-1, -1] == 1
+
+@use_np
+def test_expand_dims():
+    A = np.zeros((INT_OVERFLOW))
+    B = np.expand_dims(A, axis=0)
+    C = np.expand_dims(B, axis=2)
+    npx.waitall()
+    assert B.shape == (1, INT_OVERFLOW)
+    assert C.shape == (1, INT_OVERFLOW, 1)
+
+@use_np
+def test_hamming():
+    A = np.hamming((INT_OVERFLOW))
+    npx.waitall()
+    assert A.shape == (INT_OVERFLOW, )
+
+@use_np
+def test_hanning():
+    A = np.hanning((INT_OVERFLOW))
+    npx.waitall()
+    assert A.shape == (INT_OVERFLOW, )
+
+@use_np
[email protected](reason='seg fault on 2**31')
+def test_histogram():
+    A = np.ones((INT_OVERFLOW, 2))
+    A[-1, -1] = 2
+    hist, _ = np.histogram(A, np.array([0.5, 1.5, 2.5]))
+    assert hist.shape == (2, )
+    assert hist[0] == int(2 * INT_OVERFLOW - 1) and hist[1] == 1
+
+@use_np
[email protected](reason='backward segfaults on 2**31')
+def test_hypot():
+    A = np.ones((INT_OVERFLOW, 2))
+    B = np.ones((INT_OVERFLOW, 2))
+    A[-1, -1], B[-1, -1] = 3, 4
+    A.attach_grad()
+    with mx.autograd.record():
+        C = np.hypot(A, B)
+        C.backward()
+    assert C.shape == A.shape
+    assert C[-1, -1] == 5
+    assert A.grad.shape == A.shape
+    assert_almost_equal(A.grad[-1, -1], np.array([0.6]), rtol=1e-5, atol=1e-5)
+
+@use_np
+def test_fmax():
+    A = np.ones((INT_OVERFLOW, 2))
+    A[-1, -1] = -1
+    B = np.zeros((INT_OVERFLOW, 1))
+    A.attach_grad()
+    B.attach_grad()
+    with mx.autograd.record():
+        C = np.fmax(A, B)
+        C.backward()
+    assert C.shape == A.shape
+    assert C[-1, -1] == 0
+    assert A.grad.shape == A.shape
+    assert A.grad[-1, -1] == 0 and A.grad[0, 0] == 1
+    assert B.grad.shape == B.shape
+    assert B.grad[-1] == 1 and B.grad[0] == 0
+
+@use_np
+def test_fmin():
+    A = np.ones((INT_OVERFLOW, 2))
+    A[-1, -1] = -1
+    B = np.zeros((INT_OVERFLOW, 1))
+    A.attach_grad()
+    B.attach_grad()
+    with mx.autograd.record():
+        C = np.fmin(A, B)
+        C.backward()
+    assert C.shape == A.shape
+    assert C[-1, -1] == -1
+    assert A.grad.shape == A.shape
+    assert A.grad[-1, -1] == 1 and A.grad[0, 0] == 0
+    assert B.grad.shape == B.shape
+    assert B.grad[-1] == 1 and B.grad[0] == 2
+
+@use_np
+def test_fmod():
+    A = np.ones((INT_OVERFLOW, 2))
+    B = np.ones((INT_OVERFLOW, 1))
+    A[-1, -1], B[-1, -1] = 11, 7
+    A.attach_grad()
+    B.attach_grad()
+    with mx.autograd.record():
+        C = np.fmod(A, B)
+        C.backward()
+    assert C.shape == A.shape
+    assert C[-1, -1] == 4
+    assert A.grad.shape == A.shape
+    assert A.grad[0, 0] == 1
+    assert B.grad.shape == B.shape
+    assert B.grad[-1] == -1 and B.grad[0] == -2
+
+@use_np
+def test_mod():
+    # np.mod and np.remainder are the same thing
+    A = np.ones((INT_OVERFLOW, 2))
+    B = np.ones((INT_OVERFLOW, 1))
+    A[-1, -1], B[-1, -1] = 11, 7
+    A.attach_grad()
+    B.attach_grad()
+    with mx.autograd.record():
+        C = np.mod(A, B)
+        C.backward()
+    assert C.shape == A.shape
+    assert C[-1, -1] == 4
+    assert A.grad.shape == A.shape
+    assert A.grad[0, 0] == 1
+    assert B.grad.shape == B.shape
+    assert B.grad[-1] == -1 and B.grad[0] == -2
+
+@use_np
+def test_value_check_family():
+    def batch_check(funcs, ref):
+        A.attach_grad()
+        for f, r in zip(funcs, ref):
+            with mx.autograd.record():
+                B = f(A)
+                B.backward()
+            assert B.shape == A.shape
+            for i in range(4):
+                assert B[i, -1] == r[i]
+            assert A.grad.shape == A.shape
+            assert A.grad[-1, -1] == 0
+
+    A = np.zeros((4, INT_OVERFLOW))
+    A[1:, -1] = np.array([np.inf, -np.inf, np.nan])
+    batch_check([np.isinf, np.isneginf, np.isposinf, np.isnan, np.isfinite], \
+        [(False, True, True, False), (False, False, True, False), \
+        (False, True, False, False), (False, False, False, True), \
+        (True, False, False, False)])
+
+@use_np
[email protected](reason='segfaults on 2**31')
+def test_kron():
+    A = np.array([5, 10], dtype="float64")
+    B = np.ones((INT_OVERFLOW), dtype = 'float64')
+    B[-1] = 3
+    A.attach_grad()
+    B.attach_grad()
+    with mx.autograd.record():
+        C = np.kron(A, B)
+        C.backward()
+    assert C.shape == (int(2 * INT_OVERFLOW), )
+    assert C[INT_OVERFLOW-1] == 15 and C[-1] == 30
+    assert A.grad.shape == A.shape and B.grad.shape == B.shape
+    assert A.grad[0] == INT_OVERFLOW + 3 - 1
+    assert B.grad[-1] == 15
+
+@use_np
+def test_rint():
+    A = np.zeros((INT_OVERFLOW, 2))
+    A[0, 0], A[-1, -1] = 2.1,  2.9
+    A.attach_grad()
+    with mx.autograd.record():
+        B = np.rint(A)
+        B.backward()
+    assert B.shape == A.shape
+    assert B[0, 0] == 2 and B[-1, -1] == 3
+    assert A.grad.shape == A.shape
+    assert A.grad[-1, -1] == 0
+
+@use_np
[email protected](reason='segfaults on large tensor ~2**30')
+def test_insert():
+    A = np.zeros((INT_OVERFLOW, 2))
+    B = np.ones((INT_OVERFLOW))
+    B[-1] = 2
+    C = np.insert(A, 1, B, axis=1)
+    assert C.shape == (INT_OVERFLOW, 3)
+    assert C[0, 1] == 1 and C[-1, 1] == 2
+
+@use_np
[email protected](reason='segfaults on large tensor ~2**30')
+# problem might be on total size
+def test_interp():
+    xp = np.array([1, 2, 3])
+    fp = np.array([3, 2, 1])
+    A = np.ones((2, INT_OVERFLOW))
+    A[-1, -1] = 2.5
+    A.attach_grad()
+    with mx.autograd.record():
+        B = np.interp(A, xp, fp)
+        B.backward()
+    assert B.shape == A.shape
+    assert B[-1, -1] == 1.5
+    assert A.grad.shape == A.shape
+    assert A.grad[-1, -1] == 0
+
+@use_np
+def test_invert():
+    A = np.zeros((2, INT_OVERFLOW), dtype='uint8')
+    A[-1, -1] = 1
+    A.attach_grad()
+    with mx.autograd.record():
+        B = np.invert(A)
+        B.backward()
+    assert B.shape == A.shape
+    assert B[0, 0] == 255 and B[-1, -1] == 254
+    assert A.grad.shape == A.shape
+    assert A.grad[-1, -1] == 0
+
+@use_np
+def test_exp():
+    A = np.ones((2, INT_OVERFLOW))
+    A[-1, -1] = 2
+    A.attach_grad()
+    with mx.autograd.record():
+        B = np.exp(A)
+        B.backward()
+    assert B.shape == A.shape
+    assert_almost_equal(B[0, 0], np.array(np.e**1), rtol=1e-5, atol=1e-5)
+    assert_almost_equal(B[-1, -1], np.array(np.e**2), rtol=1e-5, atol=1e-5)
+    assert A.grad.shape == A.shape
+    assert_almost_equal(A.grad[-1, -1], B[-1, -1], rtol=1e-5, atol=1e-5)
+
+@use_np
+def test_expm1():
+    A = np.ones((2, INT_OVERFLOW))
+    A[-1, -1] = 2
+    A.attach_grad()
+    with mx.autograd.record():
+        B = np.expm1(A)
+        B.backward()
+    assert B.shape == A.shape
+    assert_almost_equal(B[0, 0], np.array(np.e**1 - 1), rtol=1e-5, atol=1e-5)
+    assert_almost_equal(B[-1, -1], np.array(np.e**2 - 1), rtol=1e-5, atol=1e-5)
+    assert A.grad.shape == A.shape
+    assert_almost_equal(A.grad[-1, -1], np.array(np.e**2), rtol=1e-5, 
atol=1e-5)
+
+@use_np
[email protected](reason='segfaults on large tensor ~2**30')
+def test_power():
+    INT_OVERFLOW = 2**30
+    A = np.full((2, INT_OVERFLOW), 2)
+    B = np.ones((2, INT_OVERFLOW))
+    B[-1, -1] = 3
+    A.attach_grad()
+    B.attach_grad()
+    with mx.autograd.record():
+        C = np.power(A, B)
+        C.backward()
+    assert C.shape == A.shape
+    assert C[-1, -1] == 8
+    assert A.grad.shape == A.shape
+    assert A.grad[-1, -1] == 12
+    assert B.grad.shape == B.shape
+    assert_almost_equal(B.grad[-1, -1], 2**3 * np.log(2), rtol=1e-5, atol=1e-5)
+
+@use_np
+def test_frexp():
+    A = np.ones((2, INT_OVERFLOW))
+    A[-1, -1] = 9
+    B, C = np.frexp(A)
+    assert_almost_equal(A[-1, -1], B[-1, -1] * 2 ** C[-1, -1], \
+        rtol=1e-5, atol=1e-5)
+
+@use_np
[email protected](reason='segfaults on large tensor ~2**30, likely the same \

Review comment:
       Shall we exclude these skipped tests for now, then?




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to