Zha0q1 commented on a change in pull request #19059:
URL: https://github.com/apache/incubator-mxnet/pull/19059#discussion_r489116957
##########
File path: tests/nightly/test_np_large_array.py
##########
@@ -568,6 +502,517 @@ def test_slice_assign():
B[-1] = 2
assert B[-1, 0] == 2 and B[-1, 1] == 2
@use_np
def test_logical_family():
    """Smoke-test logical_{and,or,xor,not} on tensors with > 2**31 elements."""
    def run_binary(lhs, rhs, ops):
        lhs.attach_grad()
        for op in ops:
            with mx.autograd.record():
                out = op(lhs, rhs)
            out.backward()
            assert out.shape == lhs.shape
            assert out[0] == op(lhs[0], rhs[0])
            assert lhs.grad.shape == lhs.shape
            # logical ops are non-differentiable; grad is defined as zero
            assert lhs.grad[0] == 0

    zeros = np.zeros((INT_OVERFLOW), dtype='int32')
    ones = np.ones((INT_OVERFLOW), dtype='int32')
    run_binary(zeros, ones, [np.logical_and, np.logical_or, np.logical_xor])
    ones.attach_grad()
    with mx.autograd.record():
        negated = np.logical_not(ones)
    negated.backward()
    assert negated.shape == ones.shape
    assert negated[0] == 0
    assert ones.grad.shape == ones.shape
    assert ones.grad[0] == 0
+
@use_np
def test_deg_rad():
    """Round-trip deg2rad / rad2deg on a huge tensor, values and gradients.

    deg2rad is the same thing as radians; rad2deg is the same thing as degrees.
    """
    deg = np.zeros((INT_OVERFLOW, 2))
    deg[-1, -1] = 180
    deg.attach_grad()
    with mx.autograd.record():
        rad = np.deg2rad(deg)
    rad.backward()
    assert rad.shape == deg.shape
    assert rad[0, 0] == 0
    assert_almost_equal(rad[-1, -1], np.array([np.pi]), rtol=1e-5, atol=1e-5)
    assert deg.grad.shape == deg.shape
    # d(deg2rad)/dx = pi / 180 everywhere
    assert_almost_equal(deg.grad[0, 0], np.array([1.0 / 180 * np.pi]),
                        rtol=1e-5, atol=1e-5)
    rad.attach_grad()
    with mx.autograd.record():
        back = np.rad2deg(rad)
    back.backward()
    assert back.shape == rad.shape
    assert back[0, 0] == 0 and back[-1, -1] == 180
    assert rad.grad.shape == rad.shape
    # d(rad2deg)/dx = 180 / pi everywhere
    assert_almost_equal(rad.grad[0, 0], np.array([180.0 / np.pi]), rtol=1e-5,
                        atol=1e-5)
+
@use_np
def test_divide():
    """Broadcasting divide on a huge tensor, value and gradient.

    np.divide and np.true_divide are the same thing.
    """
    num = np.ones((INT_OVERFLOW, 2))
    num[-1, -1] = 10
    num.attach_grad()
    with mx.autograd.record():
        quot = np.divide(num, np.array([2, 3]))
    quot.backward()
    assert quot.shape == num.shape
    assert_almost_equal(quot[-1, -1], np.array([10 / 3]), rtol=1e-5, atol=1e-5)
    assert num.grad.shape == num.shape
    # last column is divided by 3, so d(x/3)/dx = 1/3
    assert_almost_equal(num.grad[-1, -1], np.array([1.0 / 3]), rtol=1e-5,
                        atol=1e-5)
+
@use_np
def test_minimum():
    """Element-wise minimum with broadcasting; grad flows to the smaller input."""
    lhs = np.ones((INT_OVERFLOW, 2))
    lhs[-1, -1] = -1
    rhs = np.zeros((INT_OVERFLOW, 1))
    lhs.attach_grad()
    rhs.attach_grad()
    with mx.autograd.record():
        out = np.minimum(lhs, rhs)
    out.backward()
    assert out.shape == lhs.shape
    assert out[-1, -1] == -1
    assert lhs.grad.shape == lhs.shape
    # lhs wins (is smaller) only at [-1, -1]
    assert lhs.grad[-1, -1] == 1 and lhs.grad[0, 0] == 0
    assert rhs.grad.shape == rhs.shape
    # rhs broadcasts across axis 1, so its grad sums over both columns
    assert rhs.grad[-1] == 1 and rhs.grad[0] == 2
+
@use_np
def test_maximum():
    """Element-wise maximum with broadcasting; grad flows to the larger input."""
    lhs = np.ones((INT_OVERFLOW, 2))
    lhs[-1, -1] = -1
    rhs = np.zeros((INT_OVERFLOW, 1))
    lhs.attach_grad()
    rhs.attach_grad()
    with mx.autograd.record():
        out = np.maximum(lhs, rhs)
    out.backward()
    assert out.shape == lhs.shape
    assert out[-1, -1] == 0
    assert lhs.grad.shape == lhs.shape
    # lhs loses (is smaller) only at [-1, -1]
    assert lhs.grad[-1, -1] == 0 and lhs.grad[0, 0] == 1
    assert rhs.grad.shape == rhs.shape
    # rhs broadcasts across axis 1; it wins only in the last row's last column
    assert rhs.grad[-1] == 1 and rhs.grad[0] == 0
+
@use_np
def test_eye():
    """eye with a large N: default diagonal and a sub-diagonal (k=-1) variant."""
    size = 2**16
    ident = np.eye(size)
    assert ident.shape == (size, size)
    for row in range(size):
        assert ident[row, row] == 1
    assert ident[-1, -2] == 0 and ident[0, 1] == 0
    shifted = np.eye(size, M=size - 1, k=-1)
    assert shifted.shape == (size, size - 1)
    for row in range(1, size):
        assert shifted[row, row - 1] == 1
    assert shifted[0, 0] == 0 and shifted[-1, -2] == 0
+
@use_np
def test_fix():
    """fix truncates toward zero; its gradient is zero everywhere."""
    inp = np.ones((2, INT_OVERFLOW))
    inp[-1, -1] = -2.9
    inp[0, 0] = 2.9
    inp.attach_grad()
    with mx.autograd.record():
        out = np.fix(inp)
    out.backward()
    assert out.shape == inp.shape
    # 2.9 -> 2 and -2.9 -> -2 (both truncated toward zero)
    assert out[0, 0] == 2 and out[-1, -1] == -2
    assert inp.grad.shape == inp.shape
    assert inp.grad[-1, -1] == 0
+
@use_np
def test_flip():
    """flip along the small axis (with autograd) and along the huge axis."""
    inp = np.zeros((2, INT_OVERFLOW))
    inp[0, 0] = 2
    inp.attach_grad()
    with mx.autograd.record():
        out = np.flip(inp, axis=0)
    out.backward()
    assert out.shape == inp.shape
    # the marker moves from row 0 to row 1
    assert out[1, 0] == 2
    assert inp.grad.shape == inp.shape
    assert inp.grad[0, 0] == 1
    flipped_wide = np.flip(inp, axis=1)
    # flipping the huge axis moves the marker to the far end
    assert flipped_wide[0, -1] == 2
+
@use_np
def test_fliplr():
    """fliplr reverses axis 1 of a 3-D tensor whose last axis is huge."""
    inp = np.zeros((1, 2, INT_OVERFLOW))
    inp[0, 0, 0] = 2
    inp.attach_grad()
    with mx.autograd.record():
        out = np.fliplr(inp)
    out.backward()
    assert out.shape == inp.shape
    # the marker moves from axis-1 index 0 to index 1
    assert out[0, 1, 0] == 2
    assert inp.grad.shape == inp.shape
    assert inp.grad[0, 0, 0] == 1
+
@use_np
def test_flipud():
    """flipud reverses axis 0 of a 3-D tensor whose last axis is huge."""
    inp = np.zeros((2, 1, INT_OVERFLOW))
    inp[0, 0, 0] = 2
    inp.attach_grad()
    with mx.autograd.record():
        out = np.flipud(inp)
    out.backward()
    assert out.shape == inp.shape
    # the marker moves from axis-0 index 0 to index 1
    assert out[1, 0, 0] == 2
    assert inp.grad.shape == inp.shape
    assert inp.grad[0, 0, 0] == 1
+
@use_np
def test_full():
    """full with an array fill value (broadcast per-row) and a scalar fill."""
    from_array = np.full((INT_OVERFLOW, 2), np.array([1, 2]))
    assert from_array.shape == (INT_OVERFLOW, 2)
    assert from_array[-1, 0] == 1 and from_array[-1, 1] == 2
    from_scalar = np.full((2, INT_OVERFLOW), 3)
    assert from_scalar.shape == (2, INT_OVERFLOW)
    assert from_scalar[-1, -1] == 3
+
@use_np
def test_full_like():
    """full_like copies a huge shape and fills every element with the scalar."""
    template = np.zeros((INT_OVERFLOW, 2))
    filled = np.full_like(template, 2)
    assert filled.shape == template.shape
    assert filled[-1, -1] == 2
+
@use_np
def test_comparison_family():
    """greater/greater_equal/less/less_equal/equal/not_equal on huge tensors."""
    def run_ops(ops, expected):
        lhs.attach_grad()
        for op, want in zip(ops, expected):
            with mx.autograd.record():
                out = op(lhs, rhs)
            out.backward()
            assert out.shape == lhs.shape
            # check the corner where inputs differ from the bulk
            assert (out[0, 0], out[-1, -1]) == want
            assert lhs.grad.shape == lhs.shape
            # comparisons are non-differentiable; grad is defined as zero
            assert lhs.grad[-1, -1] == 0

    lhs = np.ones((INT_OVERFLOW, 2))
    rhs = np.zeros((INT_OVERFLOW, 2))
    rhs[-1, -1] = 1
    run_ops([np.greater, np.greater_equal,
             np.less, np.less_equal, np.equal, np.not_equal],
            [(True, False), (True, True),
             (False, False), (False, True), (False, True), (True, False)])
+
@use_np
def test_lcm():
    """lcm on huge int32 tensors; integer ops carry zero gradient."""
    lhs = np.ones((2, INT_OVERFLOW), dtype='int32')
    rhs = np.ones((2, INT_OVERFLOW), dtype='int32')
    lhs[-1, -1] = 3
    rhs[-1, -1] = 5
    lhs.attach_grad()
    with mx.autograd.record():
        out = np.lcm(lhs, rhs)
    out.backward()
    assert out.shape == lhs.shape
    # lcm(3, 5) == 15
    assert out[-1, -1] == 15
    assert lhs.grad.shape == lhs.shape
    assert lhs.grad[-1, -1] == 0
+
@use_np
def test_log_family():
    """log, log10, log2 and log1p: values and gradients at x == 100."""
    def run_ops(ops, expected):
        inp.attach_grad()
        for op, (want_val, want_grad) in zip(ops, expected):
            with mx.autograd.record():
                out = op(inp)
            out.backward()
            assert out.shape == inp.shape
            assert_almost_equal(out[-1, -1], np.array([want_val]),
                                rtol=1e-5, atol=1e-5)
            assert inp.grad.shape == inp.shape
            assert_almost_equal(inp.grad[-1, -1], np.array([want_grad]),
                                rtol=1e-5, atol=1e-5)

    inp = np.ones((INT_OVERFLOW, 2))
    inp[-1, -1] = 100
    # (value at 100, gradient at 100) for each op:
    # log: ln(100), 1/100; log10: 2, 1/(100 ln 10);
    # log2: log2(100), 1/(100 ln 2); log1p: ln(101), 1/101
    run_ops([np.log, np.log10, np.log2, np.log1p],
            [(4.6051702, 0.01), (2, 0.00434294),
             (6.643856, 0.01442695), (4.6151204, 0.00990099)])
+
@use_np
def test_expand_dims():
    """expand_dims inserts length-1 axes around a huge 1-D tensor."""
    base = np.zeros((INT_OVERFLOW))
    with_front = np.expand_dims(base, axis=0)
    with_back = np.expand_dims(with_front, axis=2)
    npx.waitall()
    assert with_front.shape == (1, INT_OVERFLOW)
    assert with_back.shape == (1, INT_OVERFLOW, 1)
+
+@use_np
+def test_hamming():
+ A = np.hamming((INT_OVERFLOW))
Review comment:
Good point. I will do a value check according to the formula
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]