apeforest commented on a change in pull request #15943: Added tests to verify
Large Vector Support for initial set of ops
URL: https://github.com/apache/incubator-mxnet/pull/15943#discussion_r317310379
##########
File path: tests/nightly/test_large_vector.py
##########
@@ -33,6 +35,273 @@ def test_slice():
assert res.shape[0] == MEDIUM_X
def test_gluon_embedding():
    """Forward a zero index tensor through a Gluon Embedding with a large
    output dimension and check the result's shape and element count."""
    m = gluon.nn.Embedding(1, LARGE_Y)
    m.initialize()
    a = nd.zeros((LARGE_Y, 1))
    b = m(a)
    assert b.shape == (LARGE_Y, 1, LARGE_Y)
    # The element count must equal the product of the dims asserted above
    # (LARGE_Y * 1 * LARGE_Y); the previous `LARGE_X*2` comparison only held
    # for one particular choice of the module-level constants.
    assert b.asnumpy().size == LARGE_Y * LARGE_Y
+
+
def test_ndarray_zeros():
    """nd.zeros allocates a large 1-D array with correct metadata and content."""
    zeros_vec = nd.zeros(shape=LARGE_X)
    assert zeros_vec[-1] == 0
    assert zeros_vec.shape == (LARGE_X,)
    assert zeros_vec.size == LARGE_X
+
+
def test_ndarray_ones():
    """nd.ones fills a large vector with ones; the total sum equals its length."""
    ones_vec = nd.ones(shape=LARGE_X)
    assert ones_vec[-1] == 1
    assert nd.sum(ones_vec).asnumpy() == LARGE_X
+
+
@with_seed()
def test_ndarray_random_uniform():
    """random.uniform can sample a large vector (spot-check the last element)."""
    sample = nd.random.uniform(shape=LARGE_X)
    assert sample[-1] != 0
+
+
@with_seed()
def test_ndarray_random_randint():
    """random.randint supports large output shapes and int64 bounds > 2**32."""
    a = nd.random.randint(100, 10000, shape=LARGE_X)
    assert a.shape == (LARGE_X,)
    # check if randint can generate value greater than 2**32 (large)
    low_large_value = 2**32
    high_large_value = 2**34
    a = nd.random.randint(low_large_value, high_large_value, dtype=np.int64)
    low = mx.nd.array([low_large_value], dtype='int64')
    high = mx.nd.array([high_large_value], dtype='int64')
    # randint samples from the half-open interval [low, high): the low bound
    # is inclusive, so the original strict `__gt__` check could spuriously
    # fail whenever the draw equals `low`. Use operators instead of dunders.
    assert a >= low and a < high
+
+
def test_ndarray_empty():
    """nd.empty can allocate a large uninitialized 1-D array."""
    buf = nd.empty(LARGE_X)
    assert buf.shape == (LARGE_X,)
+
+
def test_elementwise():
    """Element-wise array+array, array+scalar, and sqrt on large vectors."""
    lhs = nd.ones(shape=LARGE_X)
    rhs = nd.ones(shape=LARGE_X)
    out = lhs + rhs
    assert out[-1].asnumpy() == 2
    out = lhs + 1
    assert out[-1].asnumpy() == 2
    out = nd.sqrt(lhs + 8)
    assert out[-1].asnumpy() == 3
+
+
def test_reduce():
    """Full sum-reduction over a large 2-D array of ones."""
    mat = nd.ones(shape=(LARGE_X, SMALL_Y))
    expected_total = mat.shape[0] * mat.shape[1]
    assert nd.sum(mat).asnumpy() == expected_total
+
+
def test_broadcast():
    """broadcast_to and broadcast_like expand a large column vector."""
    a = nd.ones(shape=(LARGE_X, SMALL_Y*2))
    b = nd.arange(0, LARGE_X).reshape(LARGE_X, 1)
    res = nd.broadcast_to(b, shape=(b.shape[0], SMALL_Y*2))
    # arange(0, LARGE_X) excludes the stop value, so the last row of `b`
    # holds LARGE_X - 1; comparing against LARGE_X matched zero elements
    # and the equality with res.shape[1] could never hold.
    assert np.sum(res[-1].asnumpy() == LARGE_X - 1) == res.shape[1]
    res = mx.nd.broadcast_like(b, a)
    assert np.sum(res[-1].asnumpy() == LARGE_X - 1) == res.shape[1]
+
+
def test_clip():
    """clip caps the tail of a large arange at a_max."""
    values = nd.arange(0, LARGE_X)
    clipped = nd.clip(values, a_min=100, a_max=1000)
    # the last element exceeds a_max, so it must be clipped to exactly 1000
    assert np.sum(clipped[-1].asnumpy() == 1000) == 1
+
+
def test_argmin():
    """argmin over a large 1-D arange returns the index of the minimum."""
    a = nd.arange(0, LARGE_X)
    idx = mx.nd.argmin(a, axis=0)
    # Reducing a 1-D array over axis 0 yields a single index — the original
    # `idx.shape[0] == SMALL_Y` expectation was wrong. The minimum of
    # arange(0, LARGE_X) sits at position 0.
    assert idx.size == 1
    assert idx[0] == 0
+
+
def test_tile():
    """tile lays two copies of a large vector end to end."""
    base = nd.arange(0, LARGE_X)
    tiled = nd.tile(base, reps=(1, 2))
    # the second copy starts at offset LARGE_X and mirrors the first copy
    assert tiled[0][LARGE_X] == tiled[0][0]
    assert tiled[0][LARGE_X - 1] == tiled[0][-1]
+
+
def test_take():
    """take gathers the last 1000 elements of a large vector of ones."""
    src = nd.ones(shape=LARGE_X)
    indices = nd.arange(LARGE_X - 1000, LARGE_X)
    gathered = nd.take(src, indices)
    assert np.sum(gathered.asnumpy() == 1) == gathered.shape[0]
+
+
def test_slice_assign():
    """Slice assignment writes into the last position of a large vector."""
    vec = nd.ones(shape=LARGE_X)
    vec[LARGE_X - 1:LARGE_X] = 1000
    assert np.sum(vec[-1].asnumpy() == 1000) == 1
+
+
def test_expand_dims():
    """expand_dims prepends a unit axis to a large vector."""
    vec = nd.ones(shape=LARGE_X)
    expanded = nd.expand_dims(vec, axis=0)
    assert expanded[0][0] == 1
    assert expanded.shape == (1, vec.shape[0])
+
+
def test_squeeze():
    """squeeze removes the unit axis added by expand_dims (round trip)."""
    original = nd.ones(shape=LARGE_X)
    expanded = nd.expand_dims(original, axis=0)
    squeezed = nd.squeeze(expanded)
    assert original[0] == squeezed[0]
    assert squeezed.shape == original.shape
+
+
def test_broadcast_div():
    """Element-wise division of two large vectors yields 0.5 everywhere."""
    numerator = nd.ones(shape=LARGE_X)
    denominator = nd.ones(shape=LARGE_X) * 2
    quotient = numerator / denominator
    assert np.sum(quotient.asnumpy() == 0.5) == numerator.shape[0]
+
+
def test_Dense(ctx=mx.cpu(0)):
    """Forward pass of a 2-unit Dense layer over a large input batch."""
    inputs = mx.nd.ones(shape=LARGE_X)
    layer = gluon.nn.Dense(2)
    layer.initialize(ctx=ctx)
    outputs = layer(inputs)
    # block until the async forward pass completes before checking shape
    outputs.wait_to_read()
    assert outputs.shape == (LARGE_X, 2)
+
+
def test_pick():
    """pick selects one element per row of a large 2-column array."""
    data = mx.nd.ones(shape=(LARGE_X, 2))
    index = mx.nd.ones(shape=LARGE_X)
    picked = mx.nd.pick(data, index)
    assert picked.shape == index.shape
+
+
def test_depthtospace():
    """depth_to_space on a large batch matches a NumPy reference."""
    def numpy_depth_to_space(x, blocksize):
        # reference implementation: move depth blocks into spatial dims
        b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        tmp = np.reshape(x, [b, blocksize, blocksize,
                             c // (blocksize**2), h, w])
        tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
        return np.reshape(tmp, [b, c // (blocksize**2),
                                h * blocksize, w * blocksize])

    shape_inp = (LARGE_X, 4, 1, 1)
    data = rand_ndarray(shape_inp, 'default')
    data_np = data.asnumpy()
    expected = numpy_depth_to_space(data_np, 2)
    output = mx.nd.depth_to_space(data, 2)
    assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)
+
+
+def test_spacetodepth():
Review comment:
Why is a 1D test needed for this operator?
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services