jcf94 commented on a change in pull request #8336:
URL: https://github.com/apache/tvm/pull/8336#discussion_r661092648
##########
File path: tests/python/topi/python/test_topi_dense.py
##########
@@ -43,108 +54,115 @@
}
-def verify_dense(batch, in_dim, out_dim, use_bias=True):
- A = te.placeholder((batch, in_dim), name="A")
- B = te.placeholder((out_dim, in_dim), name="B")
- C = te.placeholder((out_dim,), name="C")
- dtype = A.dtype
-
- # use memoize to pickle the test data for next time use
- @memoize("topi.tests.test_topi_dense")
- def get_ref_data():
- a_np = np.random.uniform(size=(batch, in_dim)).astype(dtype)
- b_np = np.random.uniform(size=(out_dim, in_dim)).astype(dtype)
- c_np = np.random.uniform(size=(out_dim,)).astype(dtype)
- if use_bias:
- d_np = np.maximum(np.dot(a_np, b_np.T) + c_np, 0.0)
- else:
- d_np = np.maximum(np.dot(a_np, b_np.T), 0.0)
- return (a_np, b_np, c_np, d_np)
-
- # get the test data
- a_np, b_np, c_np, d_np = get_ref_data()
-
- def check_device(device, dev):
- print("Running on target: %s" % device)
-        for fcompute, fschedule in tvm.topi.testing.dispatch(device, _dense_implement):
- with tvm.target.Target(device):
- D = fcompute(A, B, C if use_bias else None)
- D = topi.nn.relu(D)
- s = fschedule([D])
- a = tvm.nd.array(a_np, dev)
- b = tvm.nd.array(b_np, dev)
- c = tvm.nd.array(c_np, dev)
-            d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=dtype), dev)
- f = tvm.build(s, [A, B, C, D], device, name="dense")
- f(a, b, c, d)
- tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-5)
-
- for device, dev in tvm.testing.enabled_targets():
- check_device(device, dev)
-
-
-def verify_dense_int8(batch, in_dim, out_dim, use_bias=True):
- dtype = "int8"
- out_dtype = "int32"
- A = te.placeholder((batch, in_dim), name="A", dtype=dtype)
- B = te.placeholder((out_dim, in_dim), name="B", dtype=dtype)
[email protected](cache_return_value=True)
+def dense_ref_data(batch_size, in_dim, out_dim, use_bias, in_dtype, out_dtype):
+ if "float" in in_dtype:
+ a_np = np.random.uniform(size=(batch_size, in_dim)).astype(in_dtype)
+ b_np = np.random.uniform(size=(out_dim, in_dim)).astype(in_dtype)
+ c_np = np.random.uniform(size=(out_dim,)).astype(out_dtype)
+ elif in_dtype == "int8":
+        a_np = np.random.randint(low=-128, high=127, size=(batch_size, in_dim)).astype(in_dtype)
+        b_np = np.random.randint(low=-128, high=127, size=(out_dim, in_dim)).astype(in_dtype)
+        c_np = np.random.randint(low=-128, high=127, size=(out_dim,)).astype(out_dtype)
+ else:
+        raise ValueError("No method to generate test data for data type '{}'".format(in_dtype))
+
+ matmul = np.dot(a_np.astype(out_dtype), b_np.T.astype(out_dtype))
+
+ if use_bias:
+ matmul += c_np
+
+ d_np = np.maximum(matmul, 0)
+ return (a_np, b_np, c_np, d_np)
+
+
+def test_dense(
Review comment:
Oh, my bad. That's really a helpful feature.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]