AndrewZhaoLuo commented on code in PR #13802:
URL: https://github.com/apache/tvm/pull/13802#discussion_r1088178321
##########
tests/python/frontend/onnx/test_forward.py:
##########
@@ -6707,6 +6707,117 @@ def verify_qlinearsigmoid(a_shape):
verify_qlinearsigmoid([])
+@tvm.testing.parametrize_targets("llvm")
+def test_random_bernoulli(target, dev):
+ """test_random_bernoulli"""
+
+ def verify_bernoulli(
+ inputs=None,
+ shape=[],
+ in_dtype="float32",
+ out_dtype="int32",
+ seed=None,
+ target=target,
+ dev=dev,
+ use_vm=False,
+ freeze_params=False,
+ rtol=0.1,
+ atol=0.1,
+ in_out_equal=False,
+ ):
+ def get_bernoulli_model(shape, in_dtype="float32", out_dtype="int32", seed=None):
+ onnx_itype = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(in_dtype)]
+ onnx_otype = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(out_dtype)]
+ node = helper.make_node(
+ "Bernoulli",
+ ["input"],
+ ["output"],
+ )
+ dtype_attr = helper.make_attribute("dtype", onnx_otype)
+ node.attribute.append(dtype_attr)
+ if seed is not None:
+ seed_attr = helper.make_attribute("seed", float(seed))
+ node.attribute.append(seed_attr)
+
+ graph = helper.make_graph(
+ [node],
+ "random_bernoulli_test",
+ inputs=[helper.make_tensor_value_info("input", onnx_itype, list(shape))],
+ outputs=[helper.make_tensor_value_info("output", onnx_otype, list(shape))],
+ )
+ return helper.make_model(graph, producer_name="random_bernoulli_test")
+
+ if inputs is None:
+ assert len(shape) != 0
+ inputs = np.random.uniform(size=shape).astype(in_dtype)
+ else:
+ shape = inputs.shape
+ in_dtype = inputs.dtype
+ model = get_bernoulli_model(shape, in_dtype, out_dtype, seed)
+
+ if use_vm:
+ tvm_out = get_tvm_output_with_vm(
+ model,
+ inputs,
+ target,
+ dev,
+ freeze_params=freeze_params,
+ )
+ else:
+ tvm_out = get_tvm_output(
+ model,
+ inputs,
+ target,
+ dev,
+ )
+
+ if isinstance(tvm_out, list):
+ tvm_out = tvm_out[0]
+ ideal_mean = np.mean(inputs)
+ # check that values are 0 or 1
+ tvm_flat = tvm_out.flatten()
+ for i in range(len(tvm_flat)):
Review Comment:
you can make this vectorized by doing something like
`assert np.array_equal(arr, arr.astype('bool'))`
which will be faster
##########
tests/python/frontend/onnx/test_forward.py:
##########
@@ -6707,6 +6707,117 @@ def verify_qlinearsigmoid(a_shape):
verify_qlinearsigmoid([])
+@tvm.testing.parametrize_targets("llvm")
+def test_random_bernoulli(target, dev):
+ """test_random_bernoulli"""
+
+ def verify_bernoulli(
+ inputs=None,
+ shape=[],
+ in_dtype="float32",
+ out_dtype="int32",
+ seed=None,
+ target=target,
+ dev=dev,
+ use_vm=False,
+ freeze_params=False,
+ rtol=0.1,
+ atol=0.1,
+ in_out_equal=False,
+ ):
+ def get_bernoulli_model(shape, in_dtype="float32", out_dtype="int32", seed=None):
+ onnx_itype = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(in_dtype)]
+ onnx_otype = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(out_dtype)]
+ node = helper.make_node(
+ "Bernoulli",
+ ["input"],
+ ["output"],
+ )
+ dtype_attr = helper.make_attribute("dtype", onnx_otype)
+ node.attribute.append(dtype_attr)
+ if seed is not None:
+ seed_attr = helper.make_attribute("seed", float(seed))
+ node.attribute.append(seed_attr)
+
+ graph = helper.make_graph(
+ [node],
+ "random_bernoulli_test",
+ inputs=[helper.make_tensor_value_info("input", onnx_itype, list(shape))],
+ outputs=[helper.make_tensor_value_info("output", onnx_otype, list(shape))],
+ )
+ return helper.make_model(graph, producer_name="random_bernoulli_test")
+
+ if inputs is None:
+ assert len(shape) != 0
+ inputs = np.random.uniform(size=shape).astype(in_dtype)
+ else:
+ shape = inputs.shape
+ in_dtype = inputs.dtype
+ model = get_bernoulli_model(shape, in_dtype, out_dtype, seed)
+
+ if use_vm:
+ tvm_out = get_tvm_output_with_vm(
+ model,
+ inputs,
+ target,
+ dev,
+ freeze_params=freeze_params,
+ )
+ else:
+ tvm_out = get_tvm_output(
+ model,
+ inputs,
+ target,
+ dev,
+ )
+
+ if isinstance(tvm_out, list):
+ tvm_out = tvm_out[0]
+ ideal_mean = np.mean(inputs)
+ # check that values are 0 or 1
+ tvm_flat = tvm_out.flatten()
+ for i in range(len(tvm_flat)):
+ assert tvm_flat[i] == 0 or tvm_flat[i] == 1
+ if in_out_equal:
+ tvm.testing.assert_allclose(inputs, tvm_out)
+ else:
+ # check that mean value is close to the theoretical one by binomial test
+ bnm_test_res = scipy.stats.binomtest(
+ k=np.sum(tvm_flat, dtype="int32"), n=len(tvm_flat), p=ideal_mean
+ )
+ assert bnm_test_res.pvalue >= 1e-6
+
+ # Test input sequence of 0 and 1
+ inputs = np.random.randint(2, size=[10000]).astype("float32")
+ verify_bernoulli(inputs, in_out_equal=True)
+
+ # Binomial test input with 0.5 values
+ val_num = 10000
+ arr = [0.5] * val_num
+ inputs = np.array(arr).astype("float32")
+ verify_bernoulli(inputs)
+
+ # Binomial test input with 0.1 values
+ arr = [0.1] * val_num
+ inputs = np.array(arr).astype("float32")
+ verify_bernoulli(inputs)
+
+ # Simple test
+ verify_bernoulli(shape=[1000])
+
+ # Floating output type
+ verify_bernoulli(shape=[1000], out_dtype="float32")
+
+ # Double input type
+ verify_bernoulli(shape=[1000], in_dtype="float64")
+
+ # Test N-D tensor generation
+ verify_bernoulli(shape=[2, 4, 100, 100])
+
+ # Test with seed
+ verify_bernoulli(shape=[1000], seed=np.random.randint(1e6))
Review Comment:
For the seed value, I believe you need to check that you get the same values every
time you invoke a new Bernoulli function with the same seed.
##########
tests/python/frontend/onnx/test_forward.py:
##########
@@ -6707,6 +6707,117 @@ def verify_qlinearsigmoid(a_shape):
verify_qlinearsigmoid([])
+@tvm.testing.parametrize_targets("llvm")
+def test_random_bernoulli(target, dev):
+ """test_random_bernoulli"""
+
+ def verify_bernoulli(
+ inputs=None,
+ shape=[],
+ in_dtype="float32",
+ out_dtype="int32",
+ seed=None,
+ target=target,
+ dev=dev,
+ use_vm=False,
+ freeze_params=False,
+ rtol=0.1,
+ atol=0.1,
+ in_out_equal=False,
+ ):
+ def get_bernoulli_model(shape, in_dtype="float32", out_dtype="int32", seed=None):
+ onnx_itype = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(in_dtype)]
+ onnx_otype = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(out_dtype)]
+ node = helper.make_node(
+ "Bernoulli",
+ ["input"],
+ ["output"],
+ )
+ dtype_attr = helper.make_attribute("dtype", onnx_otype)
+ node.attribute.append(dtype_attr)
+ if seed is not None:
+ seed_attr = helper.make_attribute("seed", float(seed))
+ node.attribute.append(seed_attr)
+
+ graph = helper.make_graph(
+ [node],
+ "random_bernoulli_test",
+ inputs=[helper.make_tensor_value_info("input", onnx_itype, list(shape))],
+ outputs=[helper.make_tensor_value_info("output", onnx_otype, list(shape))],
+ )
+ return helper.make_model(graph, producer_name="random_bernoulli_test")
+
+ if inputs is None:
+ assert len(shape) != 0
+ inputs = np.random.uniform(size=shape).astype(in_dtype)
+ else:
+ shape = inputs.shape
+ in_dtype = inputs.dtype
+ model = get_bernoulli_model(shape, in_dtype, out_dtype, seed)
+
+ if use_vm:
+ tvm_out = get_tvm_output_with_vm(
+ model,
+ inputs,
+ target,
+ dev,
+ freeze_params=freeze_params,
+ )
+ else:
+ tvm_out = get_tvm_output(
+ model,
+ inputs,
+ target,
+ dev,
+ )
+
+ if isinstance(tvm_out, list):
+ tvm_out = tvm_out[0]
+ ideal_mean = np.mean(inputs)
+ # check that values are 0 or 1
+ tvm_flat = tvm_out.flatten()
+ for i in range(len(tvm_flat)):
+ assert tvm_flat[i] == 0 or tvm_flat[i] == 1
+ if in_out_equal:
+ tvm.testing.assert_allclose(inputs, tvm_out)
+ else:
+ # check that mean value is close to the theoretical one by binomial test
+ bnm_test_res = scipy.stats.binomtest(
+ k=np.sum(tvm_flat, dtype="int32"), n=len(tvm_flat), p=ideal_mean
+ )
+ assert bnm_test_res.pvalue >= 1e-6
+
+ # Test input sequence of 0 and 1
+ inputs = np.random.randint(2, size=[10000]).astype("float32")
+ verify_bernoulli(inputs, in_out_equal=True)
+
+ # Binomial test input with 0.5 values
+ val_num = 10000
+ arr = [0.5] * val_num
Review Comment:
nit: recommend np.ones([10000]) * 0.5 for numpy style
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: reviews-unsubscribe@tvm.apache.org
For queries about this service, please contact Infrastructure at:
users@infra.apache.org