Lunderberg commented on a change in pull request #8621:
URL: https://github.com/apache/tvm/pull/8621#discussion_r681855158
##########
File path: tests/python/frontend/onnx/test_forward.py
##########
@@ -1404,191 +1424,181 @@ def verify_upsample3d_trilinear():
model = helper.make_model(graph, producer_name="upsample_trilinear_test")
# TODO(jwfromm): Trilinear upsampling not supported in 1.0.0 onnxruntime.
# Replace topi comparison with verify_with_ort once we update.
- for target, dev in tvm.testing.enabled_targets():
- tvm_out = get_tvm_output(model, in_array, target, dev, out_shape,
"float32")
- tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
-
-
[email protected]_gpu
-def test_upsample():
- verify_upsample_nearest()
- verify_upsample_bilinear()
- verify_upsample3d_nearest()
- verify_upsample3d_trilinear()
+ tvm_out = get_tvm_output(model, in_array, target, dev, out_shape,
"float32")
+ tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
-def verify_softmax(inshape, axis):
- opname = "Softmax"
- indata = np.random.uniform(size=inshape).astype(np.float32)
- outshape = inshape
- y = helper.make_node(opname, ["in"], ["out"])
- if axis is not None:
- axis_attr = helper.make_attribute("axis", axis)
- y.attribute.append(axis_attr)
[email protected]_targets
+def test_softmax(target, dev):
+ def verify_softmax(inshape, axis):
+ opname = "Softmax"
+ indata = np.random.uniform(size=inshape).astype(np.float32)
+ outshape = inshape
+ y = helper.make_node(opname, ["in"], ["out"])
+ if axis is not None:
+ axis_attr = helper.make_attribute("axis", axis)
+ y.attribute.append(axis_attr)
- graph = helper.make_graph(
- [y],
- opname + "_test",
- inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT,
list(indata.shape))],
- outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
list(outshape))],
- )
-
- model = helper.make_model(graph, producer_name=opname + "_test")
- verify_with_ort_with_inputs(model, [indata])
+ graph = helper.make_graph(
+ [y],
+ opname + "_test",
+ inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT,
list(indata.shape))],
+ outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
list(outshape))],
+ )
+ model = helper.make_model(graph, producer_name=opname + "_test")
+ verify_with_ort_with_inputs(model, [indata], target=target, dev=dev)
[email protected]_gpu
-def test_softmax():
verify_softmax((1, 10), None)
verify_softmax((1, 10), 1)
-def verify_min(input_dim):
- dtype = "float32"
-
- a_np1 = np.random.uniform(size=input_dim).astype(dtype)
- a_np2 = np.random.uniform(size=input_dim).astype(dtype)
- a_np3 = np.random.uniform(size=input_dim).astype(dtype)
[email protected]_targets
+def test_forward_min(target, dev):
+ def verify_min(input_dim):
+ dtype = "float32"
- min_node = helper.make_node("Min", ["a_np1", "a_np2", "a_np3"], ["out"])
+ a_np1 = np.random.uniform(size=input_dim).astype(dtype)
+ a_np2 = np.random.uniform(size=input_dim).astype(dtype)
+ a_np3 = np.random.uniform(size=input_dim).astype(dtype)
- graph = helper.make_graph(
- [min_node],
- "Min_test",
- inputs=[
- helper.make_tensor_value_info("a_np1", TensorProto.FLOAT,
list(input_dim)),
- helper.make_tensor_value_info("a_np2", TensorProto.FLOAT,
list(input_dim)),
- helper.make_tensor_value_info("a_np3", TensorProto.FLOAT,
list(input_dim)),
- ],
- outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
list(input_dim))],
- )
+ min_node = helper.make_node("Min", ["a_np1", "a_np2", "a_np3"],
["out"])
- model = helper.make_model(graph, producer_name="Min_test")
- verify_with_ort_with_inputs(model, [a_np1, a_np2, a_np3])
+ graph = helper.make_graph(
+ [min_node],
+ "Min_test",
+ inputs=[
+ helper.make_tensor_value_info("a_np1", TensorProto.FLOAT,
list(input_dim)),
+ helper.make_tensor_value_info("a_np2", TensorProto.FLOAT,
list(input_dim)),
+ helper.make_tensor_value_info("a_np3", TensorProto.FLOAT,
list(input_dim)),
+ ],
+ outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
list(input_dim))],
+ )
+ model = helper.make_model(graph, producer_name="Min_test")
+ verify_with_ort_with_inputs(model, [a_np1, a_np2, a_np3],
target=target, dev=dev)
[email protected]_gpu
-def test_forward_min():
verify_min((1, 3, 20, 20))
verify_min((20, 20))
-def verify_max(input_dim):
- dtype = "float32"
-
- a_np1 = np.random.uniform(size=input_dim).astype(dtype)
- a_np2 = np.random.uniform(size=input_dim).astype(dtype)
- a_np3 = np.random.uniform(size=input_dim).astype(dtype)
[email protected]_targets
+def test_forward_max(target, dev):
+ def verify_max(input_dim):
+ dtype = "float32"
- max_node = helper.make_node("Max", ["a_np1", "a_np2", "a_np3"], ["out"])
+ a_np1 = np.random.uniform(size=input_dim).astype(dtype)
+ a_np2 = np.random.uniform(size=input_dim).astype(dtype)
+ a_np3 = np.random.uniform(size=input_dim).astype(dtype)
- graph = helper.make_graph(
- [max_node],
- "Max_test",
- inputs=[
- helper.make_tensor_value_info("a_np1", TensorProto.FLOAT,
list(input_dim)),
- helper.make_tensor_value_info("a_np2", TensorProto.FLOAT,
list(input_dim)),
- helper.make_tensor_value_info("a_np3", TensorProto.FLOAT,
list(input_dim)),
- ],
- outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
list(input_dim))],
- )
+ max_node = helper.make_node("Max", ["a_np1", "a_np2", "a_np3"],
["out"])
- model = helper.make_model(graph, producer_name="Max_test")
- verify_with_ort_with_inputs(model, [a_np1, a_np2, a_np3])
+ graph = helper.make_graph(
+ [max_node],
+ "Max_test",
+ inputs=[
+ helper.make_tensor_value_info("a_np1", TensorProto.FLOAT,
list(input_dim)),
+ helper.make_tensor_value_info("a_np2", TensorProto.FLOAT,
list(input_dim)),
+ helper.make_tensor_value_info("a_np3", TensorProto.FLOAT,
list(input_dim)),
+ ],
+ outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
list(input_dim))],
+ )
+ model = helper.make_model(graph, producer_name="Max_test")
+ verify_with_ort_with_inputs(model, [a_np1, a_np2, a_np3],
target=target, dev=dev)
[email protected]_gpu
-def test_forward_max():
verify_max((1, 3, 20, 20))
verify_max((20, 20))
-def verify_mean(input_dim):
- dtype = "float32"
[email protected]_targets
+def test_forward_mean(target, dev):
+ def verify_mean(input_dim):
+ dtype = "float32"
- a_np1 = np.random.uniform(size=input_dim).astype(dtype)
- a_np2 = np.random.uniform(size=input_dim).astype(dtype)
- a_np3 = np.random.uniform(size=input_dim).astype(dtype)
+ a_np1 = np.random.uniform(size=input_dim).astype(dtype)
+ a_np2 = np.random.uniform(size=input_dim).astype(dtype)
+ a_np3 = np.random.uniform(size=input_dim).astype(dtype)
- mean_node = helper.make_node("Mean", ["a_np1", "a_np2", "a_np3"], ["out"])
+ mean_node = helper.make_node("Mean", ["a_np1", "a_np2", "a_np3"],
["out"])
- graph = helper.make_graph(
- [mean_node],
- "Mean_test",
- inputs=[
- helper.make_tensor_value_info("a_np1", TensorProto.FLOAT,
list(input_dim)),
- helper.make_tensor_value_info("a_np2", TensorProto.FLOAT,
list(input_dim)),
- helper.make_tensor_value_info("a_np3", TensorProto.FLOAT,
list(input_dim)),
- ],
- outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
list(input_dim))],
- )
-
- model = helper.make_model(graph, producer_name="Mean_test")
- verify_with_ort_with_inputs(model, [a_np1, a_np2, a_np3])
+ graph = helper.make_graph(
+ [mean_node],
+ "Mean_test",
+ inputs=[
+ helper.make_tensor_value_info("a_np1", TensorProto.FLOAT,
list(input_dim)),
+ helper.make_tensor_value_info("a_np2", TensorProto.FLOAT,
list(input_dim)),
+ helper.make_tensor_value_info("a_np3", TensorProto.FLOAT,
list(input_dim)),
+ ],
+ outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
list(input_dim))],
+ )
+ model = helper.make_model(graph, producer_name="Mean_test")
+ verify_with_ort_with_inputs(model, [a_np1, a_np2, a_np3],
target=target, dev=dev)
[email protected]_gpu
-def test_forward_mean():
verify_mean((1, 3, 20, 20))
verify_mean((20, 20))
-def verify_hardsigmoid(input_dim, alpha, beta):
- dtype = "float32"
-
- a_np1 = np.random.uniform(size=input_dim).astype(dtype)
[email protected]_targets
+def test_forward_hardsigmoid(target, dev):
+ def verify_hardsigmoid(input_dim, alpha, beta):
+ dtype = "float32"
- hardsigmoid_node = helper.make_node("HardSigmoid", ["a_np1"], ["out"],
alpha=alpha, beta=beta)
+ a_np1 = np.random.uniform(size=input_dim).astype(dtype)
- graph = helper.make_graph(
- [hardsigmoid_node],
- "HardSigmoid_test",
- inputs=[helper.make_tensor_value_info("a_np1", TensorProto.FLOAT,
list(input_dim))],
- outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
list(input_dim))],
- )
+ hardsigmoid_node = helper.make_node(
+ "HardSigmoid", ["a_np1"], ["out"], alpha=alpha, beta=beta
+ )
- model = helper.make_model(graph, producer_name="HardSigmoid_test")
- verify_with_ort_with_inputs(model, [a_np1])
+ graph = helper.make_graph(
+ [hardsigmoid_node],
+ "HardSigmoid_test",
+ inputs=[helper.make_tensor_value_info("a_np1", TensorProto.FLOAT,
list(input_dim))],
+ outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT,
list(input_dim))],
+ )
+ model = helper.make_model(graph, producer_name="HardSigmoid_test")
+ verify_with_ort_with_inputs(model, [a_np1], target=target, dev=dev)
[email protected]_gpu
-def test_forward_hardsigmoid():
verify_hardsigmoid((1, 3, 20, 20), 0.5, 0.6)
verify_hardsigmoid((20, 20), 0.3, 0.4)
-def verify_argreduce(input_dim, op_name, axis=None, keepdims=None):
- a_np1 = np.random.uniform(-10, 10, input_dim).astype(np.int32)
- out_shape = list(a_np1.shape)
- def_axis = axis if axis is not None else 0
- if keepdims == 1 or keepdims == None:
- out_shape[def_axis] = 1
- else:
- out_shape.pop(def_axis)
-
- node = onnx.helper.make_node(op_name, inputs=["a_np1"], outputs=["out"])
+# TODO (mbrookhart, electriclilies) Fix argmin on GPU and enable this test
[email protected]_targets
+def test_forward_arg_min_max(target, dev):
+ if "cuda" in target:
+ pytest.skip("Fails on CUDA")
Review comment:
Follow-up, it looks like currently the
`@tvm.testing.parametrize_targets` decorator doesn't respect the settings in
`known_failing_targets` or `exclude_targets`. There's a slightly different
code-path between the explicit parametrization and the auto-parametrization.
This is resolved with #8542, which merges these two paths.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: reviews-unsubscribe@tvm.apache.org
For queries about this service, please contact Infrastructure at:
users@infra.apache.org