This is an automated email from the ASF dual-hosted git repository.
lukhut pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new 0dd3d4aab6 [ACL][TESTING] Use pytest.mark.parametrize in ACL conv2d tests (#14011)
0dd3d4aab6 is described below
commit 0dd3d4aab64bd478341748f33082a5506d14a50b
Author: Elen Kalda <[email protected]>
AuthorDate: Thu Feb 16 19:53:03 2023 +0000
[ACL][TESTING] Use pytest.mark.parametrize in ACL conv2d tests (#14011)
Parametrize the tests instead of looping over a long list of configs
in each test body.
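
For context, the pattern this change applies, shown as a minimal
sketch (run_case is a hypothetical stand-in for the real
build-and-verify logic, not a helper from this test suite):

    import pytest

    def run_case(kernel_h, kernel_w):
        # Stand-in for building the model and checking outputs.
        assert kernel_h > 0 and kernel_w > 0

    # Before: one test body loops over every configuration, so the
    # first failure aborts the loop and later configs never run.
    def test_conv2d_loop_style():
        for kernel_h, kernel_w in [(2, 2), (3, 3), (5, 5)]:
            run_case(kernel_h, kernel_w)

    # After: pytest collects one test case per configuration, so
    # failures are reported per config and a single case can be
    # selected, e.g. "pytest -k trial1" runs only the second config.
    @pytest.mark.parametrize("trial", [(2, 2), (3, 3), (5, 5)])
    def test_conv2d_parametrized(trial):
        kernel_h, kernel_w = trial
        run_case(kernel_h, kernel_w)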
---
.../contrib/test_arm_compute_lib/test_conv2d.py | 395 +++++++++++----------
1 file changed, 201 insertions(+), 194 deletions(-)
diff --git a/tests/python/contrib/test_arm_compute_lib/test_conv2d.py b/tests/python/contrib/test_arm_compute_lib/test_conv2d.py
index 8acf3d81d9..df708020bf 100644
--- a/tests/python/contrib/test_arm_compute_lib/test_conv2d.py
+++ b/tests/python/contrib/test_arm_compute_lib/test_conv2d.py
@@ -294,17 +294,9 @@ def _get_expected_codegen(
return inputs
-def test_conv2d():
- Device.load("test_config.json")
-
- if skip_runtime_test():
- return
-
- device = Device()
- np.random.seed(0)
-
- dtype = "float32"
- trials = [
[email protected](
+ "trial",
+ [
# Normal convolution
[2, 2, (1, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, False, False), False],
[2, 1, (2, 2), (1, 1), (1, 1), 7, (12, 15, 16), (False, False, True), False],
@@ -322,9 +314,20 @@ def test_conv2d():
[3, 3, (2, 2), (2, 2), (1, 1), 14, (10, 10, 14), (True, False, False), True],
[5, 5, (0, 0), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, False), True],
[3, 3, (1, 1), (2, 2), (1, 1), 14, (10, 10, 14), (False, True, True), True],
- ]
+ ],
+)
+def test_conv2d(trial):
+ Device.load("test_config.json")
- for (
+ if skip_runtime_test():
+ return
+
+ device = Device()
+ np.random.seed(0)
+
+ dtype = "float32"
+
+ (
kernel_h,
kernel_w,
pad,
@@ -334,54 +337,52 @@ def test_conv2d():
shape,
composite,
is_depthwise,
- ) in trials:
- shape = (1, *shape)
- if is_depthwise:
- groups = shape[3]
- else:
- groups = 1
- outputs = []
- inputs = {
- "a": tvm.nd.array(np.random.uniform(-128, 127,
shape).astype(dtype)),
- }
-
- func, params = _get_model(
- shape,
- kernel_h,
- kernel_w,
- pad,
- stride,
- dilation,
- groups,
- dtype,
- out_channels,
- iter(inputs),
- has_pad=composite[0],
- has_bias=composite[1],
- has_activation=composite[2],
- )
- for acl in [False, True]:
- outputs.append(build_and_run(func, inputs, 1, params, device, enable_acl=acl)[0])
-
- config = {
- "shape": shape,
- "groups": groups,
- "kernel size": (kernel_h, kernel_w),
- "padding": pad,
- "stride": stride,
- "dilation": dilation,
- "out channels": out_channels,
- "composite operators (pad, bias, activation)": composite,
- }
- verify(outputs, atol=0.002, rtol=0.01, config=config)
-
-
-def test_codegen_conv2d():
- if skip_codegen_test():
- return
+ ) = trial
+ shape = (1, *shape)
+ if is_depthwise:
+ groups = shape[3]
+ else:
+ groups = 1
+ outputs = []
+ inputs = {
+ "a": tvm.nd.array(np.random.uniform(-128, 127, shape).astype(dtype)),
+ }
- dtype = "float32"
- trials = [
+ func, params = _get_model(
+ shape,
+ kernel_h,
+ kernel_w,
+ pad,
+ stride,
+ dilation,
+ groups,
+ dtype,
+ out_channels,
+ iter(inputs),
+ has_pad=composite[0],
+ has_bias=composite[1],
+ has_activation=composite[2],
+ )
+ # Generate results for ACL conv2d and TVM native conv2d for comparison
+ for acl in [False, True]:
+ outputs.append(build_and_run(func, inputs, 1, params, device, enable_acl=acl)[0])
+
+ config = {
+ "shape": shape,
+ "groups": groups,
+ "kernel size": (kernel_h, kernel_w),
+ "padding": pad,
+ "stride": stride,
+ "dilation": dilation,
+ "out channels": out_channels,
+ "composite operators (pad, bias, activation)": composite,
+ }
+ verify(outputs, atol=0.002, rtol=0.01, config=config)
+
+
[email protected](
+ "trial",
+ [
# Normal convolution
[2, 2, (1, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, False, False), False],
[2, 1, (2, 2), (1, 1), (1, 1), 7, (12, 15, 16), (False, False, True), False],
@@ -399,9 +400,15 @@ def test_codegen_conv2d():
[3, 3, (2, 2), (2, 2), (1, 1), 14, (10, 10, 14), (True, False, False), True],
[5, 5, (0, 0), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, False), True],
[3, 3, (1, 1), (2, 2), (1, 1), 14, (10, 10, 14), (False, True, True), True],
- ]
+ ],
+)
+def test_codegen_conv2d(trial):
+ if skip_codegen_test():
+ return
- for (
+ dtype = "float32"
+
+ (
kernel_h,
kernel_w,
pad,
@@ -411,40 +418,30 @@ def test_codegen_conv2d():
shape,
composite,
is_depthwise,
- ) in trials:
- shape = (1, *shape)
- if is_depthwise:
- groups = shape[3]
- else:
- groups = 1
- inputs = {"a"}
-
- args = (shape, kernel_h, kernel_w, pad, stride, dilation, groups, dtype, out_channels)
-
- func, params = _get_model(
- *args,
- var_names=iter(inputs),
- has_pad=composite[0],
- has_bias=composite[1],
- has_activation=composite[2],
- )
- exp_codegen = _get_expected_codegen(
- *args, has_bias=composite[1], has_activation=composite[2]
- )
- verify_codegen(func, exp_codegen, 1)
-
+ ) = trial
+ shape = (1, *shape)
+ if is_depthwise:
+ groups = shape[3]
+ else:
+ groups = 1
+ inputs = {"a"}
[email protected]("dtype", QNN_DTYPES)
-def test_qnn_conv2d(dtype):
- Device.load("test_config.json")
+ args = (shape, kernel_h, kernel_w, pad, stride, dilation, groups, dtype, out_channels)
- if skip_runtime_test():
- return
+ func, params = _get_model(
+ *args,
+ var_names=iter(inputs),
+ has_pad=composite[0],
+ has_bias=composite[1],
+ has_activation=composite[2],
+ )
+ exp_codegen = _get_expected_codegen(*args, has_bias=composite[1], has_activation=composite[2])
+ verify_codegen(func, exp_codegen, 1)
- device = Device()
- np.random.seed(0)
- trials = [
[email protected](
+ "trial",
+ [
# Normal convolution
[2, 2, (1, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, False, False), False],
[2, 1, (2, 2), (1, 1), (1, 1), 7, (12, 15, 16), (False, False, True), False],
@@ -462,9 +459,19 @@ def test_qnn_conv2d(dtype):
[3, 3, (2, 2), (2, 2), (1, 1), 14, (10, 10, 14), (True, False, False), True],
[5, 5, (0, 0), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, False), True],
[3, 3, (1, 1), (2, 2), (1, 1), 14, (10, 10, 14), (False, True, True), True],
- ]
+ ],
+)
[email protected]("dtype", QNN_DTYPES)
+def test_qnn_conv2d(trial, dtype):
+ Device.load("test_config.json")
- for (
+ if skip_runtime_test():
+ return
+
+ device = Device()
+ np.random.seed(0)
+
+ (
kernel_h,
kernel_w,
pad,
@@ -474,74 +481,72 @@ def test_qnn_conv2d(dtype):
shape,
composite,
is_depthwise,
- ) in trials:
- shape = (1, *shape)
- if is_depthwise:
- groups = shape[3]
- else:
- groups = 1
- outputs = []
- inputs = {"a": tvm.nd.array(np.random.uniform(0, 255,
shape).astype(dtype))}
-
- input_zp = 100
- input_sc = 0.5
- kernel_zp = 25
- kernel_sc = 0.03
- output_zp, output_sc = _get_qnn_params(
- input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w, shape[3]
- )
+ ) = trial
+ shape = (1, *shape)
+ if is_depthwise:
+ groups = shape[3]
+ else:
+ groups = 1
+ outputs = []
+ inputs = {"a": tvm.nd.array(np.random.uniform(0, 255,
shape).astype(dtype))}
+
+ input_zp = 100
+ input_sc = 0.5
+ kernel_zp = 25
+ kernel_sc = 0.03
+ output_zp, output_sc = _get_qnn_params(
+ input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w, shape[3]
+ )
- func, params = _get_qnn_model(
- shape,
- kernel_h,
- kernel_w,
- pad,
- stride,
- dilation,
- groups,
- dtype,
- out_channels,
- input_zp,
- input_sc,
- kernel_zp,
- kernel_sc,
- output_zp,
- output_sc,
- iter(inputs),
- has_pad=composite[0],
- has_bias=composite[1],
- has_activation=composite[2],
- )
- for acl in [False, True]:
- outputs.append(build_and_run(func, inputs, 1, params, device, enable_acl=acl)[0])
-
- config = {
- "shape": shape,
- "groups": groups,
- "kernel size": (kernel_h, kernel_w),
- "padding": pad,
- "stride": stride,
- "dilation": dilation,
- "out channels": out_channels,
- "composite operators (pad, bias, activation)": composite,
- "input scale": input_sc,
- "input zero point": input_zp,
- "kernel scale": kernel_sc,
- "kernel zero point": kernel_zp,
- "output scale": output_sc,
- "output zero point": output_zp,
- }
-
- atol = 2 if is_depthwise else 1
- verify(outputs, atol=atol, rtol=0, config=config, verify_saturation=True)
+ func, params = _get_qnn_model(
+ shape,
+ kernel_h,
+ kernel_w,
+ pad,
+ stride,
+ dilation,
+ groups,
+ dtype,
+ out_channels,
+ input_zp,
+ input_sc,
+ kernel_zp,
+ kernel_sc,
+ output_zp,
+ output_sc,
+ iter(inputs),
+ has_pad=composite[0],
+ has_bias=composite[1],
+ has_activation=composite[2],
+ )
+ for acl in [False, True]:
+ outputs.append(build_and_run(func, inputs, 1, params, device, enable_acl=acl)[0])
+
+ config = {
+ "shape": shape,
+ "groups": groups,
+ "kernel size": (kernel_h, kernel_w),
+ "padding": pad,
+ "stride": stride,
+ "dilation": dilation,
+ "out channels": out_channels,
+ "composite operators (pad, bias, activation)": composite,
+ "input scale": input_sc,
+ "input zero point": input_zp,
+ "kernel scale": kernel_sc,
+ "kernel zero point": kernel_zp,
+ "output scale": output_sc,
+ "output zero point": output_zp,
+ }
+ atol = 2 if is_depthwise else 1
+ verify(outputs, atol=atol, rtol=0, config=config, verify_saturation=True)
[email protected]("dtype", QNN_DTYPES)
-def test_codegen_qnn_conv2d(dtype):
- if skip_codegen_test():
- return
- trials = [
[email protected]("dtype", QNN_DTYPES)
[email protected](
+ "trial",
+ [
# Normal convolution
[2, 2, (1, 1), (1, 1), (1, 1), 4, (10, 10, 14), (False, False, False), False],
[2, 1, (2, 2), (1, 1), (1, 1), 7, (12, 15, 16), (False, False, True), False],
@@ -559,9 +564,13 @@ def test_codegen_qnn_conv2d(dtype):
[3, 3, (2, 2), (2, 2), (1, 1), 14, (10, 10, 14), (True, False, False), True],
[5, 5, (0, 0), (1, 1), (1, 1), 20, (20, 20, 20), (False, False, False), True],
[3, 3, (1, 1), (2, 2), (1, 1), 14, (10, 10, 14), (False, True, True), True],
- ]
+ ],
+)
+def test_codegen_qnn_conv2d(trial, dtype):
+ if skip_codegen_test():
+ return
- for (
+ (
kernel_h,
kernel_w,
pad,
@@ -571,41 +580,39 @@ def test_codegen_qnn_conv2d(dtype):
shape,
composite,
is_depthwise,
- ) in trials:
- shape = (1, *shape)
- if is_depthwise:
- groups = shape[3]
- else:
- groups = 1
- inputs = {"a"}
-
- input_zp = 100
- input_sc = 0.5
- kernel_zp = 25
- kernel_sc = 0.03
- output_zp, output_sc = _get_qnn_params(
- input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w, shape[3]
- )
+ ) = trial
+ shape = (1, *shape)
+ if is_depthwise:
+ groups = shape[3]
+ else:
+ groups = 1
+ inputs = {"a"}
+
+ input_zp = 100
+ input_sc = 0.5
+ kernel_zp = 25
+ kernel_sc = 0.03
+ output_zp, output_sc = _get_qnn_params(
+ input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w, shape[3]
+ )
- args = (shape, kernel_h, kernel_w, pad, stride, dilation, groups, dtype, out_channels)
-
- func, params = _get_qnn_model(
- *args,
- input_zp=input_zp,
- input_sc=input_sc,
- kernel_zp=kernel_zp,
- kernel_sc=kernel_sc,
- output_zp=output_zp,
- output_sc=output_sc,
- var_names=iter(inputs),
- has_pad=composite[0],
- has_bias=composite[1],
- has_activation=composite[2],
- )
- exp_codegen = _get_expected_codegen(
- *args, has_bias=composite[1], has_activation=composite[2]
- )
- verify_codegen(func, exp_codegen, 1)
+ args = (shape, kernel_h, kernel_w, pad, stride, dilation, groups, dtype, out_channels)
+
+ func, params = _get_qnn_model(
+ *args,
+ input_zp=input_zp,
+ input_sc=input_sc,
+ kernel_zp=kernel_zp,
+ kernel_sc=kernel_sc,
+ output_zp=output_zp,
+ output_sc=output_sc,
+ var_names=iter(inputs),
+ has_pad=composite[0],
+ has_bias=composite[1],
+ has_activation=composite[2],
+ )
+ exp_codegen = _get_expected_codegen(*args, has_bias=composite[1], has_activation=composite[2])
+ verify_codegen(func, exp_codegen, 1)
if __name__ == "__main__":