guberti commented on code in PR #13051:
URL: https://github.com/apache/tvm/pull/13051#discussion_r994222745
##########
tests/python/relay/strategy/arm_cpu/test_depthwise_conv2d.py:
##########
@@ -14,203 +14,125 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-import numpy as np
-import tvm
-import tvm.testing
-from tvm import relay
-from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
-from tvm.micro.testing.aot_test_utils import AOT_CORSTONE300_RUNNER
-
-
-class BasicDepthwiseConv2dTests:
- @tvm.testing.requires_corstone300
- def test_conv2d(
- self,
- data_shape,
- data_layout,
- kernel_size,
- kernel_layout,
- num_filter,
- strides,
- padding,
- dilation,
- dtype,
- schedule_name,
- ):
- """Test a subgraph with a single conv2d operator."""
- ishape = data_shape
- groups = num_filter
-
- assert groups > 1, f"groups should be more than 1 to create a
depthwise conv2d."
-
- if data_layout == "NCHW" and kernel_layout == "OIHW":
- assert (
- num_filter == data_shape[1]
- ), f"Output channels({num_filter}) should be equal to input
channels({data_shape[1]})."
- wshape = (num_filter, data_shape[1] // groups, *kernel_size)
- elif data_layout == "NHWC" and kernel_layout == "HWOI":
- assert (
- num_filter == data_shape[3]
- ), f"Output channels({num_filter}) should be equal to input
channels({data_shape[3]})."
- wshape = (*kernel_size, num_filter, data_shape[3] // groups)
- else:
- raise ValueError(
- f"Incorrect data layout({data_layout}) and kernel
layout({kernel_layout})."
- )
-
- weight_data = np.random.randint(low=-10, high=10, size=wshape,
dtype=dtype)
-
- input0 = relay.var("input", relay.TensorType(ishape, dtype))
- weight0 = relay.const(weight_data)
- out0 = relay.op.nn.conv2d(
- input0,
- weight0,
- kernel_size=kernel_size,
- strides=strides,
- padding=padding,
- groups=groups,
- dilation=(dilation, dilation),
- data_layout=data_layout,
- kernel_layout=kernel_layout,
- out_dtype="int32",
- out_layout=data_layout,
- )
- ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))
-
- input1 = relay.var("input", relay.TensorType(ishape, dtype))
- weight1 = relay.const(weight_data)
- out1 = relay.op.nn.conv2d(
- input1,
- weight1,
- kernel_size=kernel_size,
- strides=strides,
- padding=padding,
- groups=groups,
- dilation=(dilation, dilation),
- data_layout=data_layout,
- kernel_layout=kernel_layout,
- out_dtype="int32",
- out_layout=data_layout,
- )
- mod = tvm.IRModule.from_expr(relay.Function([input1], out1))
-
- inputs = {"input": np.random.randint(low=-128, high=127, size=ishape,
dtype=dtype)}
- output_list = generate_ref_data(ref_mod, inputs)
-
- compile_and_run(
- AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
- runner=AOT_CORSTONE300_RUNNER,
- interface_api="c",
- use_unpacked_api=True,
- target_opts={
- "-keys": "arm_cpu",
- "-mcpu": "cortex-m7",
- },
- schedule_name=schedule_name,
- )
-
-
-class TestDepthwiseConv2d_NCHW_OIHW(BasicDepthwiseConv2dTests):
+"""Tests for arm_cpu schedules for depthwise_conv2d."""
+
+from test_generalized_conv2d import GeneralizedConv2dTests
+from tvm.testing import parameter, parameters, main
+
+
+class DepthwiseConv2dTests(GeneralizedConv2dTests):
+ """Helper for constructing depthwise Conv2ds. Sets kernel layout to what
x86 code supports."""
+
+ def setup_method(self):
+ self.ref_kernel_layout = "HWOI"
+
+
+class TestDepthwiseConv2d_NCHW_OIHW(DepthwiseConv2dTests):
"""This test is for depthwise_conv2d_nchw.arm_cpu schedule."""
- data_shape, kernel_size, num_filter, strides, padding, dilation =
tvm.testing.parameters(
- ((1, 16, 32, 32), (3, 3), 16, 1, 0, 1),
- ((1, 32, 10, 3), (3, 3), 32, 1, 0, 1),
- ((1, 32, 32, 16), (3, 3), 32, 1, (0, 2, 2, 0), 1),
- ((1, 32, 32, 16), (3, 3), 32, 1, 0, 1),
- ((1, 32, 32, 16), (3, 3), 32, 1, 0, 1),
- ((1, 32, 32, 16), (3, 3), 32, 1, (0, 2, 2, 0), 2),
- ((1, 16, 32, 16), (3, 3), 16, 1, (1, 1, 2, 2), 2),
+ data_shape, groups, kernel_size, num_filter, strides, padding, dilation =
parameters(
+ ((1, 32, 32, 16), 16, (3, 3), 16, 1, 0, 1),
+ ((1, 10, 3, 32), 32, (3, 3), 32, 1, 0, 1),
+ ((1, 32, 16, 32), 32, (3, 3), 32, 1, (0, 2, 2, 0), 1),
+ ((1, 32, 16, 32), 32, (3, 3), 32, 1, 0, 1),
+ ((1, 32, 16, 32), 32, (3, 3), 32, 1, 0, 1),
+ ((1, 32, 16, 32), 32, (3, 3), 32, 1, (0, 2, 2, 0), 2),
+ ((1, 32, 16, 16), 16, (3, 3), 16, 1, (1, 1, 2, 2), 2),
)
- data_layout = tvm.testing.parameter("NCHW")
- dtype = tvm.testing.parameter("int8", "int16")
- kernel_layout = tvm.testing.parameter("OIHW")
- schedule_name = tvm.testing.parameter("depthwise_conv2d_nchw.arm_cpu")
+
+ in_dtype = parameter("int8", "int16")
+ data_layout = parameter("NCHW")
+ kernel_layout = parameter("OIHW")
+ out_layout = parameter("NCHW")
+ schedule_name = parameter("depthwise_conv2d_nchw.arm_cpu")
-class TestDepthwiseConv2d_NHWC_HWOI(BasicDepthwiseConv2dTests):
+class TestDepthwiseConv2d_NHWC_HWOI(DepthwiseConv2dTests):
"""This test is for depthwise_conv2d_nhwc.generic schedule."""
- data_shape, kernel_size, num_filter, strides, padding, dilation =
tvm.testing.parameters(
- ((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
- ((1, 32, 10, 16), (3, 3), 16, 1, 0, 1),
- ((1, 49, 10, 64), (10, 4), 64, (2, 1), (4, 1, 5, 1), 1),
- ((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 1),
- ((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
- ((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
- ((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 2),
- ((1, 32, 32, 16), (3, 3), 16, 1, (1, 1, 2, 2), 2),
+ data_shape, groups, kernel_size, num_filter, strides, padding, dilation =
parameters(
Review Comment:
Being a `depthwise_conv2d` means that the number of groups equals the number
of _input channels_, which might not equal the number of filters. Your point
still stands, though — I've added a TVM fixture to avoid specifying the number
of groups on each line.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]