This is an automated email from the ASF dual-hosted git repository.

mehrdadh pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 62e449cb85 [microTVM][ARM]Add tests for arm schedules (#11472)
62e449cb85 is described below

commit 62e449cb858bde9be0bdd3903f3515916bff0131
Author: Mohamad Katanbaf <[email protected]>
AuthorDate: Wed Jun 1 09:54:10 2022 -0700

    [microTVM][ARM]Add tests for arm schedules (#11472)
    
    * add more tests for arm_cpu schedules
    
    conv1d_ncw, conv1d_nwc, conv2d_NCHWc, depthwise_conv2d_NCHWc, dense_dsp,
    avg_pool and max_pool tests are added.
    
    Co-authored-by: Mohamad <[email protected]>
---
 .../python/relay/strategy/arm_cpu/test_avg_pool.py | 168 +++++++++++++++++++++
 .../relay/strategy/arm_cpu/test_conv1d_ncw.py      | 117 ++++++++++++++
 .../relay/strategy/arm_cpu/test_conv1d_nwc.py      | 145 ++++++++++++++++++
 .../relay/strategy/arm_cpu/test_conv2d_NCHWc.py    | 138 +++++++++++++++++
 .../relay/strategy/arm_cpu/test_dense_dsp.py       |  90 +++++++++++
 .../arm_cpu/test_depthwise_conv2d_NCHWc.py         | 121 +++++++++++++++
 .../python/relay/strategy/arm_cpu/test_max_pool.py | 135 +++++++++++++++++
 7 files changed, 914 insertions(+)

diff --git a/tests/python/relay/strategy/arm_cpu/test_avg_pool.py 
b/tests/python/relay/strategy/arm_cpu/test_avg_pool.py
new file mode 100644
index 0000000000..31a812b38e
--- /dev/null
+++ b/tests/python/relay/strategy/arm_cpu/test_avg_pool.py
@@ -0,0 +1,168 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import sys
+import numpy as np
+import pytest
+import tvm
+import tvm.testing
+from tvm import relay
+from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
+from tvm.micro.testing.aot_test_utils import (
+    AOT_CORSTONE300_RUNNER,
+)
+
+
class BasicPoolTests:
    """Shared harness: build a one-operator pool module and run it on the
    Corstone-300 FVP through the AOT executor."""

    @tvm.testing.requires_corstone300
    def test_pool(
        self,
        pool_type,
        shape,
        dtype,
        pool_size,
        strides,
        padding,
        dilation,
        layout,
        ceil_mode,
        count_include_pad,
        schedule_name,
    ):
        """Test a subgraph with a single pool operator."""
        # Resolve e.g. "avg_pool1d" -> relay.op.nn.avg_pool1d.
        pool_op = getattr(relay.op.nn, pool_type)
        op_kwargs = {
            "pool_size": pool_size,
            "strides": strides,
            "dilation": dilation,
            "padding": padding,
            "layout": layout,
            "out_layout": "",
            "ceil_mode": ceil_mode,
            "count_include_pad": count_include_pad,
        }

        def make_module():
            # Fresh relay.Function wrapping a single pool call.
            data = relay.var("input", relay.TensorType(shape, dtype))
            return tvm.IRModule.from_expr(relay.Function([data], pool_op(data, **op_kwargs)))

        # The reference module (used to generate expected outputs on the host)
        # and the module under test are built identically here.
        ref_mod = make_module()
        mod = make_module()

        inputs = {"input": np.random.randint(low=-128, high=127, size=shape, dtype=dtype)}
        output_list = generate_ref_data(ref_mod, inputs)

        compile_and_run(
            AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
            runner=AOT_CORSTONE300_RUNNER,
            interface_api="c",
            use_unpacked_api=True,
            target_opts={
                "-keys": "arm_cpu",
                "-mcpu": "cortex-m7",
            },
            schedule_name=schedule_name,
        )
+
+
class TestAvgPool1d(BasicPoolTests):
    """This test is for pool.arm_cpu schedule."""

    # Each tuple is one test case:
    # (shape, pool_size, strides, padding, dilation, layout, ceil_mode, count_include_pad)
    (
        shape,
        pool_size,
        strides,
        padding,
        dilation,
        layout,
        ceil_mode,
        count_include_pad,
    ) = tvm.testing.parameters(
        ((3, 32, 27), (3,), (2,), 0, 1, "NCW", False, False),
        ((3, 32, 27), (3,), (2,), 0, 1, "NWC", False, False),
        ((3, 32, 27), (3,), (2,), 0, 1, "NCW", True, False),
        ((3, 32, 27), (3,), (2,), 1, 1, "NCW", False, True),
        ((1, 1, 32), 3, 1, 0, 1, "NCW", False, False),
        ((1, 4, 20), 3, 2, 2, 1, "NCW", False, False),
    )
    pool_type = tvm.testing.parameter("avg_pool1d")
    dtype = tvm.testing.parameter("int32")
    schedule_name = tvm.testing.parameter("pool.arm_cpu")
+
+
class TestAvgPool2d(BasicPoolTests):
    """This test is for pool.arm_cpu schedule."""

    # Each tuple is one test case:
    # (shape, pool_size, strides, padding, dilation, layout, ceil_mode, count_include_pad)
    (
        shape,
        pool_size,
        strides,
        padding,
        dilation,
        layout,
        ceil_mode,
        count_include_pad,
    ) = tvm.testing.parameters(
        ((3, 32, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", False, False),
        ((3, 32, 27, 27), (3, 3), (2, 2), 0, 1, "NHWC", False, False),
        ((2, 16, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", True, False),
        ((2, 27, 27, 16), (3, 3), (2, 2), 0, 1, "NHWC", True, False),
        ((2, 16, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", True, True),
        # Whole-feature-map pooling (pool size == spatial extent).
        ((1, 25, 5, 64), (25, 5), (25, 5), 0, 1, "NHWC", False, False),
        ((1, 3, 3, 256), (3, 3), (3, 3), 0, 1, "NHWC", False, False),
        ((1, 8, 8, 64), (8, 8), (8, 8), 0, 1, "NHWC", False, False),
        ((1, 1, 32, 32), (3, 3), 1, 0, 1, "NCHW", False, False),
        ((1, 4, 32, 20), (3, 3), (2, 2), 0, 1, "NCHW", False, False),
    )
    pool_type = tvm.testing.parameter("avg_pool2d")
    dtype = tvm.testing.parameter("int32")
    schedule_name = tvm.testing.parameter("pool.arm_cpu")
+
+
class TestAvgPool3d(BasicPoolTests):
    """This test is for pool.arm_cpu schedule."""

    # Single NCDHW case:
    # (shape, pool_size, strides, padding, dilation, layout, ceil_mode, count_include_pad)
    (
        shape,
        pool_size,
        strides,
        padding,
        dilation,
        layout,
        ceil_mode,
        count_include_pad,
    ) = tvm.testing.parameters(
        ((3, 4, 8, 27, 27), (3, 3, 3), 2, 0, 1, "NCDHW", False, False),
    )
    pool_type = tvm.testing.parameter("avg_pool3d")
    dtype = tvm.testing.parameter("int32")
    schedule_name = tvm.testing.parameter("pool.arm_cpu")
+
+
# Allow running this file directly; extra CLI arguments are forwarded to pytest.
if __name__ == "__main__":
    sys.exit(pytest.main([__file__] + sys.argv[1:]))
diff --git a/tests/python/relay/strategy/arm_cpu/test_conv1d_ncw.py 
b/tests/python/relay/strategy/arm_cpu/test_conv1d_ncw.py
new file mode 100644
index 0000000000..0f0507cfe7
--- /dev/null
+++ b/tests/python/relay/strategy/arm_cpu/test_conv1d_ncw.py
@@ -0,0 +1,117 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import sys
+import numpy as np
+import pytest
+import tvm
+import tvm.testing
+from tvm import relay
+from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
+from tvm.micro.testing.aot_test_utils import (
+    AOT_CORSTONE300_RUNNER,
+)
+
+
class BasicConv1dTests:
    """Shared harness: build a one-operator conv1d (NCW) module and run it on
    the Corstone-300 FVP through the AOT executor."""

    @tvm.testing.requires_corstone300
    def test_conv1d(
        self,
        data_shape,
        kernel_size,
        num_filter,
        strides,
        padding,
        dilation,
        dtype,
        schedule_name,
    ):
        """Test a subgraph with a single conv1d_ncw operator."""
        # OIW kernel: (out_channels, in_channels, width).
        weight_shape = (num_filter, data_shape[1], kernel_size)
        weight_data = np.random.randint(low=-10, high=10, size=weight_shape, dtype=dtype)

        def make_module():
            # Fresh relay.Function wrapping a single conv1d call on the
            # shared random weights.
            data = relay.var("input", relay.TensorType(data_shape, dtype))
            conv = relay.op.nn.conv1d(
                data,
                relay.const(weight_data),
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                data_layout="NCW",
                kernel_layout="OIW",
                out_dtype="int32",
                out_layout="NCW",
            )
            return tvm.IRModule.from_expr(relay.Function([data], conv))

        # Identical reference and device modules; the reference generates the
        # expected outputs on the host.
        ref_mod = make_module()
        mod = make_module()

        inputs = {"input": np.random.randint(low=-128, high=127, size=data_shape, dtype=dtype)}
        output_list = generate_ref_data(ref_mod, inputs)

        compile_and_run(
            AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
            runner=AOT_CORSTONE300_RUNNER,
            interface_api="c",
            use_unpacked_api=True,
            target_opts={
                "-keys": "arm_cpu",
                "-mcpu": "cortex-m7",
            },
            schedule_name=schedule_name,
        )
+
+
class TestConv1d_ncw(BasicConv1dTests):
    """This test is for conv1d_ncw.generic schedule."""

    # Each tuple: (data_shape, kernel_size, num_filter, strides, padding, dilation).
    data_shape, kernel_size, num_filter, strides, padding, dilation = tvm.testing.parameters(
        ((4, 32, 16), 3, 12, 1, 0, 1),
        ((4, 16, 32), 3, 12, 1, 0, 1),
        ((1, 12, 32), 3, 16, 1, 0, 1),
        ((3, 10, 12), 4, 24, 1, 0, 1),
        ((1, 7, 7), 3, 5, 1, 0, 1),
        ((1, 2, 10), 4, 4, 2, (1, 1), 1),
        ((1, 2, 20), 4, 4, 2, (0, 1), 1),
        ((1, 4, 16), 1, 12, 1, (1, 0), 1),
        ((1, 16, 24), 1, 32, 3, (2, 2), 1),
    )
    dtype = tvm.testing.parameter("int8", "int16")
    # NOTE(review): data_layout is not a parameter of test_conv1d in this file —
    # confirm whether it is intentionally unused.
    data_layout = tvm.testing.parameter("NCW")
    schedule_name = tvm.testing.parameter("conv1d_ncw.generic")
+
+
# Allow running this file directly; extra CLI arguments are forwarded to pytest.
if __name__ == "__main__":
    sys.exit(pytest.main([__file__] + sys.argv[1:]))
diff --git a/tests/python/relay/strategy/arm_cpu/test_conv1d_nwc.py 
b/tests/python/relay/strategy/arm_cpu/test_conv1d_nwc.py
new file mode 100644
index 0000000000..e430ade2fa
--- /dev/null
+++ b/tests/python/relay/strategy/arm_cpu/test_conv1d_nwc.py
@@ -0,0 +1,145 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import sys
+import numpy as np
+import pytest
+import tvm
+import tvm.testing
+from tvm import relay
+from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
+from tvm.micro.testing.aot_test_utils import (
+    AOT_CORSTONE300_RUNNER,
+)
+
+
class BasicConv1dTests:
    """Shared harness: build a one-operator conv1d (NWC) module and run it on
    the Corstone-300 FVP through the AOT executor."""

    @tvm.testing.requires_corstone300
    def test_conv1d(
        self,
        data_shape,
        kernel_size,
        kernel_layout,
        num_filter,
        strides,
        padding,
        dilation,
        dtype,
        schedule_name,
    ):
        """Test a subgraph with a single conv1d_nwc operator."""
        ishape = data_shape
        # WIO kernel: (width, in_channels, out_channels).
        wshape = (kernel_size, data_shape[-1], num_filter)
        weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)

        # Reference module always uses the canonical WIO kernel layout; it
        # generates the expected outputs on the host.
        input0 = relay.var("input", relay.TensorType(ishape, dtype))
        weight0 = relay.const(weight_data)
        out0 = relay.op.nn.conv1d(
            input0,
            weight0,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            data_layout="NWC",
            kernel_layout="WIO",
            out_dtype="int32",
            out_layout="NWC",
        )
        ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))

        input1 = relay.var("input", relay.TensorType(ishape, dtype))

        # For schedules that expect WOI kernels, transpose the same weights
        # (W, I, O) -> (W, O, I) so both modules compute the same convolution.
        if kernel_layout == "WOI":
            weight1 = relay.const(np.moveaxis(weight_data, 1, -1))
        else:
            weight1 = relay.const(weight_data)

        # Module under test, built with the parameterized kernel_layout.
        out1 = relay.op.nn.conv1d(
            input1,
            weight1,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            data_layout="NWC",
            kernel_layout=kernel_layout,
            out_dtype="int32",
            out_layout="NWC",
        )
        mod = tvm.IRModule.from_expr(relay.Function([input1], out1))

        inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
        output_list = generate_ref_data(ref_mod, inputs)

        compile_and_run(
            AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
            runner=AOT_CORSTONE300_RUNNER,
            interface_api="c",
            use_unpacked_api=True,
            target_opts={
                "-keys": "arm_cpu",
                "-mcpu": "cortex-m7",
            },
            schedule_name=schedule_name,
        )
+
+
class TestConv1d_dsp(BasicConv1dTests):
    """This test is for conv1d_dsp schedule."""

    # Each tuple: (data_shape, kernel_size, num_filter, strides, padding, dilation).
    data_shape, kernel_size, num_filter, strides, padding, dilation = tvm.testing.parameters(
        ((4, 32, 16), 3, 12, 1, 0, 1),
        ((4, 16, 32), 3, 12, 1, 0, 1),
        # NOTE(review): this case duplicates the first row — confirm whether a
        # different shape was intended.
        ((4, 32, 16), 3, 12, 1, 0, 1),
        ((1, 32, 12), 3, 16, 1, 0, 1),
        # TODO: The following 4 tests fail due to https://github.com/apache/tvm/issues/11466
        # ((3, 12, 10), 4, 24, 1, 0, 1),
        # ((1, 7, 7), 3, 5, 1, 0, 1),
        # ((1, 10, 2), 4, 4, 2, (1, 1), 1),
        # ((1, 20, 2), 4, 4, 2, (0, 1), 1),
        ((1, 16, 4), 1, 12, 1, (1, 0), 1),
        ((1, 24, 16), 1, 32, 3, (2, 2), 1),
    )
    dtype = tvm.testing.parameter("int8", "int16")
    # NOTE(review): data_layout is not a parameter of test_conv1d — confirm
    # whether it is intentionally unused.
    data_layout = tvm.testing.parameter("NWC")
    kernel_layout = tvm.testing.parameter("WOI")
    schedule_name = tvm.testing.parameter("conv1d_dsp")
+
+
class TestConv1d_nwc(BasicConv1dTests):
    """This test is for conv1d_nwc.generic schedule."""

    # Each tuple: (data_shape, kernel_size, num_filter, strides, padding, dilation).
    data_shape, kernel_size, num_filter, strides, padding, dilation = tvm.testing.parameters(
        ((4, 32, 16), 3, 12, 1, 0, 1),
        ((4, 16, 32), 3, 12, 1, 0, 1),
        # NOTE(review): this case duplicates the first row — confirm whether a
        # different shape was intended.
        ((4, 32, 16), 3, 12, 1, 0, 1),
        ((1, 32, 12), 3, 16, 1, 0, 1),
        ((3, 12, 10), 4, 24, 1, 0, 1),
        ((1, 7, 7), 3, 5, 1, 0, 1),
        ((1, 10, 2), 4, 4, 2, (1, 1), 1),
        ((1, 20, 2), 4, 4, 2, (0, 1), 1),
        ((1, 16, 4), 1, 12, 1, (1, 0), 1),
        ((1, 24, 16), 1, 32, 3, (2, 2), 1),
    )
    dtype = tvm.testing.parameter("int8", "int16")
    # NOTE(review): data_layout is not a parameter of test_conv1d — confirm
    # whether it is intentionally unused.
    data_layout = tvm.testing.parameter("NWC")
    kernel_layout = tvm.testing.parameter("WIO")
    schedule_name = tvm.testing.parameter("conv1d_nwc.generic")
+
+
# Allow running this file directly; extra CLI arguments are forwarded to pytest.
if __name__ == "__main__":
    sys.exit(pytest.main([__file__] + sys.argv[1:]))
diff --git a/tests/python/relay/strategy/arm_cpu/test_conv2d_NCHWc.py 
b/tests/python/relay/strategy/arm_cpu/test_conv2d_NCHWc.py
new file mode 100644
index 0000000000..3b43d37c90
--- /dev/null
+++ b/tests/python/relay/strategy/arm_cpu/test_conv2d_NCHWc.py
@@ -0,0 +1,138 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import sys
+import numpy as np
+import pytest
+import tvm
+import tvm.testing
+from tvm import relay
+from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
+from tvm.micro.testing.aot_test_utils import (
+    AOT_CORSTONE300_RUNNER,
+)
+
+
class BasicConv2dTests:
    """Shared harness: build a one-operator conv2d_NCHWc module and run it on
    the Corstone-300 FVP through the AOT executor."""

    @tvm.testing.requires_corstone300
    def test_conv2d_NCHWc(
        self,
        data_shape,
        kernel_size,
        data_layout,
        kernel_layout,
        num_filter,
        strides,
        padding,
        dilation,
        dtype,
        schedule_name,
    ):
        """Test a subgraph with a single conv2d_NCHWc operator."""
        # Plain OIHW weights; the layout_transform below packs them into the
        # blocked kernel_layout (e.g. OIHW4i4o) the NCHWc op expects.
        weight_shape = (num_filter, data_shape[1], *kernel_size)
        weight_data = np.random.randint(low=-10, high=10, size=weight_shape, dtype=dtype)

        def make_module():
            # Fresh relay.Function: transform NCHW data / OIHW weights into
            # the blocked layouts, then apply a single conv2d_NCHWc.
            data = relay.var("input", relay.TensorType(data_shape, dtype))
            conv = relay.op.nn.contrib_conv2d_nchwc(
                relay.layout_transform(data, "NCHW", data_layout),
                relay.layout_transform(relay.const(weight_data), "OIHW", kernel_layout),
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                data_layout=data_layout,
                kernel_layout=kernel_layout,
                channels=num_filter,
                out_dtype="",
                out_layout="",
            )
            return tvm.IRModule.from_expr(relay.Function([data], conv))

        # Identical reference and device modules; the reference generates the
        # expected outputs on the host.
        ref_mod = make_module()
        mod = make_module()

        inputs = {"input": np.random.randint(low=-128, high=127, size=data_shape, dtype=dtype)}
        output_list = generate_ref_data(ref_mod, inputs)

        compile_and_run(
            AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
            runner=AOT_CORSTONE300_RUNNER,
            interface_api="c",
            use_unpacked_api=True,
            target_opts={
                "-keys": "arm_cpu",
                "-mcpu": "cortex-m7",
            },
            schedule_name=schedule_name,
        )
+
+
class TestConv2d_NCHWc(BasicConv2dTests):
    """This test is for conv2d_NCHWc.x86 schedule."""

    # Each tuple: (data_shape, kernel_size, num_filter, strides, padding,
    # dilation, dtype, kernel_layout, data_layout).
    (
        data_shape,
        kernel_size,
        num_filter,
        strides,
        padding,
        dilation,
        dtype,
        kernel_layout,
        data_layout,
    ) = tvm.testing.parameters(
        ((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int16", "OIHW4i4o", "NCHW4c"),
        ((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int32", "OIHW4i4o", "NCHW4c"),
        ((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int8", "OIHW2i8o", "NCHW8c"),
        ((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int16", "OIHW2i8o", "NCHW8c"),
        ((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int32", "OIHW2i8o", "NCHW8c"),
        # ResNet18 workloads
        # this test does not fit in corstone300 DCTM section.
        # ((1, 3, 112, 112), (7, 7), 64, (2, 2), (3, 3), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 64, 28, 28), (3, 3), 64, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 64, 28, 28), (1, 1), 64, (1, 1), (0, 0), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 64, 28, 28), (3, 3), 128, (2, 2), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 64, 28, 28), (1, 1), 128, (2, 2), (0, 0), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 128, 14, 14), (3, 3), 128, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 128, 14, 14), (3, 3), 256, (2, 2), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 128, 14, 14), (1, 1), 256, (2, 2), (0, 0), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 256, 7, 7), (3, 3), 256, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 256, 7, 7), (3, 3), 512, (2, 2), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 256, 7, 7), (1, 1), 512, (2, 2), (0, 0), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 512, 3, 3), (3, 3), 512, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
    )
    schedule_name = tvm.testing.parameter("conv2d_NCHWc.x86")
+
+
# Allow running this file directly; extra CLI arguments are forwarded to pytest.
if __name__ == "__main__":
    sys.exit(pytest.main([__file__] + sys.argv[1:]))
diff --git a/tests/python/relay/strategy/arm_cpu/test_dense_dsp.py 
b/tests/python/relay/strategy/arm_cpu/test_dense_dsp.py
new file mode 100644
index 0000000000..3edffba8ac
--- /dev/null
+++ b/tests/python/relay/strategy/arm_cpu/test_dense_dsp.py
@@ -0,0 +1,90 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import sys
+import numpy as np
+import pytest
+import tvm
+import tvm.testing
+from tvm import relay
+from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
+from tvm.micro.testing.aot_test_utils import (
+    AOT_CORSTONE300_RUNNER,
+)
+
+
class BasicDenseTests:
    """Shared harness: build a one-operator dense module and run it on the
    Corstone-300 FVP through the AOT executor."""

    @tvm.testing.requires_corstone300
    def test_dense(self, shape, weight_shape, dtype, schedule_name):
        """Test a subgraph with a single dense operator."""
        # relay dense weights are (units, in_dim).
        units = weight_shape[0]
        weight_data = np.random.randint(low=-10, high=10, size=weight_shape, dtype=dtype)

        def make_module():
            # Fresh relay.Function wrapping a single dense call on the
            # shared random weights.
            data = relay.var("input", relay.TensorType(shape, dtype))
            dense = relay.op.nn.dense(
                data,
                relay.const(weight_data),
                units=units,
                out_dtype="int32",
            )
            return tvm.IRModule.from_expr(relay.Function([data], dense))

        # Identical reference and device modules; the reference generates the
        # expected outputs on the host.
        ref_mod = make_module()
        mod = make_module()

        inputs = {"input": np.random.randint(low=-128, high=127, size=shape, dtype=dtype)}
        output_list = generate_ref_data(ref_mod, inputs)

        compile_and_run(
            AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
            runner=AOT_CORSTONE300_RUNNER,
            interface_api="c",
            use_unpacked_api=True,
            target_opts={
                "-keys": "arm_cpu",
                "-mcpu": "cortex-m7",
            },
            schedule_name=schedule_name,
        )
+
+
class TestDense(BasicDenseTests):
    """This test is for dense_dsp schedule."""

    # Each tuple: (shape, weight_shape) with weight_shape = (units, in_dim).
    shape, weight_shape = tvm.testing.parameters(
        ((1, 128), (16, 128)),
        ((32, 32), (32, 32)),
        ((1, 64), (1, 64)),
        ((11, 2), (2, 2)),
        ((1, 32), (64, 32)),
        ((3, 12), (10, 12)),
    )
    dtype = tvm.testing.parameter("int8", "int16")
    schedule_name = tvm.testing.parameter("dense_dsp")
+
+
# Allow running this file directly; extra CLI arguments are forwarded to pytest.
if __name__ == "__main__":
    sys.exit(pytest.main([__file__] + sys.argv[1:]))
diff --git a/tests/python/relay/strategy/arm_cpu/test_depthwise_conv2d_NCHWc.py 
b/tests/python/relay/strategy/arm_cpu/test_depthwise_conv2d_NCHWc.py
new file mode 100644
index 0000000000..69e9ab09e4
--- /dev/null
+++ b/tests/python/relay/strategy/arm_cpu/test_depthwise_conv2d_NCHWc.py
@@ -0,0 +1,121 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import sys
+import numpy as np
+import pytest
+import tvm
+import tvm.testing
+from tvm import relay
+from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
+from tvm.micro.testing.aot_test_utils import (
+    AOT_CORSTONE300_RUNNER,
+)
+
+
class BasicConv2dTests:
    """Shared harness: build a one-operator depthwise_conv2d_NCHWc module and
    run it on the Corstone-300 FVP through the AOT executor."""

    @tvm.testing.requires_corstone300
    def test_depthwise_conv2d_NCHWc(
        self,
        data_shape,
        kernel_size,
        data_layout,
        kernel_layout,
        groups,
        strides,
        padding,
        dilation,
        dtype,
        schedule_name,
    ):
        """Test a subgraph with a single depthwise_conv2d_NCHWc operator.

        Builds the same single-op module twice: ``ref_mod`` generates the
        expected outputs on the host, ``mod`` is compiled for the target and
        compared against them.
        """
        ishape = data_shape
        # Depthwise OIHW weights: one (1, kh, kw) filter per input channel.
        wshape = (data_shape[1], 1, *kernel_size)
        weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)
        # (Removed the original no-op self-assignment `groups = groups`.)

        input0 = relay.var("input", relay.TensorType(ishape, dtype))
        weight0 = relay.const(weight_data)
        # layout_transform packs plain NCHW data / OIHW weights into the
        # blocked layouts (e.g. NCHW4c / OIHW1i4o) the NCHWc op expects.
        out0 = relay.op.nn.contrib_depthwise_conv2d_nchwc(
            relay.layout_transform(input0, "NCHW", data_layout),
            relay.layout_transform(weight0, "OIHW", kernel_layout),
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            data_layout=data_layout,
            kernel_layout=kernel_layout,
            groups=groups,
            out_dtype="",
            out_layout="",
        )
        ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))

        # Module under test: identical construction to the reference.
        input1 = relay.var("input", relay.TensorType(ishape, dtype))
        weight1 = relay.const(weight_data)
        out1 = relay.op.nn.contrib_depthwise_conv2d_nchwc(
            relay.layout_transform(input1, "NCHW", data_layout),
            relay.layout_transform(weight1, "OIHW", kernel_layout),
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            data_layout=data_layout,
            kernel_layout=kernel_layout,
            groups=groups,
            out_dtype="",
            out_layout="",
        )
        mod = tvm.IRModule.from_expr(relay.Function([input1], out1))

        inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
        output_list = generate_ref_data(ref_mod, inputs)

        compile_and_run(
            AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
            runner=AOT_CORSTONE300_RUNNER,
            interface_api="c",
            use_unpacked_api=True,
            target_opts={
                "-keys": "arm_cpu",
                "-mcpu": "cortex-m7",
            },
            schedule_name=schedule_name,
        )
+
+
class TestDepthWiseConv2d_NCHWc(BasicConv2dTests):
    """This test is for depthwise_conv2d_NCHWc schedule."""

    # Each tuple: (data_shape, kernel_size, groups, strides, padding, dilation,
    # kernel_layout, data_layout).
    (
        data_shape,
        kernel_size,
        groups,
        strides,
        padding,
        dilation,
        kernel_layout,
        data_layout,
    ) = tvm.testing.parameters(
        ((1, 16, 32, 32), (3, 3), 16, (1, 1), (1, 1, 1, 1), (1, 1), "OIHW1i4o", "NCHW4c"),
        # NOTE(review): groups=12 with 16 input channels — confirm this case is
        # intentional rather than groups=16.
        ((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1, 1, 1), (1, 1), "OIHW1i8o", "NCHW8c"),
    )
    dtype = tvm.testing.parameter("int8", "int16", "int32")
    schedule_name = tvm.testing.parameter("depthwise_conv2d_NCHWc")
+
+
# Allow running this file directly; extra CLI arguments are forwarded to pytest.
if __name__ == "__main__":
    sys.exit(pytest.main([__file__] + sys.argv[1:]))
diff --git a/tests/python/relay/strategy/arm_cpu/test_max_pool.py 
b/tests/python/relay/strategy/arm_cpu/test_max_pool.py
new file mode 100644
index 0000000000..f58a041ecb
--- /dev/null
+++ b/tests/python/relay/strategy/arm_cpu/test_max_pool.py
@@ -0,0 +1,135 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
# Standard library.
import sys

# Third-party / TVM.
# (Removed accidental, unused `from pickle import FALSE` — an IDE auto-import
# artifact; `FALSE` is never referenced anywhere in this file.)
import numpy as np
import pytest

import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
    AOT_CORSTONE300_RUNNER,
)
+
+
class BasicPoolTests:
    @tvm.testing.requires_corstone300
    def test_pool(
        self,
        pool_type,
        shape,
        dtype,
        pool_size,
        strides,
        padding,
        dilation,
        layout,
        ceil_mode,
        schedule_name,
    ):
        """Test a subgraph containing a single pooling operator."""

        def build_module():
            # Build a fresh Relay module each call: one copy produces the
            # reference outputs, the other is the module under test.
            data = relay.var("input", relay.TensorType(shape, dtype))
            pooled = getattr(relay.op.nn, pool_type)(
                data,
                pool_size=pool_size,
                strides=strides,
                dilation=dilation,
                padding=padding,
                layout=layout,
                out_layout="",
                ceil_mode=ceil_mode,
            )
            return tvm.IRModule.from_expr(relay.Function([data], pooled))

        ref_mod = build_module()
        test_mod = build_module()

        # Random int8-range input; reference outputs come from the host-side build.
        inputs = {"input": np.random.randint(low=-128, high=127, size=shape, dtype=dtype)}
        output_list = generate_ref_data(ref_mod, inputs)

        # Compile for Cortex-M7 and execute on the Corstone-300 FVP runner,
        # checking the device output against the reference.
        compile_and_run(
            AOTTestModel(module=test_mod, inputs=inputs, outputs=output_list),
            runner=AOT_CORSTONE300_RUNNER,
            interface_api="c",
            use_unpacked_api=True,
            target_opts={
                "-keys": "arm_cpu",
                "-mcpu": "cortex-m7",
            },
            schedule_name=schedule_name,
        )
+
+
class TestMaxPool1d(BasicPoolTests):
    """This test is for pool.arm_cpu schedule."""

    # Cases, in order: (shape, pool_size, strides, padding, dilation, layout, ceil_mode).
    # Covers both NCW and NWC layouts, with and without ceil_mode.
    shape, pool_size, strides, padding, dilation, layout, ceil_mode = tvm.testing.parameters(
        ((3, 32, 27), (3,), (2,), 0, 1, "NCW", True),
        ((1, 32, 1), 3, 1, 0, 1, "NWC", False),
        ((1, 20, 4), 3, 2, 0, 1, "NWC", False),
    )
    pool_type = tvm.testing.parameter("max_pool1d")
    dtype = tvm.testing.parameter("int32")
    schedule_name = tvm.testing.parameter("pool.arm_cpu")
+
+
class TestMaxPool2d(BasicPoolTests):
    """This test is for pool.arm_cpu schedule."""

    # Cases, in order: (shape, pool_size, strides, padding, dilation, layout, ceil_mode).
    # Covers NCHW and NHWC layouts, several spatial sizes, scalar vs tuple
    # strides, and both ceil_mode settings.
    shape, pool_size, strides, padding, dilation, layout, ceil_mode = tvm.testing.parameters(
        ((2, 32, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", False),
        ((2, 32, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", True),
        ((1, 26, 26, 12), (2, 2), (2, 2), 0, 1, "NHWC", False),
        ((1, 11, 11, 32), (2, 2), (2, 2), 0, 1, "NHWC", False),
        ((1, 3, 3, 64), (2, 2), (2, 2), 0, 1, "NHWC", False),
        ((1, 32, 32, 1), (3, 3), 1, 0, 1, "NHWC", False),
        ((1, 32, 20, 4), (3, 3), (2, 2), 0, 1, "NHWC", False),
        ((1, 32, 32, 1), (3, 3), 1, 0, 1, "NHWC", True),
        ((1, 32, 20, 4), (3, 3), (2, 2), 0, 1, "NHWC", True),
    )
    pool_type = tvm.testing.parameter("max_pool2d")
    dtype = tvm.testing.parameter("int32")
    schedule_name = tvm.testing.parameter("pool.arm_cpu")
+
+
class TestMaxPool3d(BasicPoolTests):
    """This test is for pool.arm_cpu schedule."""

    # Single case, in order: (shape, pool_size, strides, padding, dilation, layout, ceil_mode).
    shape, pool_size, strides, padding, dilation, layout, ceil_mode = tvm.testing.parameters(
        ((3, 4, 8, 27, 27), (3, 3, 3), 2, 0, 1, "NCDHW", False),
    )
    pool_type = tvm.testing.parameter("max_pool3d")
    dtype = tvm.testing.parameter("int32")
    schedule_name = tvm.testing.parameter("pool.arm_cpu")
+
+
if __name__ == "__main__":
    # Run this file under pytest, forwarding any extra CLI arguments,
    # and propagate pytest's exit status to the shell.
    raise SystemExit(pytest.main([__file__] + sys.argv[1:]))

Reply via email to