mehrdadh commented on a change in pull request #9233:
URL: https://github.com/apache/tvm/pull/9233#discussion_r726247929



##########
File path: python/tvm/topi/arm_cpu/cortex_m7/micro_kernel/relu.py
##########
@@ -0,0 +1,78 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# pylint: disable=invalid-name, no-value-for-parameter
+"""Defines relu intrinsics for SIMD relu operation."""
+
+
+def relu_MxN_impl(M, N, uniq_id):
+    """Emit C code for relu impl."""
+    cc_code = f"""
+#ifndef __STATIC_FORCEINLINE
+    #define __STATIC_FORCEINLINE  static inline
+#endif
+
+#ifdef __cplusplus
+extern "C"
+#endif
+__STATIC_FORCEINLINE int32_t relu_rest(
+    int N,
+    int8_t *mat) {{
+  for (int j = 0; j < N; j++)
+    mat[j] = mat[j] > 0 ? mat[j] : 0;
+  return 0;
+}}
+
+#ifdef __cplusplus
+extern "C"
+#endif
+__STATIC_FORCEINLINE int32_t relu_{M}x{N}_loop_{uniq_id}(
+    int8_t *mat) {{
+  for (int i = 0; i < {M}; i++)
+    for (int j = 0; j < {N}; j++)
+                       mat[i * {N} + j] > 0 ? mat[i * {N} + j] : 0;
+  return 0;
+}}
+
+#ifdef __cplusplus
+extern "C"
+#endif
+__STATIC_FORCEINLINE int32_t relu_{M}x{N}_{uniq_id}(
+    int8_t *mat) {{
+
+       int32_t *pmat32 = (int32_t *)mat;
+
+#ifdef GROVETY_OP_BENCHMARK

Review comment:
       What's the use of this macro?

##########
File path: python/tvm/relay/op/strategy/arm_cpu.py
##########
@@ -415,3 +435,67 @@ def schedule_bitserial_dense_arm_cpu(attrs, inputs, 
out_type, target):
         name="bitserial_dense.arm_cpu",
     )
     return strategy
+
+
+@dense_strategy.register(["arm_cpu", "micro_dev"])
+def schedule_dense_arm_cpu(attrs, inputs, out_type, target):
+    """dense arm cpu strategy"""
+    strategy = _op.OpStrategy()
+    isa = arm_isa.IsaAnalyzer(target)
+    if "SMLAD" in isa:
+        strategy.add_implementation(
+            wrap_compute_dense(topi.nn.dense),
+            wrap_topi_schedule(topi.arm_cpu.schedule_dense_direct_simd),
+            name="dense_direct_simd.micro_dev",
+        )
+    else:
+        strategy.add_implementation(
+            wrap_compute_dense(topi.nn.dense),
+            wrap_topi_schedule(topi.generic.schedule_dense),
+            name="dense.generic",
+        )
+    return strategy
+
+
+@conv1d_strategy.register("arm_cpu")
+def conv1d_strategy_arm_cpu(attrs, inputs, out_type, target):
+    """conv1d strategy"""
+    strategy = _op.OpStrategy()
+    layout = attrs.data_layout
+    kernel_layout = attrs.kernel_layout
+    dilation = get_const_tuple(attrs.dilation)
+    if dilation[0] < 1:
+        raise ValueError("dilation should be a positive value")
+
+    isa = arm_isa.IsaAnalyzer(target)
+
+    if kernel_layout == "WOI":
+        if layout == "NWC" and "SMLAD" in isa:
+            strategy.add_implementation(
+                wrap_compute_conv1d(topi.arm_cpu.conv1d_direct_simd),
+                wrap_topi_schedule(topi.arm_cpu.schedule_conv1d_direct_simd),

Review comment:
       Maybe rename `schedule_conv1d_direct_simd` to 
`schedule_conv1d_nwc_direct_simd` to make it more readable and consistent 
with the others.

##########
File path: python/tvm/topi/arm_cpu/conv2d.py
##########
@@ -508,12 +509,25 @@ def _callback(op):
 @autotvm.register_topi_compute("conv2d_direct_simd.arm_cpu")
 def conv2d_direct_simd(cfg, data, kernel, strides, padding, dilation, 
out_dtype):
     """Compute conv2d with SIMD (v7e-m)."""
-    return direct_simd.conv2d_direct_simd_compute(
+    return direct_simd_conv2d.conv2d_direct_simd_compute(
         cfg, data, kernel, strides, padding, dilation, out_dtype
     )
 
 
 @autotvm.register_topi_schedule("conv2d_direct_simd.arm_cpu")
 def schedule_conv2d_direct_simd(cfg, outs):
     """Create schedule for conv2d_direct_simd"""
-    return direct_simd.conv2d_direct_simd_nhwc_schedule(cfg, outs)
+    return direct_simd_conv2d.conv2d_direct_simd_nhwc_schedule(cfg, outs)
+
+
[email protected]_topi_compute("conv1d_direct_simd.arm_cpu")
+def conv1d_direct_simd(cfg, data, kernel, strides, padding, dilation, 
out_dtype):

Review comment:
       Please move the `conv1d`-related functions to a `conv1d.py` file in 
the same directory.

##########
File path: tests/python/integration/test_m7_simd.py
##########
@@ -0,0 +1,331 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import sys
+import numpy as np
+import pytest
+import tvm
+from tvm import relay
+from tests.python.relay.aot.aot_test_utils import (
+    AOTTestModel,
+    AOT_CORSTONE300_RUNNER,
+    generate_ref_data,
+    compile_and_run,
+)
+
+
[email protected]_corstone300
[email protected](
+    "data_shape_nhwc, kernel_size, num_filter, strides, padding",
+    [
+        ((1, 32, 32, 1), (3, 3), 12, 1, 0),
+        ((1, 32, 10, 3), (3, 3), 16, 1, 0),
+        ((1, 49, 10, 1), (10, 4), 64, (2, 1), (4, 1, 5, 1)),
+        # TOFIX: https://github.com/apache/tvm/issues/9226
+        # ((1, 49, 10, 1), (10, 4), 64, (2, 2), (4, 1, 5, 1)),

Review comment:
       Do you plan to have a follow-up PR for this issue?

##########
File path: python/tvm/topi/arm_cpu/conv2d.py
##########
@@ -508,12 +509,25 @@ def _callback(op):
 @autotvm.register_topi_compute("conv2d_direct_simd.arm_cpu")
 def conv2d_direct_simd(cfg, data, kernel, strides, padding, dilation, 
out_dtype):
     """Compute conv2d with SIMD (v7e-m)."""
-    return direct_simd.conv2d_direct_simd_compute(
+    return direct_simd_conv2d.conv2d_direct_simd_compute(
         cfg, data, kernel, strides, padding, dilation, out_dtype
     )
 
 
 @autotvm.register_topi_schedule("conv2d_direct_simd.arm_cpu")
 def schedule_conv2d_direct_simd(cfg, outs):
     """Create schedule for conv2d_direct_simd"""
-    return direct_simd.conv2d_direct_simd_nhwc_schedule(cfg, outs)
+    return direct_simd_conv2d.conv2d_direct_simd_nhwc_schedule(cfg, outs)
+
+
[email protected]_topi_compute("conv1d_direct_simd.arm_cpu")
+def conv1d_direct_simd(cfg, data, kernel, strides, padding, dilation, 
out_dtype):
+    """Compute conv1d with SIMD (v7e-m)."""
+    return direct_simd_conv1d.conv1d_direct_simd_compute(
+        cfg, data, kernel, strides, padding, dilation, out_dtype
+    )
+
+
[email protected]_topi_schedule("conv1d_direct_simd.arm_cpu")
+def schedule_conv1d_direct_simd(cfg, outs):

Review comment:
       Same comment here: since we only support the `nwc` format, I think we 
should be more explicit in the name.

##########
File path: python/tvm/topi/arm_cpu/cortex_m7/conv1d/direct_simd.py
##########
@@ -0,0 +1,175 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# pylint: disable=invalid-name, no-value-for-parameter
+"""Direct implementation of conv1d."""
+from tvm import autotvm
+from tvm.autotvm.task import deserialize_args
+from tvm import te
+from tvm.topi.utils import simplify, traverse_inline
+from tvm.topi.nn.pad import pad
+from tvm.topi.nn.utils import get_pad_tuple1d
+
+from ..micro_kernel.gemm import (
+    intrin_gemm_MxKxN,
+    gemm_MxKxN_impl,
+)
+
+
+def conv1d_direct_simd(*args, **kwargs):

Review comment:
       same here.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to