This is an automated email from the ASF dual-hosted git repository.
csullivan pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new 46675996e5 [Hexagon] Register basic strategies and schedules for
common operators (#10919)
46675996e5 is described below
commit 46675996e5c008921562d2278c0c3ec6504a28bb
Author: Krzysztof Parzyszek <[email protected]>
AuthorDate: Thu Apr 7 18:24:06 2022 -0500
[Hexagon] Register basic strategies and schedules for common operators
(#10919)
These are just placeholders to enable building full models.
---
python/tvm/relay/op/strategy/hexagon.py | 91 +++++++++++++++++++++-
python/tvm/topi/hexagon/__init__.py | 5 ++
.../topi/hexagon/{conv2d.py => batch_matmul.py} | 20 ++++-
python/tvm/topi/hexagon/conv2d.py | 32 +++++++-
python/tvm/topi/hexagon/{conv2d.py => dense.py} | 20 ++++-
.../tvm/topi/hexagon/{conv2d.py => injective.py} | 24 +++++-
python/tvm/topi/hexagon/{conv2d.py => pooling.py} | 27 ++++++-
python/tvm/topi/hexagon/{conv2d.py => reduce.py} | 20 ++++-
8 files changed, 219 insertions(+), 20 deletions(-)
diff --git a/python/tvm/relay/op/strategy/hexagon.py b/python/tvm/relay/op/strategy/hexagon.py
index cb1fec3559..fd5ee97e88 100644
--- a/python/tvm/relay/op/strategy/hexagon.py
+++ b/python/tvm/relay/op/strategy/hexagon.py
@@ -14,7 +14,6 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-
"""Definition of Hexagon operator strategy."""
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
@@ -24,6 +23,21 @@ from .generic import *
from .. import op as _op
+# --- Op strategy registration
+
+
+@batch_matmul_strategy.register("hexagon")
+def batch_matmul_strategy_cpu(attrs, inputs, out_type, target):
+ """batch_matmul strategy for Hexagon"""
+ strategy = _op.OpStrategy()
+ strategy.add_implementation(
+ wrap_compute_batch_matmul(topi.nn.batch_matmul),
+ wrap_topi_schedule(topi.hexagon.schedule_batch_matmul),
+ name="batch_matmul.hexagon",
+ )
+ return strategy
+
+
@conv2d_strategy.register("hexagon")
def conv2d_strategy_hexagon(attrs, inputs, out_type, target):
"""Conv2d strategy for Hexagon"""
@@ -35,10 +49,81 @@ def conv2d_strategy_hexagon(attrs, inputs, out_type, target):
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nhwc),
wrap_topi_schedule(topi.hexagon.schedule_conv2d_nhwc),
- name="conv2d.hexagon",
+ name="conv2d_nhwc.hexagon",
+ )
+ return strategy
+
+ if data_layout == "NCHW" and kernel_layout == "OIHW":
+ strategy.add_implementation(
+ wrap_compute_conv2d(topi.nn.conv2d_nchw),
+ wrap_topi_schedule(topi.hexagon.schedule_conv2d_nchw),
+ name="conv2d_nchw.hexagon",
)
return strategy
raise RuntimeError(
- "Unsupported layouts: data_layout:{},
kernel_layout:{}".format(data_layout, kernel_layout)
+ f"Unsupported layouts: data_layout:{data_layout},
kernel_layout:{kernel_layout}, "
+ f"groups:{attrs.groups}"
+ )
+
+
+@dense_strategy.register("hexagon")
+def dense_strategy_hexagon(attrs, inputs, out_type, target):
+ """Dense strategy for Hexagon"""
+ strategy = _op.OpStrategy()
+ strategy.add_implementation(
+ wrap_compute_dense(topi.nn.dense),
+ wrap_topi_schedule(topi.hexagon.schedule_dense),
+ name="dense.hexagon",
)
+ return strategy
+
+
+@softmax_strategy.register("hexagon")
+def softmax_strategy_hexagon(attrs, inputs, out_type, target):
+ """Softmax strategy for Hexagon"""
+ strategy = _op.OpStrategy()
+ strategy.add_implementation(
+ wrap_compute_softmax(topi.nn.softmax),
+ wrap_topi_schedule(topi.hexagon.schedule_softmax),
+ name="softmax.hexagon",
+ )
+ return strategy
+
+
+# --- Op schedule registration
+
+
+@schedule_adaptive_pool.register("hexagon")
+def schedule_adaptive_pool_hexagon(attrs, outs, target):
+ """Schedule adaptive pool ops for Hexagon"""
+ with target:
+ return topi.hexagon.schedule_adaptive_pool(outs)
+
+
+@schedule_concatenate.register("hexagon")
+def schedule_concatenate_hexagon(attrs, outs, target):
+ """Schedule concatenate ops for Hexagon"""
+ with target:
+ return topi.hexagon.schedule_injective(outs)
+
+
+@schedule_injective.register("hexagon")
+def schedule_injective_hexagon(attrs, outs, target):
+ """Schedule injective ops for Hexagon"""
+ with target:
+ return topi.hexagon.schedule_injective(outs)
+
+
+@schedule_pool.register("hexagon")
+def schedule_pool_hexagon(attrs, outs, target):
+ """Schedule pool ops for Hexagon"""
+ with target:
+ return topi.hexagon.schedule_pool(outs)
+
+
+@schedule_reduce.register("hexagon")
+def schedule_reduce_hexagon(attrs, outs, target):
+ """Schedule reduction ops for Hexagon"""
+ with target:
+ return topi.hexagon.schedule_reduce(outs)
diff --git a/python/tvm/topi/hexagon/__init__.py b/python/tvm/topi/hexagon/__init__.py
index 3263819ccf..6718b21130 100644
--- a/python/tvm/topi/hexagon/__init__.py
+++ b/python/tvm/topi/hexagon/__init__.py
@@ -19,4 +19,9 @@
# pylint: disable=wildcard-import
+from .batch_matmul import *
from .conv2d import *
+from .dense import *
+from .injective import *
+from .pooling import *
+from .reduce import *
diff --git a/python/tvm/topi/hexagon/conv2d.py b/python/tvm/topi/hexagon/batch_matmul.py
similarity index 63%
copy from python/tvm/topi/hexagon/conv2d.py
copy to python/tvm/topi/hexagon/batch_matmul.py
index 8a484ae77e..bf2ca3c9c7 100644
--- a/python/tvm/topi/hexagon/conv2d.py
+++ b/python/tvm/topi/hexagon/batch_matmul.py
@@ -15,12 +15,26 @@
# specific language governing permissions and limitations
# under the License.
-""" Schedules for conv2d. """
+"""Schedule for composition of batch_matmul operator"""
import tvm
-def schedule_conv2d_nhwc(outs):
- """Schedule for Conv2d NHWC operator."""
+def schedule_batch_matmul(outs):
+ """Schedule for batch_matmul op.
+
+ Parameters
+ ----------
+ outs: Array of Tensor
+ The computation graph description of batch_matmul in the format
+ of an array of tensors.
+
+ Returns
+ -------
+ sch: Schedule
+ The computation schedule for the op.
+ """
+ outs = [outs] if isinstance(outs, tvm.te.tensor.Tensor) else outs
s = tvm.te.create_schedule([x.op for x in outs])
+ tvm.te.schedule.AutoInlineInjective(s)
return s
diff --git a/python/tvm/topi/hexagon/conv2d.py b/python/tvm/topi/hexagon/conv2d.py
index 8a484ae77e..6df15f8b8c 100644
--- a/python/tvm/topi/hexagon/conv2d.py
+++ b/python/tvm/topi/hexagon/conv2d.py
@@ -15,12 +15,40 @@
# specific language governing permissions and limitations
# under the License.
-""" Schedules for conv2d. """
+"""Schedule for conv2d"""
import tvm
def schedule_conv2d_nhwc(outs):
- """Schedule for Conv2d NHWC operator."""
+ """Schedule for conv2d NHWC operator.
+
+ Parameters
+ ----------
+ outs: Array of Tensor
+ The computation graph description of conv2d in the format
+ of an array of tensors.
+
+ Returns
+ -------
+ sch: Schedule
+ The computation schedule for the op.
+ """
+ outs = [outs] if isinstance(outs, tvm.te.tensor.Tensor) else outs
s = tvm.te.create_schedule([x.op for x in outs])
+ tvm.te.schedule.AutoInlineInjective(s)
return s
+
+
+def schedule_conv2d_nchw(outs):
+ return schedule_conv2d_nhwc(outs)
+
+
+def schedule_conv2d(outs, layout="NHWC"):
+ layout_uncase = layout.casefold()
+ if layout_uncase == "NHWC".casefold():
+ return schedule_conv2d_nhwc(outs)
+ if layout_uncase == "NCHW".casefold():
+ return schedule_conv2d_nchw(outs)
+
+ raise ValueError(f"Unexpected layout={layout}")
diff --git a/python/tvm/topi/hexagon/conv2d.py b/python/tvm/topi/hexagon/dense.py
similarity index 65%
copy from python/tvm/topi/hexagon/conv2d.py
copy to python/tvm/topi/hexagon/dense.py
index 8a484ae77e..afe53f515f 100644
--- a/python/tvm/topi/hexagon/conv2d.py
+++ b/python/tvm/topi/hexagon/dense.py
@@ -15,12 +15,26 @@
# specific language governing permissions and limitations
# under the License.
-""" Schedules for conv2d. """
+"""Schedule for dense operator"""
import tvm
-def schedule_conv2d_nhwc(outs):
- """Schedule for Conv2d NHWC operator."""
+def schedule_dense(outs):
+ """Schedule for dense op.
+
+ Parameters
+ ----------
+ outs: Array of Tensor
+ The computation graph description of dense in the format
+ of an array of tensors.
+
+ Returns
+ -------
+ sch: Schedule
+ The computation schedule for the op.
+ """
+ outs = [outs] if isinstance(outs, tvm.te.tensor.Tensor) else outs
s = tvm.te.create_schedule([x.op for x in outs])
+ tvm.te.schedule.AutoInlineInjective(s)
return s
diff --git a/python/tvm/topi/hexagon/conv2d.py b/python/tvm/topi/hexagon/injective.py
similarity index 61%
copy from python/tvm/topi/hexagon/conv2d.py
copy to python/tvm/topi/hexagon/injective.py
index 8a484ae77e..88e0f40640 100644
--- a/python/tvm/topi/hexagon/conv2d.py
+++ b/python/tvm/topi/hexagon/injective.py
@@ -15,12 +15,30 @@
# specific language governing permissions and limitations
# under the License.
-""" Schedules for conv2d. """
+"""Schedule for injective operators"""
import tvm
-def schedule_conv2d_nhwc(outs):
- """Schedule for Conv2d NHWC operator."""
+def schedule_injective(outs):
+ """Schedule for injective op.
+
+ Parameters
+ ----------
+ outs: Array of Tensor
+ The computation graph description of injective in the format
+ of an array of tensors.
+
+ Returns
+ -------
+ sch: Schedule
+ The computation schedule for the op.
+ """
+ outs = [outs] if isinstance(outs, tvm.te.tensor.Tensor) else outs
s = tvm.te.create_schedule([x.op for x in outs])
+ tvm.te.schedule.AutoInlineInjective(s)
return s
+
+
+def schedule_softmax(outs):
+ return schedule_injective(outs)
diff --git a/python/tvm/topi/hexagon/conv2d.py b/python/tvm/topi/hexagon/pooling.py
similarity index 58%
copy from python/tvm/topi/hexagon/conv2d.py
copy to python/tvm/topi/hexagon/pooling.py
index 8a484ae77e..eb8adac35f 100644
--- a/python/tvm/topi/hexagon/conv2d.py
+++ b/python/tvm/topi/hexagon/pooling.py
@@ -15,12 +15,33 @@
# specific language governing permissions and limitations
# under the License.
-""" Schedules for conv2d. """
+"""Schedule for pooling operators"""
import tvm
-def schedule_conv2d_nhwc(outs):
- """Schedule for Conv2d NHWC operator."""
+def schedule_pool(outs, layout="NHWC"): # pylint: disable=unused-argument
+ """Schedule for pooling op.
+
+ Parameters
+ ----------
+ outs: Array of Tensor
+ The computation graph description of injective in the format
+ of an array of tensors.
+
+ layout: str
+ The tensor layout.
+
+ Returns
+ -------
+ sch: Schedule
+ The computation schedule for the op.
+ """
+ outs = [outs] if isinstance(outs, tvm.te.tensor.Tensor) else outs
s = tvm.te.create_schedule([x.op for x in outs])
+ tvm.te.schedule.AutoInlineInjective(s)
return s
+
+
+def schedule_adaptive_pool(outs):
+ return schedule_pool(outs)
diff --git a/python/tvm/topi/hexagon/conv2d.py b/python/tvm/topi/hexagon/reduce.py
similarity index 64%
copy from python/tvm/topi/hexagon/conv2d.py
copy to python/tvm/topi/hexagon/reduce.py
index 8a484ae77e..ea10cd492a 100644
--- a/python/tvm/topi/hexagon/conv2d.py
+++ b/python/tvm/topi/hexagon/reduce.py
@@ -15,12 +15,26 @@
# specific language governing permissions and limitations
# under the License.
-""" Schedules for conv2d. """
+"""Schedule for composition of reduction operator"""
import tvm
-def schedule_conv2d_nhwc(outs):
- """Schedule for Conv2d NHWC operator."""
+def schedule_reduce(outs):
+ """Schedule for reduction op.
+
+ Parameters
+ ----------
+ outs: Array of Tensor
+ The computation graph description of reduction in the format
+ of an array of tensors.
+
+ Returns
+ -------
+ sch: Schedule
+ The computation schedule for the op.
+ """
+ outs = [outs] if isinstance(outs, tvm.te.tensor.Tensor) else outs
s = tvm.te.create_schedule([x.op for x in outs])
+ tvm.te.schedule.AutoInlineInjective(s)
return s