NicolaLancellotti commented on a change in pull request #9384:
URL: https://github.com/apache/tvm/pull/9384#discussion_r738596287
##########
File path: python/tvm/relay/op/contrib/ethosu.py
##########
@@ -331,6 +332,133 @@ def qnn_depthwise_conv2d_pattern() ->
tvm.relay.dataflow_pattern.DFPattern:
return clip_or_req
+class MaxPool2DParams:
+ """
+ This class will parse a call to a ethosu.maxpool2d composite function
+ and extract the parameter information.
+ """
+
+ composite_name = "ethosu.maxpool2d"
+ # The hardware only supports padding upto the numbers as follows
+ padding_bounds = [127, 127, 128, 128]
+
+ def __init__(self, func_body: Call):
+ clip = None
+ if str(func_body.op) == "clip":
+ clip = func_body
+ pool_op = clip.args[0]
+ else:
+ pool_op = func_body
+
+ attrs = pool_op.attrs
+ self.ifm = TensorParams(pool_op.args[MaxPoolArgs.ifm.value],
attrs.layout)
+ self.ofm = TensorParams(pool_op, attrs.layout)
+ self.pool_shape = [int(i) for i in attrs.pool_size]
+ self.strides = attrs.strides
+ self.padding = attrs.padding
+ self.activation = clip
+ self.pooling_type = "MAX"
+
+ def is_valid(self):
+ """
+ This function checks whether MaxPool2D has compatible attributes with
the NPU
+ """
+ tensor_params = [self.ifm, self.ofm]
+ if not check_valid_dtypes(tensor_params):
+ return False
+ if self.ifm.dtype != self.ofm.dtype:
+ return False
+ if not check_strides(self.strides):
+ return False
+ if not check_batch_size(self.ifm):
+ return False
+ if not check_padding(self.padding, self.padding_bounds):
+ return False
+ # Check pool size
+ if (
+ len(self.pool_shape) != 2
+ or self.pool_shape[1] > 256
+ or self.pool_shape[0] * self.pool_shape[1] > 256 * 256
+ ):
+ return False
+ return True
Review comment:
   I'd prefer the current approach, that is, a list of `if` guards that
check whether it is not valid and, at the end, return True otherwise. What do
other people think?
##########
File path: python/tvm/relay/op/contrib/ethosu.py
##########
@@ -331,6 +332,133 @@ def qnn_depthwise_conv2d_pattern() ->
tvm.relay.dataflow_pattern.DFPattern:
return clip_or_req
+class MaxPool2DParams:
+ """
+ This class will parse a call to a ethosu.maxpool2d composite function
+ and extract the parameter information.
+ """
+
+ composite_name = "ethosu.maxpool2d"
+ # The hardware only supports padding upto the numbers as follows
+ padding_bounds = [127, 127, 128, 128]
+
+ def __init__(self, func_body: Call):
+ clip = None
+ if str(func_body.op) == "clip":
+ clip = func_body
+ pool_op = clip.args[0]
+ else:
+ pool_op = func_body
+
+ attrs = pool_op.attrs
+ self.ifm = TensorParams(pool_op.args[MaxPoolArgs.ifm.value],
attrs.layout)
+ self.ofm = TensorParams(pool_op, attrs.layout)
+ self.pool_shape = [int(i) for i in attrs.pool_size]
+ self.strides = attrs.strides
+ self.padding = attrs.padding
+ self.activation = clip
+ self.pooling_type = "MAX"
+
+ def is_valid(self):
+ """
+ This function checks whether MaxPool2D has compatible attributes with
the NPU
+ """
+ tensor_params = [self.ifm, self.ofm]
+ if not check_valid_dtypes(tensor_params):
+ return False
+ if self.ifm.dtype != self.ofm.dtype:
+ return False
+ if not check_strides(self.strides):
+ return False
+ if not check_batch_size(self.ifm):
+ return False
+ if not check_padding(self.padding, self.padding_bounds):
+ return False
+ # Check pool size
+ if (
+ len(self.pool_shape) != 2
+ or self.pool_shape[1] > 256
+ or self.pool_shape[0] * self.pool_shape[1] > 256 * 256
+ ):
+ return False
+ return True
+
+
+def qnn_maxpool2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
+ """
+ This function creates the pattern for nn.max_pool2d with optional fused
RELU activation.
+ """
+ pattern = is_op("nn.max_pool2d")(wildcard())
+ pattern = pattern.optional(is_op("clip"))
+ return pattern
+
+
+class AvgPool2DParams:
+ """
+ This class will parse a call to a ethosu.avgpool2d composite function
+ and extract the parameter information.
+ """
+
+ composite_name = "ethosu.avgpool2d"
+ # The hardware only supports padding upto the numbers as follows
+ padding_bounds = [127, 127, 128, 128]
+
+ def __init__(self, func_body: Call):
+ clip = None
+ if str(func_body.op) == "clip":
+ clip = func_body
+ pool_op = clip.args[0]
+ else:
+ pool_op = func_body
+
+ cast2 = pool_op
+ avgpool = cast2.args[0]
+ cast1 = avgpool.args[0]
+ layout = avgpool.attrs.layout
+ self.ifm = TensorParams(cast1.args[0], layout)
+ self.ofm = TensorParams(cast2, layout)
+ self.pool_shape = [int(i) for i in avgpool.attrs.pool_size]
+ self.strides = avgpool.attrs.strides
+ self.padding = avgpool.attrs.padding
+ self.activation = clip
+ self.pooling_type = "AVG"
+
+ def is_valid(self):
+ """
+ This function checks whether AvgPool2D has compatible attributes with
the NPU
+ """
+ tensor_params = [self.ifm, self.ofm]
+ if not check_valid_dtypes(tensor_params):
+ return False
+ if self.ifm.dtype != self.ofm.dtype:
+ return False
+ if not check_strides(self.strides):
+ return False
+ if not check_batch_size(self.ifm):
+ return False
+ if not check_padding(self.padding, self.padding_bounds):
+ return False
+ # Check pool size
+ if (
+ len(self.pool_shape) != 2
+ or self.pool_shape[1] > 256
+ or self.pool_shape[0] * self.pool_shape[1] > 256 * 256
+ ):
+ return False
+ return True
Review comment:
The same as above.
##########
File path: src/relay/op/contrib/ethosu/pooling.cc
##########
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file src/relay/op/contrib/ethosu/pooling.cc
+ * \brief Pooling operators definitions for the Arm(R) Ethos(TM)-U NPU
convolution ops.
+ */
+#include <tvm/relay/op.h>
+
+#include "common.h"
+
+namespace tvm {
+namespace relay {
+namespace op {
+namespace contrib {
+namespace ethosu {
+
+/*! \brief Attributes used by the Ethos(TM)-U NPU pooling operator */
+struct EthosuPoolingAttrs : public tvm::AttrsNode<EthosuPoolingAttrs> {
+ String pooling_type;
+ double ifm_scale;
+ int ifm_zero_point;
+ double ofm_scale;
+ int ofm_zero_point;
+ Array<IndexExpr> pool_shape;
+ IndexExpr ofm_channels;
+ Array<IndexExpr> strides;
+ Array<IndexExpr> padding;
+ String activation;
+ int clip_min;
+ int clip_max;
+ String upscale;
+ String ifm_layout;
+ String ofm_layout;
+
+ TVM_DECLARE_ATTRS(EthosuPoolingAttrs, "relay.attrs.EthosuPoolingAttrs") {
+ TVM_ATTR_FIELD(pooling_type)
+ .describe("The type of the pooling. 'AVG' - average pool, 'MAX' - max
pool.");
+ TVM_ATTR_FIELD(ifm_scale).describe("The quantization scale for the Input
Feature Map tensor.");
+ TVM_ATTR_FIELD(ifm_zero_point)
+ .describe("The quantization zero point for the Input Feature Map
tensor.");
+ TVM_ATTR_FIELD(ofm_scale).describe("The quantization scale for the Output
Feature Map tensor.");
+ TVM_ATTR_FIELD(ofm_zero_point)
+ .describe("The quantization zero point for the Output Feature Map
tensor.");
+ TVM_ATTR_FIELD(pool_shape)
+ .describe("The 2 dimensional pool shape as (pool_shape_height,
pool_shape_width).")
+ .set_default(NullValue<Array<IndexExpr> >());
+ TVM_ATTR_FIELD(ofm_channels)
+ .describe(" The number of OFM channels.")
+ .set_default(NullValue<IndexExpr>());
+ TVM_ATTR_FIELD(strides)
+ .set_default(Array<IndexExpr>({1, 1}))
+ .describe("The 2 dimensional strides as (stride_height,
stride_width).");
+ TVM_ATTR_FIELD(padding)
+ .describe("The 4 dimensional padding as (pad_top, pad_left,
pad_bottom, pad_right).")
+ .set_default(Array<IndexExpr>({0, 0, 0, 0}));
+ TVM_ATTR_FIELD(activation)
+ .describe(
+ "The activation function to use. "
+ "'NONE' - no activation function. "
+ "'CLIP' - clip the output between clip_min and clip_max. "
+ "'TANH' - tanh activation function. "
+ "'SIGMOID' - sigmoid activation function. "
+ "'LUT' - use a look-up table to perform the activation function.")
+ .set_default("NONE");
+ TVM_ATTR_FIELD(clip_min)
+ .describe("The minimum clipping value if activation = 'CLIP'.")
+ .set_default(0);
+ TVM_ATTR_FIELD(clip_max)
+ .describe("The maximum clipping value if activation = 'CLIP'.")
+ .set_default(0);
+ TVM_ATTR_FIELD(upscale)
+ .describe(
+ "The 2x2 upscaling mode to apply to the Input Feature Map tensor. "
+ "'NONE' - no upscaling. "
+ "'NEAREST' - upscale using nearest neighbour. "
+ "'ZEROS' - upscale using zeros.")
+ .set_default("NONE");
+ TVM_ATTR_FIELD(ifm_layout)
+ .describe("The layout of the Input Feature Map tensor. Can be 'NHWC'
or 'NHCWB16'.")
+ .set_default("NHWC");
+ TVM_ATTR_FIELD(ofm_layout)
+ .describe("The layout of the Output Feature Map tensor. Can be 'NHWC'
or 'NHCWB16'.")
+ .set_default("NHWC");
+ }
+};
+
+TVM_REGISTER_NODE_TYPE(EthosuPoolingAttrs);
+
+bool EthosuPoolingRel(const Array<Type>& types, int num_inputs, const Attrs&
attrs,
+ const TypeReporter& reporter) {
+ int ifm_index = 0;
+ int result_index = 2;
+ ICHECK_EQ(types.size(), result_index + 1);
+
+ const auto* ifm = types[ifm_index].as<TensorTypeNode>();
+ if (ifm == nullptr) return false;
+
+ const auto* param = attrs.as<EthosuPoolingAttrs>();
+ ICHECK(param != nullptr) << "EthosuPoolingAttrs cannot be nullptr.";
+
+ bool is_avg_pooling = param->pooling_type == "AVG";
+ ICHECK(is_avg_pooling || param->pooling_type == "MAX")
+ << "Expected pooling_type 'AVG' or 'MAX' but was" << param->pooling_type;
+
+ ICHECK(ifm->dtype == DataType::UInt(8) || ifm->dtype == DataType::Int(8))
Review comment:
   These checks should always be true, because we already check the values
during legalization.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]