anijain2305 commented on a change in pull request #7613:
URL: https://github.com/apache/tvm/pull/7613#discussion_r590606361
##########
File path: python/tvm/relay/qnn/op/qnn.py
##########
@@ -143,6 +178,39 @@ def dequantize(data, input_scale, input_zero_point, axis=-1):
return _make.dequantize(data, input_scale, input_zero_point, axis)
+def simulated_dequantize(data, input_scale, input_zero_point, axis=-1, in_dtype="int8"):
+ r"""Simulated Quantize op
+ Mimics the quantize op but has more flexibility in valid inputs and always
Review comment:
s/quantize/dequantize/ --> a couple more places in the docstring as well
##########
File path: python/tvm/topi/nn/qnn.py
##########
@@ -0,0 +1,186 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Quantized Neural Network (QNN) Operators"""
+import tvm
+from tvm import te, tir, topi
+
+SQNN_FP32 = 0
+SQNN_INT8 = 1
+SQNN_UINT8 = 2
+SQNN_INT32 = 3
+
+SQNN_DTYPE_TO_CODE = {
+ "float32": SQNN_FP32,
+ "int8": SQNN_INT8,
+ "uint8": SQNN_UINT8,
+ "int32": SQNN_INT32,
+}
+
+SQNN_CODE_TO_DTYPE = {v: k for k, v in SQNN_DTYPE_TO_CODE.items()}
+
+
+@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
+def simulated_quantize(data, out_dtype, output_scale=None, output_zero_point=None, axis=-1):
+ """Simulated QNN quantize operator that mimics QNN outputs in floating point. The benefit
+ of this operator over true QNN quantize is that this operator allows dynamic datatype
+ selection and can operate on both per-channel and scalar scales and zero points while
+ QNN quantize requires both of these to be fixed at compile time.
+
+ Parameters
+ ----------
+ data: tvm.te.Tensor
+ An N-D input tensor to the operator.
+
+ out_dtype: tvm.te.Tensor
+ A scalar variable that indicates which datatype to simulate quantization with. Use
+ SQNN_DTYPE_TO_CODE to convert a dtype string into the corresponding variable
+ value.
+
+ output_scale: tvm.te.Tensor, optional
+ A scalar tensor representing the scale to use when quantizing to integer datatypes.
+ When it contains more than a single value, N must match the number of channels in data.
+
+ output_zero_point: tvm.te.Tensor, optional
+ A 1-D tensor representing the zero point to use when quantizing to integer datatypes.
+ When it contains more than a single value, N must match the number of channels in data.
+
+ axis: int, optional
+ The channel axis for quantization. Default value is -1 which corresponds to the last axis.
+
+ """
+ # Since all simulated outputs are in float32, we can just return the input tensor for fp32.
+ def _compute_fp32(value, *indices):
+ return value[indices]
+
+ # Simulate quantization for arbitrary integer datatypes. The computation for all datatypes is:
+ # Q_output = clip((round(input_tensor/output_scale) + output_zero_point),
+ # out_dtype::min,
+ # out_dtype::max)
+ def _compute_intn(dtype, value, *indices):
+ assert output_scale is not None and output_zero_point is not None
+ const_min = tvm.tir.min_value(dtype)
+ const_max = tvm.tir.max_value(dtype)
+ # Use indexmod to handle both scalar and per-channel QNN parameters.
+ scale_idx = tir.indexmod(indices[axis], topi.shape(output_scale)[0])
+ zp_idx = tir.indexmod(indices[axis], topi.shape(output_zero_point)[0])
+ return te.max(
+ te.min(
+ te.round(value[indices] / output_scale[scale_idx]) + output_zero_point[zp_idx],
+ const_max,
+ ),
+ const_min,
+ )
+
+ # Use an if chain to dynamically return the proper quantization based on the input datatype.
+ # This allows the op to compile once but apply different quantization approaches
+ # using a variable datatype input.
+ def _dispatch_sim_quantize(value):
Review comment:
+1, clever trick.
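For readers skimming the diff, the "trick" is the chain of `tir.if_then_else` stages: the op compiles once, and a runtime scalar selects which simulated dtype applies. A minimal sketch of the same pattern, reduced to a float32 pass-through plus one int8 branch (names like `dtype_code` and `_sim_int8` are illustrative, not from the PR):

```python
import tvm
from tvm import te, tir

n = te.var("n")
data = te.placeholder((n,), dtype="float32", name="data")
# Runtime scalar choosing the simulated dtype: 0 = float32, 1 = int8.
dtype_code = te.var("dtype_code", dtype="int32")

def _sim_int8(value, i):
    # Simulate int8 in float32: round, then clip to [-128, 127].
    return te.max(
        te.min(te.round(value[i]), tir.const(127, "float32")),
        tir.const(-128, "float32"),
    )

# Innermost stage: plain pass-through (the float32 case).
fp32_out = te.compute((n,), lambda i: data[i], name="fp32_out")
# Outer stage wraps the inner one; the branch is taken at runtime.
out = te.compute(
    (n,),
    lambda i: tir.if_then_else(dtype_code.equal(1), _sim_int8(data, i), fp32_out[i]),
    name="dispatch",
)

# One build serves every dtype code passed in at call time.
s = te.create_schedule(out.op)
f = tvm.build(s, [data, dtype_code, out], "llvm")
```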
##########
File path: python/tvm/relay/qnn/op/qnn.py
##########
@@ -143,6 +178,39 @@ def dequantize(data, input_scale, input_zero_point, axis=-1):
return _make.dequantize(data, input_scale, input_zero_point, axis)
+def simulated_dequantize(data, input_scale, input_zero_point, axis=-1, in_dtype="int8"):
+ r"""Simulated Quantize op
Review comment:
s/Quantize/Dequantize/
##########
File path: python/tvm/topi/nn/qnn.py
##########
@@ -0,0 +1,186 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Quantized Neural Network (QNN) Operators"""
+import tvm
+from tvm import te, tir, topi
+
+SQNN_FP32 = 0
+SQNN_INT8 = 1
+SQNN_UINT8 = 2
+SQNN_INT32 = 3
+
+SQNN_DTYPE_TO_CODE = {
+ "float32": SQNN_FP32,
+ "int8": SQNN_INT8,
+ "uint8": SQNN_UINT8,
+ "int32": SQNN_INT32,
+}
+
+SQNN_CODE_TO_DTYPE = {v: k for k, v in SQNN_DTYPE_TO_CODE.items()}
+
+
+@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
+def simulated_quantize(data, out_dtype, output_scale=None, output_zero_point=None, axis=-1):
+ """Simulated QNN quantize operator that mimics QNN outputs in floating point. The benefit
+ of this operator over true QNN quantize is that this operator allows dynamic datatype
+ selection and can operate on both per-channel and scalar scales and zero points while
+ QNN quantize requires both of these to be fixed at compile time.
+
+ Parameters
+ ----------
+ data: tvm.te.Tensor
+ An N-D input tensor to the operator.
+
+ out_dtype: tvm.te.Tensor
+ A scalar variable that indicates which datatype to simulate quantization with. Use
+ SQNN_DTYPE_TO_CODE to convert a dtype string into the corresponding variable
+ value.
+
+ output_scale: tvm.te.Tensor, optional
+ A scalar tensor representing the scale to use when quantizing to integer datatypes.
+ When it contains more than a single value, N must match the number of channels in data.
+
+ output_zero_point: tvm.te.Tensor, optional
Review comment:
Typically, the zero points are scalar; even for asymmetric quantization they are scalar, mostly for performance reasons. But since these ops are generic, it's better to keep them the way you have.
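Agreed. For what it's worth, the `indexmod` indexing in `_compute_intn` is what lets one expression serve both layouts: with a length-1 parameter tensor, `indexmod(c, 1)` is 0 for every channel (scalar behavior), while with a length-C tensor `indexmod(c, C)` is just `c` (per-channel behavior). A minimal sketch of that indexing in isolation (names here are illustrative, not from the PR):

```python
import tvm
from tvm import te, tir, topi

# Channels live on axis 1; `scale` may hold 1 element (scalar) or 3 (per channel).
data = te.placeholder((4, 3), dtype="float32", name="data")
scale = te.placeholder((te.var("k"),), dtype="float32", name="scale")

scaled = te.compute(
    data.shape,
    # indexmod(c, 1) == 0 for all c; indexmod(c, 3) == c for c in [0, 3).
    lambda i, c: data[i, c] / scale[tir.indexmod(c, topi.shape(scale)[0])],
    name="scaled",
)
```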
##########
File path: src/relay/qnn/op/simulated_quantize.cc
##########
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file src/relay/qnn/op/simulated_quantize.cc
+ * \brief QNN simulated quantize operator. Mimics the behavior
+ * of QNN quantize in floating point with added flexibility.
+ */
+
+#include <tvm/relay/analysis.h>
+#include <tvm/relay/op_attr_types.h>
+#include <tvm/relay/qnn/attrs.h>
+
+#include "../../transforms/pattern_utils.h"
+#include "../utils.h"
+
+namespace tvm {
+namespace relay {
+namespace qnn {
+
+TVM_REGISTER_NODE_TYPE(SimulatedQuantizeAttrs);
+
+bool SimulatedQuantizeRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
Review comment:
Minor suggestion (feel free to ignore): how about sharing the TypeRel functions for Quantize and Dequantize? They seem to be the same.
##########
File path: python/tvm/topi/nn/qnn.py
##########
@@ -0,0 +1,186 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Quantized Neural Network (QNN) Operators"""
+import tvm
+from tvm import te, tir, topi
+
+SQNN_FP32 = 0
+SQNN_INT8 = 1
+SQNN_UINT8 = 2
+SQNN_INT32 = 3
+
+SQNN_DTYPE_TO_CODE = {
+ "float32": SQNN_FP32,
+ "int8": SQNN_INT8,
+ "uint8": SQNN_UINT8,
+ "int32": SQNN_INT32,
+}
+
+SQNN_CODE_TO_DTYPE = {v: k for k, v in SQNN_DTYPE_TO_CODE.items()}
+
+
+@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
+def simulated_quantize(data, out_dtype, output_scale=None, output_zero_point=None, axis=-1):
+ """Simulated QNN quantize operator that mimics QNN outputs in floating point. The benefit
+ of this operator over true QNN quantize is that this operator allows dynamic datatype
+ selection and can operate on both per-channel and scalar scales and zero points while
+ QNN quantize requires both of these to be fixed at compile time.
+
+ Parameters
+ ----------
+ data: tvm.te.Tensor
+ An N-D input tensor to the operator.
+
+ out_dtype: tvm.te.Tensor
+ A scalar variable that indicates which datatype to simulate quantization with. Use
+ SQNN_DTYPE_TO_CODE to convert a dtype string into the corresponding variable
+ value.
+
+ output_scale: tvm.te.Tensor, optional
+ A scalar tensor representing the scale to use when quantizing to integer datatypes.
+ When it contains more than a single value, N must match the number of channels in data.
+
+ output_zero_point: tvm.te.Tensor, optional
+ A 1-D tensor representing the zero point to use when quantizing to integer datatypes.
+ When it contains more than a single value, N must match the number of channels in data.
+
+ axis: int, optional
+ The channel axis for quantization. Default value is -1 which corresponds to the last axis.
+
+ """
+ # Since all simulated outputs are in float32, we can just return the input tensor for fp32.
+ def _compute_fp32(value, *indices):
+ return value[indices]
+
+ # Simulate quantization for arbitrary integer datatypes. The computation for all datatypes is:
+ # Q_output = clip((round(input_tensor/output_scale) + output_zero_point),
+ # out_dtype::min,
+ # out_dtype::max)
+ def _compute_intn(dtype, value, *indices):
+ assert output_scale is not None and output_zero_point is not None
+ const_min = tvm.tir.min_value(dtype)
+ const_max = tvm.tir.max_value(dtype)
+ # Use indexmod to handle both scalar and per-channel QNN parameters.
+ scale_idx = tir.indexmod(indices[axis], topi.shape(output_scale)[0])
+ zp_idx = tir.indexmod(indices[axis], topi.shape(output_zero_point)[0])
+ return te.max(
+ te.min(
+ te.round(value[indices] / output_scale[scale_idx]) + output_zero_point[zp_idx],
+ const_max,
+ ),
+ const_min,
+ )
+
+ # Use an if chain to dynamically return the proper quantization based on the input datatype.
+ # This allows the op to compile once but apply different quantization approaches
+ # using a variable datatype input.
+ def _dispatch_sim_quantize(value):
+ fp32_value = te.compute(data.shape, lambda *indices: _compute_fp32(value, *indices))
+ int8_value = te.compute(
+ data.shape,
+ lambda *indices: tir.if_then_else(
+ out_dtype.equal(SQNN_DTYPE_TO_CODE["int8"]),
+ _compute_intn("int8", value, *indices),
+ fp32_value[indices],
+ ),
+ )
+ uint8_value = te.compute(
+ data.shape,
+ lambda *indices: tir.if_then_else(
+ out_dtype.equal(SQNN_DTYPE_TO_CODE["uint8"]),
+ _compute_intn("uint8", value, *indices),
+ int8_value[indices],
+ ),
+ )
+ int32_value = te.compute(
+ data.shape,
+ lambda *indices: tir.if_then_else(
+ out_dtype.equal(SQNN_DTYPE_TO_CODE["int32"]),
+ _compute_intn("int32", value, *indices),
+ uint8_value[indices],
+ ),
+ )
+
+ return int32_value
+
+ return te.compute(data.shape, lambda *indices: _dispatch_sim_quantize(data)[indices])
+
+
+@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
+def simulated_dequantize(data, in_dtype, input_scale=None, input_zero_point=None, axis=-1):
+ """Simulated QNN dequantize operator that mimics QNN outputs in floating point. The benefit
+ of this operator over true QNN quantize is that this operator allows dynamic datatype
Review comment:
s/quantize/dequantize/
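To close the loop on how these ops are meant to be used, here is a hypothetical Relay-level round trip. It assumes the quantize variant mirrors the `simulated_dequantize` signature quoted above; the scale and zero point values are made up for illustration:

```python
import tvm
from tvm import relay

x = relay.var("x", shape=(2, 3), dtype="float32")
scale = relay.const(0.5, "float32")
zp = relay.const(1, "int32")

# Simulate int8 quantization, then undo it; everything stays float32,
# so the pair can sit inside an otherwise-float training graph.
q = relay.qnn.op.simulated_quantize(x, scale, zp, axis=-1, out_dtype="int8")
dq = relay.qnn.op.simulated_dequantize(q, scale, zp, axis=-1, in_dtype="int8")
mod = tvm.IRModule.from_expr(relay.Function([x], dq))
```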