anijain2305 commented on a change in pull request #4629: [QNN] Channel wise quantization - Quantize & Requantize
URL: https://github.com/apache/incubator-tvm/pull/4629#discussion_r363894702
##########
File path: src/relay/pass/pattern_util.h
##########
@@ -221,29 +222,69 @@ inline bool IsScalar(const Expr& expr) {
return true;
}
+/*!
+ * \brief Check if expr is a const scalar.
+ * \param expr The expr.
+ * \return True if const scalar.
+ */
+inline bool IsConstScalar(const Expr& expr) {
+ const auto* const_expr = expr.as<ConstantNode>();
+ if (const_expr) {
+ return const_expr->is_scalar();
+ }
+ return false;
+}
+
/*!
* \brief Create a Constant with a scalar
*
* \param dtype The data type.
* \param value The value of the scalar.
* \return A Constant.
*/
-template<typename T>
+template <typename T>
inline Constant MakeConstantScalar(DataType dtype, T value) {
runtime::NDArray arr = runtime::NDArray::Empty({}, dtype, {kDLCPU, 0});
TVM_DTYPE_DISPATCH(dtype, DType, {
if (dtype == DataType::Float(16)) {
// convert to float16
// storage is uint16_t
*static_cast<DType*>(arr->data) =
-        __truncXfYf2__<float, uint32_t, 23, uint16_t, uint16_t, 10>(static_cast<float>(value));
+          __truncXfYf2__<float, uint32_t, 23, uint16_t, uint16_t, 10>(static_cast<float>(value));
} else {
*static_cast<DType*>(arr->data) = value;
}
})
return ConstantNode::make(arr);
}
+/*!
+ * \brief Create a Constant with a tensor.
+ *
+ * \param dtype The data type.
+ * \param shape The shape of the tensor.
+ * \param value The vector of the tensor values.
+ * \return A Constant.
+ */
+template <typename T>
+static inline Constant MakeConstantTensor(DataType dtype, std::vector<int64_t> shape,
+                                          std::vector<T> value) {
+ runtime::NDArray arr = runtime::NDArray::Empty(shape, dtype, {kDLCPU, 0});
Review comment:
Good catch, but it's not needed. The constants are always created on the CPU
device, because they have to go through the FoldConstant pass (which can only
run on the CPU). Once we have the compiled graph, if the graph is supposed to
run on the GPU, we can create the GPU Constant object later on.
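
For context, here is a plausible completion of MakeConstantTensor, mirroring
the Float16 handling in MakeConstantScalar above. This is a sketch of the
pattern under discussion, not necessarily the exact code merged in the PR:

    template <typename T>
    static inline Constant MakeConstantTensor(DataType dtype, std::vector<int64_t> shape,
                                              std::vector<T> value) {
      runtime::NDArray arr = runtime::NDArray::Empty(shape, dtype, {kDLCPU, 0});
      TVM_DTYPE_DISPATCH(dtype, DType, {
        for (size_t i = 0; i < value.size(); i++) {
          if (dtype == DataType::Float(16)) {
            // Convert to float16 as in MakeConstantScalar; storage is uint16_t.
            *(static_cast<DType*>(arr->data) + i) =
                __truncXfYf2__<float, uint32_t, 23, uint16_t, uint16_t, 10>(
                    static_cast<float>(value[i]));
          } else {
            // All other dtypes can be stored directly element by element.
            *(static_cast<DType*>(arr->data) + i) = value[i];
          }
        }
      })
      return ConstantNode::make(arr);
    }

A hypothetical call site for channel-wise quantization (scale_values is an
illustrative name, not taken from the PR) would materialize the per-channel
scales as a 1-D float32 constant on the CPU, ready for FoldConstant:

    // Hypothetical: one float32 scale per output channel.
    std::vector<float> scale_values = {0.5f, 0.25f, 0.125f};
    Constant scales = MakeConstantTensor(
        DataType::Float(32), {static_cast<int64_t>(scale_values.size())}, scale_values);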
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services