ashutosh-arm commented on code in PR #12970:
URL: https://github.com/apache/tvm/pull/12970#discussion_r988794150


##########
src/relay/backend/contrib/constant_transforms.h:
##########
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file src/relay/backend/contrib/utils.h

Review Comment:
   nit: the `\file` path still says `utils.h` — should it be updated to `constant_transforms.h` to match the new file name?



##########
src/relay/backend/contrib/cmsisnn/generate_constants.cc:
##########
@@ -64,22 +65,9 @@ class GenerateConstantsMutator : public MixedModeMutator {
     attrs->out_dtype = std::move(conv2d_attrs->out_dtype);
     *new_attrs = tvm::Attrs{attrs};
 
-    std::string kernel_layout = conv2d_attrs->kernel_layout.c_str();
-    int pos_o = kernel_layout.find("O");
-    int pos_h = kernel_layout.find("H");
-    int pos_w = kernel_layout.find("W");
-    int pos_i = kernel_layout.find("I");
-
-    IRModule kernel_module;
-    auto func_body = MakeTranspose(
-        kernel_expr, {Integer(pos_o), Integer(pos_h), Integer(pos_w), 
Integer(pos_i)});
-    auto kernel_func =
-        Function(FreeVars(func_body), func_body, Type(), 
FreeTypeVars(func_body, kernel_module));
-    GlobalVar kernel_var("main");
-    kernel_module->Add(kernel_var, kernel_func);
-    kernel_module = relay::transform::FoldConstant()(kernel_module);
-    kernel_func = Downcast<Function>(kernel_module->Lookup("main"));
-    return kernel_func->body;
+    Constant conv2d_kernel = Downcast<Constant>(kernel_expr);
+    conv2d_kernel = TransposeWeights(conv2d_kernel, 
conv2d_attrs->kernel_layout, "OHWI");
+    return conv2d_kernel;

Review Comment:
   Thanks for taking care of this bit :smile: 



##########
src/relay/backend/contrib/constant_transforms.h:
##########
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file src/relay/backend/contrib/utils.h
+ * \brief Transforms applied to constant operations during codegen for BYOC 
backends.
+ */
+
+#ifndef TVM_RELAY_BACKEND_CONTRIB_CONSTANT_TRANSFORMS_H_
+#define TVM_RELAY_BACKEND_CONTRIB_CONSTANT_TRANSFORMS_H_
+
+#include <tvm/relay/expr.h>
+
+#include <string>
+
+namespace tvm {
+namespace relay {
+namespace contrib {
+
+/*!
+ * \brief Apply constant folding on an expression.
+ *
+ * \param expr The expression to fold.
+ * \param fold_qnn Whether to fold constants for QNN operations.
+ * \returns The new folded expression.
+ */
+Expr FoldConstantExpr(const Expr& expr, bool fold_qnn = true);

Review Comment:
   nit: rename `fold_qnn` to `fold_qnn_consts` or `should_fold_consts` for more clarity?



##########
tests/python/contrib/test_ethosn/test_fullyconnected.py:
##########
@@ -66,26 +72,24 @@ def _get_model(
         ((1, 1280), 1000),
     ],
 )
[email protected](
-    "dtype,input_zp,input_sc,kernel_zp,kernel_sc",
-    [
-        ("uint8", 71, 0.580, 176, 1.498),
-        ("uint8", 166, 1.724, 138, 0.180),
-        ("int8", 71, 0.580, 0, 1.498),
-        ("int8", 120, 1.724, 0, 0.180),
-    ],
-)
-def test_fullyconnected(shape, out_channels, dtype, input_zp, input_sc, 
kernel_zp, kernel_sc):
[email protected]("dtype", ["uint8", "int8"])
+def test_fullyconnected(shape, out_channels, dtype):
     """Compare Fully Connected output with TVM."""
 
     np.random.seed(0)
+    iinfo = np.iinfo(dtype)
+    data_min = iinfo.min
+    data_max = iinfo.max
+
     inputs = {
-        "a": tvm.nd.array(
-            np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max + 1, 
size=shape, dtype=dtype)
-        ),
+        "a": tvm.nd.array(np.random.randint(data_min, data_max + 1, 
size=shape, dtype=dtype)),
     }
-
     outputs = []
+
+    input_zp = np.random.randint(data_min, data_max)
+    input_sc = np.random.random() * 2
+    kernel_zp = np.random.randint(data_min, data_max)

Review Comment:
   The weight zero point should be 0 per the TFLite quantization spec; shouldn't all the other (non-zero) cases be covered by the negative tests instead of being generated here?
   https://www.tensorflow.org/lite/performance/quantization_spec



##########
src/relay/backend/contrib/ethosn/ethosn_api.cc:
##########
@@ -213,27 +217,29 @@ EthosnError EthosnAPI::QnnFullyConnected(const Expr& 
expr, FullyConnectedParams*
                                       data_data_type, sl::DataFormat::NHWC, 
data_q_info);
 
   // Create weights info
-  const auto* weights_dtype = 
dense->args[1]->checked_type().as<TensorTypeNode>();
+  Constant weights_data = Downcast<Constant>(dense->args[1]);
+  weights_data = TransposeWeights(weights_data, "OI", "IO");
+  const auto* weights_ttype = 
weights_data->checked_type().as<TensorTypeNode>();
   sl::TensorShape weights_tensor_shape;
   sl::DataType weights_data_type;
   sl::DataFormat weights_data_format;
   // Ignore the error here because weights don't have a batch axis
-  Tvm2Npu(weights_dtype->shape, &weights_tensor_shape);
-  err += Tvm2Npu(weights_dtype->dtype, &weights_data_type);
+  Tvm2Npu(weights_ttype->shape, &weights_tensor_shape);
+  err += Tvm2Npu(weights_ttype->dtype, &weights_data_type);
   err += Tvm2Npu("HWIO", &weights_data_format);
-  params->weights_info = sl::TensorInfo({1, 1, weights_tensor_shape[1], 
weights_tensor_shape[0]},
+  params->weights_info = sl::TensorInfo({1, 1, weights_tensor_shape[0], 
weights_tensor_shape[1]},
                                         weights_data_type, 
weights_data_format, weights_q_info);
-  params->raw_weights = dense->args[1].as<ConstantNode>()->data->data;
+  params->raw_weights = weights_data->data;
 
   // Create bias info
   params->bias_info =
-      sl::TensorInfo({1, 1, 1, weights_tensor_shape[0]}, 
sl::DataType::INT32_QUANTIZED,
+      sl::TensorInfo({1, 1, 1, weights_tensor_shape[1]}, 
sl::DataType::INT32_QUANTIZED,

Review Comment:
   nit: if the weights layout is not already mentioned around this block/function, it would be good to add a comment stating it, for readability.



##########
src/relay/backend/contrib/ethosn/ethosn_api.cc:
##########
@@ -197,7 +198,10 @@ EthosnError EthosnAPI::QnnFullyConnected(const Expr& expr, 
FullyConnectedParams*
   sl::QuantizationInfo output_q_info;
   err += Tvm2Npu(input_zero_point, input_scale, &data_q_info);
   err += Tvm2Npu(kernel_zero_point, kernel_scale, &weights_q_info);
-  err += Tvm2Npu(0, data_q_info.GetScale() * weights_q_info.GetScale(), 
&bias_q_info);
+  std::valarray<float> bias = data_q_info.GetScale() * 
weights_q_info.GetScales();
+  const int bias_zero_point = 0;
+  const unsigned int bias_axis = 3;
+  err += Tvm2Npu(bias_zero_point, bias, bias_axis, &bias_q_info);

Review Comment:
   nit: this variable holds the product of the input and weight scales, so `bias_scale` would be a clearer name than `bias`?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to