This is an automated email from the ASF dual-hosted git repository.

sanirudh pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 19903157bb [Relay] Expose qnn ops directly from relay.qnn module (#15928)
19903157bb is described below

commit 19903157bb4322f0daa1d9aed92acf1ade80dde1
Author: Anirudh Sundar Subramaniam <[email protected]>
AuthorDate: Tue Oct 17 11:35:27 2023 +0530

    [Relay] Expose qnn ops directly from relay.qnn module (#15928)
    
    * [Relay] Expose qnn ops directly from relay.qnn module
    
    `nn` ops in Relay can be accessed directly as `relay.nn.<op>`, where
    `<op>` is any nn op such as `conv2d`, `avg_pool2d`, etc.
    
    `qnn` ops, by contrast, have to be accessed as `relay.qnn.op.<op>`,
    because they are not exposed in the `relay.qnn` module itself. This
    change adds that exposure.
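    
    For example, a minimal sketch of the two access paths (the
    `quantize` call mirrors `test_disable_qnn_legalize_pass` in the
    diff below; the shape and constant values are illustrative):
    
        from tvm import relay
    
        x = relay.var("x", shape=(4, 8), dtype="float32")
        # Nested path (unchanged, still available):
        op0 = relay.qnn.op.quantize(x, relay.const(2.0), relay.const(10), out_dtype="uint8")
        # Direct path enabled by this change:
        op1 = relay.qnn.quantize(x, relay.const(2.0), relay.const(10), out_dtype="uint8")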
    
    * Update tests to use qnn ops directly
    
    * Fix lint errors
---
 python/tvm/relay/qnn/__init__.py                   |  3 +-
 .../test_hexagon/test_qnn_op_integration.py        | 38 +++++++++++-----------
 .../test_hexagon/test_relay_simplify_qnn_concat.py | 10 +++---
 tests/python/relay/qnn/test_clip_legalization.py   |  2 +-
 .../python/relay/qnn/test_qnn_channel_stripping.py | 16 ++++-----
 tests/python/relay/test_op_qnn_add.py              | 14 ++++----
 tests/python/relay/test_op_qnn_batch_matmul.py     |  4 +--
 tests/python/relay/test_op_qnn_concatenate.py      | 10 +++---
 tests/python/relay/test_op_qnn_conv2_transpose.py  |  4 +--
 tests/python/relay/test_op_qnn_conv2d.py           |  4 +--
 tests/python/relay/test_op_qnn_dense.py            |  4 +--
 tests/python/relay/test_op_qnn_dequantize.py       |  4 +--
 tests/python/relay/test_op_qnn_leaky_relu.py       |  2 +-
 tests/python/relay/test_op_qnn_mul.py              | 10 +++---
 tests/python/relay/test_op_qnn_quantize.py         |  4 +--
 tests/python/relay/test_op_qnn_requantize.py       |  8 ++---
 .../relay/test_op_qnn_simulated_dequantize.py      |  4 +--
 .../python/relay/test_op_qnn_simulated_quantize.py |  4 +--
 tests/python/relay/test_op_qnn_subtract.py         |  2 +-
 .../python/relay/test_op_qnn_unary_elementwise.py  | 32 +++++++++---------
 tests/python/relay/test_pass_qnn_legalize.py       | 18 +++++-----
 tests/python/topi/python/test_topi_qnn.py          |  4 +--
 22 files changed, 100 insertions(+), 101 deletions(-)

diff --git a/python/tvm/relay/qnn/__init__.py b/python/tvm/relay/qnn/__init__.py
index fa888d7ce7..af6a0b0449 100644
--- a/python/tvm/relay/qnn/__init__.py
+++ b/python/tvm/relay/qnn/__init__.py
@@ -14,8 +14,9 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-# pylint: disable=wildcard-import
+# pylint: disable=wildcard-import,redefined-builtin
 """QNN dialect operators and IR passes."""
 from __future__ import absolute_import as _abs
 from . import op
 from . import transform
+from .op.qnn import *
diff --git a/tests/python/contrib/test_hexagon/test_qnn_op_integration.py b/tests/python/contrib/test_hexagon/test_qnn_op_integration.py
index 8cff4ed626..dbf217ce4e 100644
--- a/tests/python/contrib/test_hexagon/test_qnn_op_integration.py
+++ b/tests/python/contrib/test_hexagon/test_qnn_op_integration.py
@@ -35,8 +35,8 @@ from .infrastructure import quantize_np
 def test_disable_qnn_legalize_pass():
     """No QNN pass test."""
     x = relay.var("x", shape=(4, 8), dtype="float32")
-    op0 = relay.qnn.op.quantize(x, relay.const(2.0), relay.const(10), out_dtype="uint8")
-    op1 = relay.qnn.op.dequantize(op0, relay.const(0.5), relay.const(5))
+    op0 = relay.qnn.quantize(x, relay.const(2.0), relay.const(10), out_dtype="uint8")
+    op1 = relay.qnn.dequantize(op0, relay.const(0.5), relay.const(5))
     relay_mod = tvm.IRModule.from_expr(op1)
 
     target_hexagon = tvm.target.hexagon("v68")
@@ -133,7 +133,7 @@ class TestQnnQuantize:
 
         def gen_relay_expr_qnn(output_scale, output_zero_point):
             data = relay.var("data", shape=input_shape, dtype="float32")
-            qnn_quantize = relay.qnn.op.quantize(
+            qnn_quantize = relay.qnn.quantize(
                 data,
                 output_scale=relay.const(output_scale),
                 output_zero_point=relay.const(output_zero_point),
@@ -167,7 +167,7 @@ class TestQnnDequantize:
 
         def gen_relay_expr_qnn(dtype, input_scale, input_zero_point):
             data = relay.var("data", shape=input_shape, dtype=dtype)
-            qnn_dequantize = relay.qnn.op.dequantize(
+            qnn_dequantize = relay.qnn.dequantize(
                 data,
                 input_scale=relay.const(input_scale),
                 input_zero_point=relay.const(input_zero_point),
@@ -199,7 +199,7 @@ class TestQnnRequantize:
         data_shape = [256]
         data = relay.var("data", shape=data_shape, dtype="int32")
 
-        op = relay.qnn.op.requantize(
+        op = relay.qnn.requantize(
             data,
             input_scale=relay.const(0.156),
             input_zero_point=relay.const(2),
@@ -256,7 +256,7 @@ class TestQnnAvgPool2d:
             dtype, input_scale, input_zero_point, output_scale, output_zero_point
         ):
             data = relay.var("data", shape=input_shape, dtype=dtype)
-            qnn_avg_pool = relay.qnn.op.avg_pool2d(
+            qnn_avg_pool = relay.qnn.avg_pool2d(
                 data,
                 input_scale=relay.const(input_scale),
                 input_zero_point=relay.const(input_zero_point),
@@ -305,7 +305,7 @@ class TestQnnAvgPool2d:
 class TestQnnBinaryOp:
     """QNN binary op test class"""
 
-    operation = tvm.testing.parameter(relay.qnn.op.add, relay.qnn.op.subtract, relay.qnn.op.mul)
+    operation = tvm.testing.parameter(relay.qnn.add, relay.qnn.subtract, relay.qnn.mul)
     dtype = tvm.testing.parameter("uint8", "int8")
     input_shape = tvm.testing.parameter([256], [4, 256])
 
@@ -375,7 +375,7 @@ class TestQnnConcatenate:
         input_y = relay.var("y", shape=y_shape, dtype="uint8")
         input_z = relay.var("z", shape=z_shape, dtype="uint8")
 
-        op = relay.qnn.op.concatenate(
+        op = relay.qnn.concatenate(
             (input_x, input_y, input_z),
             input_scales=(relay.const(0.3), relay.const(0.7), relay.const(1.3)),
             input_zero_points=(relay.const(0), relay.const(1), relay.const(2)),
@@ -404,9 +404,9 @@ class TestQnnConv2D:
         weight_shape = [16, 8, 3, 3]
         data = relay.var("data", shape=data_shape, dtype="float32")
         weight = relay.var("weight", shape=weight_shape, dtype="float32")
-        op0 = relay.qnn.op.quantize(data, relay.const(0.078), relay.const(0), out_dtype="uint8")
-        op1 = relay.qnn.op.quantize(weight, relay.const(0.07), relay.const(0), out_dtype="int8")
-        op2 = relay.qnn.op.conv2d(
+        op0 = relay.qnn.quantize(data, relay.const(0.078), relay.const(0), out_dtype="uint8")
+        op1 = relay.qnn.quantize(weight, relay.const(0.07), relay.const(0), out_dtype="int8")
+        op2 = relay.qnn.conv2d(
             op0,
             op1,
             input_zero_point=relay.const(0),
@@ -417,7 +417,7 @@ class TestQnnConv2D:
             channels=16,
             kernel_size=[3, 3],
         )
-        op5 = relay.qnn.op.requantize(
+        op5 = relay.qnn.requantize(
             op2,
             input_scale=relay.const(0.05),
             input_zero_point=relay.const(0),
@@ -448,11 +448,11 @@ class TestQnnDense:
         wscale = relay.const(0.37)
 
         def before():
-            return relay.qnn.op.dense(data, weight, zero, zero, iscale, wscale, units=None)
+            return relay.qnn.dense(data, weight, zero, zero, iscale, wscale, units=None)
 
         def expected():
             op0 = relay.layout_transform(weight, src_layout="NC", dst_layout="NC32n4c")
-            return relay.qnn.op.contrib_dense_pack(data, op0, zero, zero, iscale, wscale, "NC32n4c")
+            return relay.qnn.contrib_dense_pack(data, op0, zero, zero, iscale, wscale, "NC32n4c")
 
         target = tvm.target.hexagon("v68")
         with tvm.target.Target(target):
@@ -478,7 +478,7 @@ class TestQnnDense:
         weight = relay.var("weight", shape=weight_shape, dtype=dtype)
         bias = relay.var("bias", shape=bias_shape, dtype="int32")
 
-        op0 = relay.qnn.op.dense(
+        op0 = relay.qnn.dense(
             data,
             weight,
             input_zero_point=relay.const(2),
@@ -488,7 +488,7 @@ class TestQnnDense:
             units=None,
         )
         op1 = relay.nn.bias_add(op0, bias)
-        op2 = relay.qnn.op.requantize(
+        op2 = relay.qnn.requantize(
             op1,
             input_scale=relay.const(1.3),
             input_zero_point=relay.const(4),
@@ -520,7 +520,7 @@ class TestQnnDense:
         data = relay.var("data", shape=data_shape, dtype="uint8")
         weight = relay.var("weight", shape=weight_shape, dtype="int8")
 
-        op0 = relay.qnn.op.dense(
+        op0 = relay.qnn.dense(
             data,
             weight,
             input_zero_point=relay.const(0),
@@ -529,7 +529,7 @@ class TestQnnDense:
             kernel_scale=relay.const(0.19),
             units=64,
         )
-        op1 = relay.qnn.op.requantize(
+        op1 = relay.qnn.requantize(
             op0,
             input_scale=relay.const(0.1),
             input_zero_point=relay.const(0),
@@ -558,7 +558,7 @@ class TestQnnTanh:
         data_shape = [256]
         data = relay.var("data", shape=data_shape, dtype="uint8")
 
-        op = relay.qnn.op.tanh(
+        op = relay.qnn.tanh(
             data,
             scale=relay.const(0.518),
             zero_point=relay.const(137),
diff --git a/tests/python/contrib/test_hexagon/test_relay_simplify_qnn_concat.py b/tests/python/contrib/test_hexagon/test_relay_simplify_qnn_concat.py
index ad1d7592fc..728ec81243 100644
--- a/tests/python/contrib/test_hexagon/test_relay_simplify_qnn_concat.py
+++ b/tests/python/contrib/test_hexagon/test_relay_simplify_qnn_concat.py
@@ -43,11 +43,11 @@ def get_test_module():
         ceil_mode=False,
         layout="NHWC",
     )
-    r2 = relay.qnn.op.requantize(q2, s2, z1, s5, z1, axis=1, out_dtype="uint8")
+    r2 = relay.qnn.requantize(q2, s2, z1, s5, z1, axis=1, out_dtype="uint8")
     q_tuple = relay.expr.Tuple([r1, r2, q3])
     s_tuple = relay.expr.Tuple([s4, s5, s3])
     z_tuple = relay.expr.Tuple([z1, z1, z1])
-    graph = relay.qnn.op.concatenate(q_tuple, s_tuple, z_tuple, s3, z1, axis=1)
+    graph = relay.qnn.concatenate(q_tuple, s_tuple, z_tuple, s3, z1, axis=1)
 
     func = relay.Function(relay.analysis.free_vars(graph), graph)
     mod = tvm.IRModule.from_expr(func)
@@ -72,12 +72,10 @@ def get_expected_output_module():
         ceil_mode=False,
         layout="NHWC",
     )
-    out_r1 = relay.qnn.op.requantize(
+    out_r1 = relay.qnn.requantize(
         nn_max_pool, out_s4, out_z1, out_s3, out_z1, axis=1, out_dtype="uint8"
     )
-    out_r2 = relay.qnn.op.requantize(
-        out_q2, out_s2, out_z1, out_s3, out_z1, axis=1, out_dtype="uint8"
-    )
+    out_r2 = relay.qnn.requantize(out_q2, out_s2, out_z1, out_s3, out_z1, axis=1, out_dtype="uint8")
     out_q_tuple = relay.expr.Tuple([out_r1, out_r2, out_q3])
     out_graph = relay.op.concatenate(out_q_tuple, axis=1)
 
diff --git a/tests/python/relay/qnn/test_clip_legalization.py b/tests/python/relay/qnn/test_clip_legalization.py
index d1a9c5901a..b7ccaccd98 100644
--- a/tests/python/relay/qnn/test_clip_legalization.py
+++ b/tests/python/relay/qnn/test_clip_legalization.py
@@ -52,7 +52,7 @@ def test_removes_redundant_requantize_clip_ops(dtype, min_val, max_val, is_redun
     the clip operator match the min and max values of the data type."""
 
     input_var = relay.var("input", shape=(1, 3, 3, 4), dtype="int32")
-    out = relay.qnn.op.requantize(
+    out = relay.qnn.requantize(
         input_var,
         tvm_const(np.float32(1.0)),
         tvm_const(np.int32(0)),
diff --git a/tests/python/relay/qnn/test_qnn_channel_stripping.py b/tests/python/relay/qnn/test_qnn_channel_stripping.py
index 25197ca84c..d0d32567a8 100644
--- a/tests/python/relay/qnn/test_qnn_channel_stripping.py
+++ b/tests/python/relay/qnn/test_qnn_channel_stripping.py
@@ -58,7 +58,7 @@ def make_test_conv_depthwise_conv():
     input_scale_1 = np.float32(0.5)
     output_scale_1 = np.array([0.5, 2.0, 0.25, 4.0], dtype="float32")
 
-    out = relay.qnn.op.conv2d(
+    out = relay.qnn.conv2d(
         input_var,
         tvm_const(kernel_1),
         tvm_const(np.int32(-128)),
@@ -80,7 +80,7 @@ def make_test_conv_depthwise_conv():
     )
 
     input_scale_2 = np.float32(0.25)
-    out = relay.qnn.op.requantize(
+    out = relay.qnn.requantize(
         out,
         tvm_const(input_scale_1 * output_scale_1),
         tvm_const(np.int32(0)),
@@ -106,7 +106,7 @@ def make_test_conv_depthwise_conv():
         dtype="int8",
     ).reshape((3, 3, 4, 1))
     output_scale_2 = np.array([0.25, 0.125, 2.0, 0.125], dtype="float32")
-    out = relay.qnn.op.conv2d(
+    out = relay.qnn.conv2d(
         out,
         tvm_const(kernel_2),
         tvm_const(np.int32(-128)),
@@ -129,7 +129,7 @@ def make_test_conv_depthwise_conv():
     )
 
     input_scale_3 = np.float32(0.125)
-    out = relay.qnn.op.requantize(
+    out = relay.qnn.requantize(
         out,
         tvm_const(input_scale_2 * output_scale_2),
         tvm_const(np.int32(0)),
@@ -145,7 +145,7 @@ def make_test_conv_depthwise_conv():
     ).reshape((1, 1, 4, 4))
     output_scale_3 = np.array([0.25, 0.125, 1.0, 0.5], dtype="float32")
 
-    out = relay.qnn.op.conv2d(
+    out = relay.qnn.conv2d(
         out,
         tvm_const(kernel_3),
         tvm_const(np.int32(-128)),
@@ -181,7 +181,7 @@ def make_test_conv_pool_dense():
     input_scale = np.float32(0.029626124)
     output_scale = np.array([0.5, 2.0, 0.25, 4.0], dtype="float32")
 
-    out = relay.qnn.op.conv2d(
+    out = relay.qnn.conv2d(
         input_var,
         tvm_const(kernel),
         tvm_const(np.int32(-128)),
@@ -202,7 +202,7 @@ def make_test_conv_pool_dense():
         axis=3,
     )
 
-    out = relay.qnn.op.requantize(
+    out = relay.qnn.requantize(
         out,
         tvm_const(input_scale * output_scale),
         tvm_const(np.int32(0)),
@@ -226,7 +226,7 @@ def make_test_conv_pool_dense():
     out = relay.reshape(out, newshape=[-1, 4])
 
     dense_weights = np.array([[15, -2, -3, 11], [12, -10, 13, -10]], dtype="int8")
-    out = relay.qnn.op.dense(
+    out = relay.qnn.dense(
         out,
         tvm_const(dense_weights),
         tvm_const(np.int32(-128)),
diff --git a/tests/python/relay/test_op_qnn_add.py b/tests/python/relay/test_op_qnn_add.py
index 0599e159a6..ed2b1723bb 100644
--- a/tests/python/relay/test_op_qnn_add.py
+++ b/tests/python/relay/test_op_qnn_add.py
@@ -25,7 +25,7 @@ def test_tflite_same_io_qnn_params():
 
     x = relay.var("x", shape=(1, 4), dtype=data_dtype)
     y = relay.var("y", shape=(1, 4), dtype=data_dtype)
-    z = relay.qnn.op.add(
+    z = relay.qnn.add(
         lhs=x,
         rhs=y,
         lhs_scale=relay.const(0.00784314, "float32"),
@@ -74,7 +74,7 @@ def test_tflite_different_io_qnn_params():
 
     x = relay.var("x", shape=(1, 4), dtype=data_dtype)
     y = relay.var("y", shape=(1, 4), dtype=data_dtype)
-    z = relay.qnn.op.add(
+    z = relay.qnn.add(
         lhs=x,
         rhs=y,
         lhs_scale=relay.const(0.0156863, "float32"),
@@ -123,7 +123,7 @@ def test_saturation():
     data_dtype = "uint8"
     x = relay.var("x", shape=(1, 4), dtype=data_dtype)
     y = relay.var("y", shape=(1, 4), dtype=data_dtype)
-    z = relay.qnn.op.add(
+    z = relay.qnn.add(
         lhs=x,
         rhs=y,
         lhs_scale=relay.const(0.125, "float32"),
@@ -151,7 +151,7 @@ def test_saturation():
     np.testing.assert_equal(op_res.numpy(), golden_output)
 
     # Same params, different scale
-    z = relay.qnn.op.add(
+    z = relay.qnn.add(
         lhs=x,
         rhs=y,
         lhs_scale=relay.const(0.125, "float32"),
@@ -178,7 +178,7 @@ def test_saturation():
     np.testing.assert_equal(op_res.numpy(), golden_output)
 
     # Same io params, different output scale
-    z = relay.qnn.op.add(
+    z = relay.qnn.add(
         lhs=x,
         rhs=y,
         lhs_scale=relay.const(0.125, "float32"),
@@ -205,7 +205,7 @@ def test_saturation():
     np.testing.assert_equal(op_res.numpy(), golden_output)
 
     # All params different
-    z = relay.qnn.op.add(
+    z = relay.qnn.add(
         lhs=x,
         rhs=y,
         lhs_scale=relay.const(0.5, "float32"),
@@ -237,7 +237,7 @@ def test_ignore_channel_axis():
 
     x = relay.var("x", shape=(4,), dtype=data_dtype)
     y = relay.var("y", shape=(4,), dtype=data_dtype)
-    z = relay.qnn.op.add(
+    z = relay.qnn.add(
         lhs=x,
         rhs=y,
         lhs_scale=relay.const(0.00784314, "float32"),
diff --git a/tests/python/relay/test_op_qnn_batch_matmul.py b/tests/python/relay/test_op_qnn_batch_matmul.py
index 8e0d962352..278b6f7253 100644
--- a/tests/python/relay/test_op_qnn_batch_matmul.py
+++ b/tests/python/relay/test_op_qnn_batch_matmul.py
@@ -182,7 +182,7 @@ def qnn_batch_matmul_driver(test_configuration):
     expected_out_dtype = test_configuration["out_dtype"]
     quantized_x = relay.var(quantized_x_name, shape=test_configuration["x_shape"], dtype=in_dtype)
     quantized_y = relay.var(quantized_y_name, shape=test_configuration["y_shape"], dtype=in_dtype)
-    mod = relay.qnn.op.batch_matmul(
+    mod = relay.qnn.batch_matmul(
         quantized_x,
         quantized_y,
         relay.const(test_configuration["x_zero_point"], "int32"),
@@ -192,7 +192,7 @@ def qnn_batch_matmul_driver(test_configuration):
     )
     if test_configuration["requantize"] is not None:
         requantize_config = test_configuration["requantize"]
-        mod = relay.qnn.op.requantize(
+        mod = relay.qnn.requantize(
             mod,
             input_scale=relay.const(requantize_config["input_scale"], "float32"),
             input_zero_point=relay.const(0, "int32"),
diff --git a/tests/python/relay/test_op_qnn_concatenate.py b/tests/python/relay/test_op_qnn_concatenate.py
index c5f7bf1908..7ad6318ae4 100644
--- a/tests/python/relay/test_op_qnn_concatenate.py
+++ b/tests/python/relay/test_op_qnn_concatenate.py
@@ -34,7 +34,7 @@ def test_same_io_qnn_params():
 
     x = relay.var("x", shape=(1, 64), dtype=data_dtype)
     y = relay.var("y", shape=(1, 64), dtype=data_dtype)
-    z = relay.qnn.op.concatenate(
+    z = relay.qnn.concatenate(
         (x, y),
         input_scales=(x_scale, y_scale),
         input_zero_points=(zero, zero),
@@ -70,7 +70,7 @@ def test_different_io_qnn_params():
 
     x = relay.var("x", shape=(1, 64), dtype=data_dtype)
     y = relay.var("y", shape=(1, 64), dtype=data_dtype)
-    z = relay.qnn.op.concatenate(
+    z = relay.qnn.concatenate(
         (x, y),
         input_scales=(x_scale, y_scale),
         input_zero_points=(x_zero_point, y_zero_point),
@@ -106,7 +106,7 @@ def test_few_same_io_qnn_params():
 
     x = relay.var("x", shape=(1, 64), dtype=data_dtype)
     y = relay.var("y", shape=(1, 64), dtype=data_dtype)
-    z = relay.qnn.op.concatenate(
+    z = relay.qnn.concatenate(
         (x, y),
         input_scales=(x_scale, y_scale),
         input_zero_points=(x_zero_point, y_zero_point),
@@ -142,7 +142,7 @@ def test_same_i_qnn_params():
 
     x = relay.var("x", shape=(1, 64), dtype=data_dtype)
     y = relay.var("y", shape=(1, 64), dtype=data_dtype)
-    z = relay.qnn.op.concatenate(
+    z = relay.qnn.concatenate(
         (x, y),
         input_scales=(x_scale, y_scale),
         input_zero_points=(x_zero_point, y_zero_point),
@@ -177,7 +177,7 @@ def test_call_input():
     y_zero_point = relay.const(0, "int32")
 
     tup = relay.split(x, 2, axis=0)
-    z = relay.qnn.op.concatenate(
+    z = relay.qnn.concatenate(
         tup,
         input_scales=(x_scale, y_scale),
         input_zero_points=(x_zero_point, y_zero_point),
diff --git a/tests/python/relay/test_op_qnn_conv2_transpose.py b/tests/python/relay/test_op_qnn_conv2_transpose.py
index 18ad68df9e..b226d0a33a 100644
--- a/tests/python/relay/test_op_qnn_conv2_transpose.py
+++ b/tests/python/relay/test_op_qnn_conv2_transpose.py
@@ -80,7 +80,7 @@ def get_qnn_func(
     channels,
     groups,
 ):
-    func = relay.qnn.op.conv2d_transpose(
+    func = relay.qnn.conv2d_transpose(
         data,
         kernel,
         input_zero_point=relay.const(input_zero_point, "int32"),
@@ -681,7 +681,7 @@ def test_per_channel_kernel_scale():
     kernel = relay.var("kernel", shape=kernel_shape, dtype=kernel_dtype)
     kernel_scales = [2, 2, 2]
     kernel_scales = relay.const(np.array(kernel_scales).astype("float32"))
-    func = relay.qnn.op.conv2d_transpose(
+    func = relay.qnn.conv2d_transpose(
         data,
         kernel,
         input_zero_point=relay.const(0, "int32"),
diff --git a/tests/python/relay/test_op_qnn_conv2d.py b/tests/python/relay/test_op_qnn_conv2d.py
index e7d2c8941b..e10decb060 100644
--- a/tests/python/relay/test_op_qnn_conv2d.py
+++ b/tests/python/relay/test_op_qnn_conv2d.py
@@ -104,7 +104,7 @@ def get_qnn_func(
     if isinstance(kernel_zero_point, (int, float)):
         kernel_zero_point = relay.const(kernel_zero_point, "int32")
 
-    func = relay.qnn.op.conv2d(
+    func = relay.qnn.conv2d(
         data,
         kernel,
         input_zero_point=input_zero_point,
@@ -1077,7 +1077,7 @@ def test_per_channel_kernel_scale():
         kernel = relay.var("kernel", shape=kernel_shape, dtype=kernel_dtype)
         kernel_scales = [2, 2, 2]
         kernel_scales = relay.const(np.array(kernel_scales).astype("float32"))
-        func = relay.qnn.op.conv2d(
+        func = relay.qnn.conv2d(
             data,
             kernel,
             input_zero_point=relay.const(0, "int32"),
diff --git a/tests/python/relay/test_op_qnn_dense.py b/tests/python/relay/test_op_qnn_dense.py
index 3609d8f8ed..d28742ddf0 100644
--- a/tests/python/relay/test_op_qnn_dense.py
+++ b/tests/python/relay/test_op_qnn_dense.py
@@ -181,7 +181,7 @@ def qnn_dense_driver(test_configuration):
     quantized_kernel = relay.var(
         quantized_kernel_name, shape=test_configuration["kernel_shape"], dtype=in_dtype
     )
-    mod = relay.qnn.op.dense(
+    mod = relay.qnn.dense(
         quantized_data,
         quantized_kernel,
         relay.const(test_configuration["input_zero_point"], "int32"),
@@ -195,7 +195,7 @@ def qnn_dense_driver(test_configuration):
         mod = relay.nn.bias_add(mod, bias)
     if test_configuration["requantize"] is not None:
         requantize_config = test_configuration["requantize"]
-        mod = relay.qnn.op.requantize(
+        mod = relay.qnn.requantize(
             mod,
             input_scale=relay.const(requantize_config["input_scale"], "float32"),
             input_zero_point=relay.const(0, "int32"),
diff --git a/tests/python/relay/test_op_qnn_dequantize.py b/tests/python/relay/test_op_qnn_dequantize.py
index 3b2ae97eb6..68908b4d7e 100644
--- a/tests/python/relay/test_op_qnn_dequantize.py
+++ b/tests/python/relay/test_op_qnn_dequantize.py
@@ -30,7 +30,7 @@ def dequantize_test_driver(
     input_data = relay.var("input_data", shape=shape, dtype=in_dtype)
     input_zero_point = relay.const(quant_args["in_zero_point"], "int32")
     input_scale = relay.const(quant_args["in_scale"], "float32")
-    quantized_output = relay.qnn.op.dequantize(
+    quantized_output = relay.qnn.dequantize(
         input_data,
         input_scale=input_scale,
         input_zero_point=input_zero_point,
@@ -175,7 +175,7 @@ def test_dynamic_dequantize():
     scale_var = relay.var("scale", shape=(), dtype="float32")
     zp_var = relay.var("zp", shape=(), dtype="int32")
 
-    deq_x = relay.qnn.op.dequantize(x, scale_var * scale_var, zp_var + zp_var)
+    deq_x = relay.qnn.dequantize(x, scale_var * scale_var, zp_var + zp_var)
     tt = run_infer_type(deq_x)
 
     assert tt.checked_type == relay.TensorType((1, 2, 3, 4), "float32")
diff --git a/tests/python/relay/test_op_qnn_leaky_relu.py b/tests/python/relay/test_op_qnn_leaky_relu.py
index ade897bf6e..d3216a793b 100644
--- a/tests/python/relay/test_op_qnn_leaky_relu.py
+++ b/tests/python/relay/test_op_qnn_leaky_relu.py
@@ -47,7 +47,7 @@ def test_qnn_leaky_relu():
     alpha = 0.9
 
     x = relay.var("x", shape=(1, 4), dtype=data_dtype)
-    y = relay.qnn.op.leaky_relu(
+    y = relay.qnn.leaky_relu(
         x=x,
         alpha=alpha,
         input_scale=relay.const(input_scale, "float32"),
diff --git a/tests/python/relay/test_op_qnn_mul.py b/tests/python/relay/test_op_qnn_mul.py
index af84f97786..bbc1bfd2ae 100644
--- a/tests/python/relay/test_op_qnn_mul.py
+++ b/tests/python/relay/test_op_qnn_mul.py
@@ -44,7 +44,7 @@ def test_tflite_same_io_qnn_params():
 
     x = relay.var("x", shape=(1, 4), dtype=data_dtype)
     y = relay.var("y", shape=(1, 4), dtype=data_dtype)
-    z = relay.qnn.op.mul(
+    z = relay.qnn.mul(
         lhs=x,
         rhs=y,
         lhs_scale=relay.const(lhs_scale, "float32"),
@@ -99,7 +99,7 @@ def test_tflite_different_io_qnn_params():
 
     x = relay.var("x", shape=(1, 4), dtype=data_dtype)
     y = relay.var("y", shape=(1, 4), dtype=data_dtype)
-    z = relay.qnn.op.mul(
+    z = relay.qnn.mul(
         lhs=x,
         rhs=y,
         lhs_scale=relay.const(lhs_scale, "float32"),
@@ -149,7 +149,7 @@ def test_saturation():
 
     x = relay.var("x", shape=(1, 4), dtype=data_dtype)
     y = relay.var("y", shape=(1, 4), dtype=data_dtype)
-    z = relay.qnn.op.mul(
+    z = relay.qnn.mul(
         lhs=x,
         rhs=y,
         lhs_scale=relay.const(lhs_scale, "float32"),
@@ -184,7 +184,7 @@ def test_saturation():
     lhs_scale = rhs_scale = 0.125
     output_scale = 0.25
 
-    z = relay.qnn.op.mul(
+    z = relay.qnn.mul(
         lhs=x,
         rhs=y,
         lhs_scale=relay.const(lhs_scale, "float32"),
@@ -220,7 +220,7 @@ def test_saturation():
     rhs_scale = 0.25
     output_scale = 0.125
 
-    z = relay.qnn.op.mul(
+    z = relay.qnn.mul(
         lhs=x,
         rhs=y,
         lhs_scale=relay.const(lhs_scale, "float32"),
diff --git a/tests/python/relay/test_op_qnn_quantize.py b/tests/python/relay/test_op_qnn_quantize.py
index 3a3521b11e..89f8904698 100644
--- a/tests/python/relay/test_op_qnn_quantize.py
+++ b/tests/python/relay/test_op_qnn_quantize.py
@@ -28,7 +28,7 @@ def quantize_test_driver(in_dtype, quant_args, axis, out_dtype, in_data, verify_
     input_data = relay.var("input_data", shape=shape, dtype=in_dtype)
     output_zero_point = relay.const(quant_args["out_zero_point"])
     output_scale = relay.const(quant_args["out_scale"])
-    quantized_output = relay.qnn.op.quantize(
+    quantized_output = relay.qnn.quantize(
         input_data,
         output_scale=output_scale,
         output_zero_point=output_zero_point,
@@ -175,7 +175,7 @@ def test_dynamic_quantize():
     scale_var = relay.var("scale", shape=(), dtype="float32")
     zp_var = relay.var("zp", shape=(), dtype="int32")
 
-    q_x = relay.qnn.op.quantize(x, scale_var * scale_var, zp_var + zp_var)
+    q_x = relay.qnn.quantize(x, scale_var * scale_var, zp_var + zp_var)
     tt = run_infer_type(q_x)
 
     assert tt.checked_type == relay.TensorType((1, 2, 3, 4), "int8")
diff --git a/tests/python/relay/test_op_qnn_requantize.py b/tests/python/relay/test_op_qnn_requantize.py
index 1dee1f5b61..4c0f2c7ee7 100644
--- a/tests/python/relay/test_op_qnn_requantize.py
+++ b/tests/python/relay/test_op_qnn_requantize.py
@@ -61,7 +61,7 @@ def get_mod(
     else:
         input_zero_point_expr = relay.const(np.array(input_zero_point).astype("int32"))
 
-    mod = relay.qnn.op.requantize(
+    mod = relay.qnn.requantize(
         input_data,
         input_scale=input_scale_expr,
         input_zero_point=input_zero_point_expr,
@@ -568,7 +568,7 @@ def test_default_cfg_and_no_args():
 def test_non_default_cfg_and_no_args():
     for rounding_cfg in roundings:
         for qnn_out_dtype in out_dtypes:
-            with relay.qnn.op.requantize_config(rounding=rounding_cfg):
+            with relay.qnn.requantize_config(rounding=rounding_cfg):
                 mod = get_mod(
                     data_shape=(32,),
                     data_dtype="int32",
@@ -589,7 +589,7 @@ def test_non_default_cfg_and_no_args():
 def test_default_cfg_and_args():
     for rounding in roundings:
         for qnn_out_dtype in out_dtypes:
-            with relay.qnn.op.requantize_config(rounding="UPWARD"):
+            with relay.qnn.requantize_config(rounding="UPWARD"):
                 mod = get_mod(
                     data_shape=(32,),
                     data_dtype="int32",
@@ -612,7 +612,7 @@ def test_non_default_cfg_and_args():
     for rounding_arg in roundings:
         for rounding_cfg in roundings:
             for qnn_out_dtype in out_dtypes:
-                with relay.qnn.op.requantize_config(rounding=rounding_cfg):
+                with relay.qnn.requantize_config(rounding=rounding_cfg):
                     mod = get_mod(
                         data_shape=(32,),
                         data_dtype="int32",
diff --git a/tests/python/relay/test_op_qnn_simulated_dequantize.py b/tests/python/relay/test_op_qnn_simulated_dequantize.py
index e15fdf770c..75d7f9727a 100644
--- a/tests/python/relay/test_op_qnn_simulated_dequantize.py
+++ b/tests/python/relay/test_op_qnn_simulated_dequantize.py
@@ -29,7 +29,7 @@ def dequantize_test_driver(in_dtype, quant_args, axis, in_data):
     input_data = relay.var("input_data", shape=shape, dtype=in_dtype)
     input_zero_point = relay.const(quant_args["in_zero_point"])
     input_scale = relay.const(quant_args["in_scale"])
-    dequantized_output = relay.qnn.op.dequantize(
+    dequantized_output = relay.qnn.dequantize(
         input_data,
         input_scale=input_scale,
         input_zero_point=input_zero_point,
@@ -48,7 +48,7 @@ def dequantize_test_driver(in_dtype, quant_args, axis, in_data):
 
 
 def build_simulated_dequantize(input_data, scale, zp, dtype, axis=-1):
-    sim_q = relay.qnn.op.simulated_dequantize(
+    sim_q = relay.qnn.simulated_dequantize(
         input_data,
         scale,
         zp,
diff --git a/tests/python/relay/test_op_qnn_simulated_quantize.py b/tests/python/relay/test_op_qnn_simulated_quantize.py
index 69ce261f6b..c0f45837e4 100644
--- a/tests/python/relay/test_op_qnn_simulated_quantize.py
+++ b/tests/python/relay/test_op_qnn_simulated_quantize.py
@@ -36,7 +36,7 @@ def quantize_test_driver(in_dtype, quant_args, axis, out_dtype, in_data):
     input_data = relay.var("input_data", shape=shape, dtype=in_dtype)
     output_zero_point = relay.const(quant_args["out_zero_point"])
     output_scale = relay.const(quant_args["out_scale"])
-    quantized_output = relay.qnn.op.quantize(
+    quantized_output = relay.qnn.quantize(
         input_data,
         output_scale=output_scale,
         output_zero_point=output_zero_point,
@@ -56,7 +56,7 @@ def quantize_test_driver(in_dtype, quant_args, axis, out_dtype, in_data):
 
 
 def build_simulated_quantize(input_data, scale, zp, dtype, axis=-1):
-    sim_q = relay.qnn.op.simulated_quantize(
+    sim_q = relay.qnn.simulated_quantize(
         input_data,
         scale,
         zp,
diff --git a/tests/python/relay/test_op_qnn_subtract.py b/tests/python/relay/test_op_qnn_subtract.py
index f7117b5594..16f26e77e7 100644
--- a/tests/python/relay/test_op_qnn_subtract.py
+++ b/tests/python/relay/test_op_qnn_subtract.py
@@ -33,7 +33,7 @@ def qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp, data_dty
     rhs_zp = relay.const(scale_and_zp["rhs_zp"], "int32")
     output_scale = relay.const(scale_and_zp["output_scale"], "float32")
     output_zp = relay.const(scale_and_zp["output_zp"], "int32")
-    z = relay.qnn.op.subtract(
+    z = relay.qnn.subtract(
         lhs=x,
         rhs=y,
         lhs_scale=lhs_scale,
diff --git a/tests/python/relay/test_op_qnn_unary_elementwise.py b/tests/python/relay/test_op_qnn_unary_elementwise.py
index 52f74c24e9..01a7374a0b 100644
--- a/tests/python/relay/test_op_qnn_unary_elementwise.py
+++ b/tests/python/relay/test_op_qnn_unary_elementwise.py
@@ -131,7 +131,7 @@ class TestRSqrt:
         # Same qparams in and out
         x_data = np.array((255, 133, 0, 9)).reshape((1, 4))
         run_condition(
-            relay.qnn.op.rsqrt,
+            relay.qnn.rsqrt,
             lambda x: 1 / np.sqrt(x),
             x_data,
             input_scale=0.125,
@@ -143,7 +143,7 @@ class TestRSqrt:
 
         # Different scale
         run_condition(
-            relay.qnn.op.rsqrt,
+            relay.qnn.rsqrt,
             lambda x: 1 / np.sqrt(x),
             x_data,
             input_scale=0.125,
@@ -154,11 +154,11 @@ class TestRSqrt:
         )
 
     def test_all_numbers_uint8(self):
-        generic_test(relay.qnn.op.rsqrt, lambda x: 1 / np.sqrt(x), input_dtype="uint8")
+        generic_test(relay.qnn.rsqrt, lambda x: 1 / np.sqrt(x), input_dtype="uint8")
 
     def test_all_numbers_int8(self):
         generic_test(
-            relay.qnn.op.rsqrt,
+            relay.qnn.rsqrt,
             lambda x: 1 / np.sqrt(x),
             input_dtype="int8",
             x_data=np.arange(1, 128, dtype="int8"),
@@ -167,11 +167,11 @@ class TestRSqrt:
 
 class Sqrt:
     def test_all_numbers_uint8(self):
-        generic_test(relay.qnn.op.sqrt, np.sqrt, input_dtype="uint8")
+        generic_test(relay.qnn.sqrt, np.sqrt, input_dtype="uint8")
 
     def test_all_numbers_int8(self):
         generic_test(
-            relay.qnn.op.sqrt,
+            relay.qnn.sqrt,
             np.sqrt,
             input_dtype="int8",
             x_data=np.arange(1, 128, dtype="int8"),
@@ -180,42 +180,42 @@ class Sqrt:
 
 class TestExp:
     def test_all_numbers_uint8(self):
-        generic_test(relay.qnn.op.exp, np.exp, input_dtype="uint8")
+        generic_test(relay.qnn.exp, np.exp, input_dtype="uint8")
 
     def test_all_numbers_int8(self):
-        generic_test(relay.qnn.op.exp, np.exp, input_dtype="int8")
+        generic_test(relay.qnn.exp, np.exp, input_dtype="int8")
 
 
 class TestTanh:
     def test_all_numbers_uint8(self):
-        generic_test(relay.qnn.op.tanh, np.tanh, input_dtype="uint8")
+        generic_test(relay.qnn.tanh, np.tanh, input_dtype="uint8")
 
     def test_all_numbers_int8(self):
-        generic_test(relay.qnn.op.tanh, np.tanh, input_dtype="int8")
+        generic_test(relay.qnn.tanh, np.tanh, input_dtype="int8")
 
 
 class TestErf:
     def test_all_numbers_uint8(self):
-        generic_test(relay.qnn.op.erf, scipy.special.erf, input_dtype="uint8")
+        generic_test(relay.qnn.erf, scipy.special.erf, input_dtype="uint8")
 
     def test_all_numbers_int8(self):
-        generic_test(relay.qnn.op.erf, scipy.special.erf, input_dtype="int8")
+        generic_test(relay.qnn.erf, scipy.special.erf, input_dtype="int8")
 
 
 class TestSigmoid:
     def test_all_numbers_uint8(self):
-        generic_test(relay.qnn.op.sigmoid, lambda x: 1 / (1 + np.exp(-x)), input_dtype="uint8")
+        generic_test(relay.qnn.sigmoid, lambda x: 1 / (1 + np.exp(-x)), input_dtype="uint8")
 
     def test_all_numbers_int8(self):
-        generic_test(relay.qnn.op.sigmoid, lambda x: 1 / (1 + np.exp(-x)), input_dtype="int8")
+        generic_test(relay.qnn.sigmoid, lambda x: 1 / (1 + np.exp(-x)), input_dtype="int8")
 
 
 class TestHardswish:
     def test_all_numbers_uint8(self):
-        generic_test(relay.qnn.op.hardswish, hardswish_func, input_dtype="uint8")
+        generic_test(relay.qnn.hardswish, hardswish_func, input_dtype="uint8")
 
     def test_all_numbers_int8(self):
-        generic_test(relay.qnn.op.hardswish, hardswish_func, input_dtype="int8")
+        generic_test(relay.qnn.hardswish, hardswish_func, input_dtype="int8")
 
 
 if __name__ == "__main__":
diff --git a/tests/python/relay/test_pass_qnn_legalize.py b/tests/python/relay/test_pass_qnn_legalize.py
index a32100ea20..4bb4e4813e 100644
--- a/tests/python/relay/test_pass_qnn_legalize.py
+++ b/tests/python/relay/test_pass_qnn_legalize.py
@@ -51,7 +51,7 @@ def test_qnn_legalize():
 
     def before():
         x = relay.var("x", shape=(1, 64, 56, 56), dtype="int8")
-        y = relay.qnn.op.requantize(
+        y = relay.qnn.requantize(
             x,
             input_scale=relay.const(1, "float32"),
             input_zero_point=relay.const(0, "int32"),
@@ -65,7 +65,7 @@ def test_qnn_legalize():
     def legalize_qnn_requantize(attrs, inputs, types):
         data = inputs[0]
         data = relay.add(relay.const(0, "int8"), data)
-        y = relay.qnn.op.requantize(
+        y = relay.qnn.requantize(
             data,
             input_scale=relay.const(1, "float32"),
             input_zero_point=relay.const(0, "int32"),
@@ -78,7 +78,7 @@ def test_qnn_legalize():
     def expected():
         x = relay.var("x", shape=(1, 64, 56, 56), dtype="int8")
         y = relay.add(relay.const(0, "int8"), x)
-        z = relay.qnn.op.requantize(
+        z = relay.qnn.requantize(
             y,
             input_scale=relay.const(1, "float32"),
             input_zero_point=relay.const(0, "int32"),
@@ -110,7 +110,7 @@ def test_qnn_legalize_qnn_conv2d():
         kernel_shape = (128, 64, 3, 3)
         data = relay.var("data", shape=data_shape, dtype=data_dtype)
         kernel = relay.var("kernel", shape=kernel_shape, dtype=kernel_dtype)
-        func = relay.qnn.op.conv2d(
+        func = relay.qnn.conv2d(
             data,
             kernel,
             input_zero_point=relay.const(1, "int32"),
@@ -212,7 +212,7 @@ def test_qnn_legalize_qnn_dense():
         kernel_shape = (20, 3)
         data = relay.var("data", shape=data_shape, dtype=data_dtype)
         kernel = relay.var("kernel", shape=kernel_shape, dtype=kernel_dtype)
-        func = relay.qnn.op.dense(
+        func = relay.qnn.dense(
             data,
             kernel,
             input_zero_point=relay.const(1, "int32"),
@@ -317,7 +317,7 @@ def test_qnn_legalize_qnn_conv2d_non_scalar_qnn_params():
     data_scale = relay.const(0.15)
 
     def before():
-        op = relay.qnn.op.conv2d(
+        op = relay.qnn.conv2d(
             data,
             weights,
             input_zero_point=data_zp,
@@ -336,7 +336,7 @@ def test_qnn_legalize_qnn_conv2d_non_scalar_qnn_params():
         op0 = relay.nn.pad(weights, pad_width=[[0, 0], [0, in_diff], [0, 0], [0, 0]])
         op1 = relay.nn.pad(data, pad_width=[[0, 0], [0, in_diff], [0, 0], [0, 0]])
         op2 = relay.nn.pad(op0, pad_width=[[0, out_diff], [0, 0], [0, 0], [0, 0]])
-        op3 = relay.qnn.op.conv2d(
+        op3 = relay.qnn.conv2d(
             op1,
             op2,
             input_zero_point=data_zp,
@@ -373,7 +373,7 @@ def test_qnn_legalize_qnn_dense_non_scalar_qnn_params():
     def before():
         wzp = relay.const([1] * N)
         wscale = relay.const([0.17] * N)
-        op = relay.qnn.op.dense(data, weights, data_zp, wzp, data_scale, wscale, units=N)
+        op = relay.qnn.dense(data, weights, data_zp, wzp, data_scale, wscale, units=N)
         return op
 
     def expected():
@@ -381,7 +381,7 @@ def test_qnn_legalize_qnn_dense_non_scalar_qnn_params():
         wzp = relay.const([1] * N + [0] * diff)
         wscale = relay.const([0.17] * N + [1.0] * diff)
         op0 = relay.nn.pad(weights, pad_width=[[0, diff], [0, 0]])
-        op1 = relay.qnn.op.dense(data, op0, data_zp, wzp, data_scale, wscale, units=(N + diff))
+        op1 = relay.qnn.dense(data, op0, data_zp, wzp, data_scale, wscale, units=(N + diff))
         op2 = relay.strided_slice(op1, begin=[0, 0], end=[data_shape[0], N], strides=[1], axes=None)
         return op2
 
diff --git a/tests/python/topi/python/test_topi_qnn.py b/tests/python/topi/python/test_topi_qnn.py
index 12c868029b..38212d9705 100644
--- a/tests/python/topi/python/test_topi_qnn.py
+++ b/tests/python/topi/python/test_topi_qnn.py
@@ -54,7 +54,7 @@ def verify_simulated_quantize(data_shape, out_dtype, channels, axis):
         else:
             s_var = relay.const(s_np[0])
             z_var = relay.const(z_np[0])
-        real_q_op = relay.qnn.op.quantize(a_var, s_var, z_var, axis=axis, out_dtype=out_dtype)
+        real_q_op = relay.qnn.quantize(a_var, s_var, z_var, axis=axis, out_dtype=out_dtype)
         with tvm.transform.PassContext(opt_level=3):
             lib = relay.build(tvm.IRModule.from_expr(real_q_op), target=target)
 
@@ -121,7 +121,7 @@ def verify_simulated_dequantize(data_shape, in_dtype, channels, axis):
         else:
             s_var = relay.const(s_np[0])
             z_var = relay.const(z_np[0])
-        real_dq_op = relay.qnn.op.dequantize(a_var, s_var, z_var, axis=axis)
+        real_dq_op = relay.qnn.dequantize(a_var, s_var, z_var, axis=axis)
         with tvm.transform.PassContext(opt_level=3):
             lib = relay.build(tvm.IRModule.from_expr(real_dq_op), target=target)
 

