This is an automated email from the ASF dual-hosted git repository.

masahi pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 5ac2d1a219 [Pylint] fix pylint issues for cblas (#16015)
5ac2d1a219 is described below

commit 5ac2d1a219fd66f92a7bf720ab196c295bc1e11c
Author: Tlopex <68688494+tlo...@users.noreply.github.com>
AuthorDate: Tue Oct 31 15:04:56 2023 +0800

    [Pylint] fix pylint issues for cblas (#16015)
    
    * Update pylint.sh
    
    * Update test_cblas.py
---
 tests/lint/pylint.sh               |   1 +
 tests/python/contrib/test_cblas.py | 165 +++++++++++++++++++++++--------------
 2 files changed, 103 insertions(+), 63 deletions(-)

diff --git a/tests/lint/pylint.sh b/tests/lint/pylint.sh
index ac93a6f15d..75244de49d 100755
--- a/tests/lint/pylint.sh
+++ b/tests/lint/pylint.sh
@@ -27,6 +27,7 @@ python3 -m pylint tests/python/relay/aot/*.py --rcfile="$(dirname "$0")"/pylintr
 python3 -m pylint tests/python/ci --rcfile="$(dirname "$0")"/pylintrc
 python3 -m pylint tests/python/integration/ --rcfile="$(dirname "$0")"/pylintrc
 python3 -m pylint tests/python/conftest.py --rcfile="$(dirname "$0")"/pylintrc
+python3 -m pylint tests/python/contrib/test_cblas.py --rcfile="$(dirname "$0")"/pylintrc
 
 # tests/python/contrib/test_hexagon tests
 python3 -m pylint tests/python/contrib/test_hexagon/*.py --rcfile="$(dirname "$0")"/pylintrc
diff --git a/tests/python/contrib/test_cblas.py b/tests/python/contrib/test_cblas.py
index 59872e1297..3c90aefeb6 100644
--- a/tests/python/contrib/test_cblas.py
+++ b/tests/python/contrib/test_cblas.py
@@ -14,35 +14,41 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
+"""Configure pytest"""
 import pytest
+import numpy as np
 import tvm
 from tvm import te
-import numpy as np
-import tvm.topi.testing
 from tvm.contrib import cblas
 from tvm.contrib import mkl
 from tvm.contrib import dnnl
 import tvm.testing
+import tvm.topi.testing
 
 
-def verify_matmul_add(m, l, n, lib, transa=False, transb=False, dtype="float32"):
+def verify_matmul_add(
+    matrix_m, matrix_l, matrix_n, lib, transa=False, transb=False, dtype="float32"
+):
+    """Tests matmul+add op"""
     bias = te.var("bias", dtype=dtype)
-    ashape = (l, n) if transa else (n, l)
-    bshape = (m, l) if transb else (l, m)
-    A = te.placeholder(ashape, name="A", dtype=dtype)
-    B = te.placeholder(bshape, name="B", dtype=dtype)
-    C = lib.matmul(A, B, transa, transb)
-    D = te.compute(C.shape, lambda i, j: C[i, j] + bias, name="D")
-    s = te.create_schedule(D.op)
-
-    def get_numpy(a, b, bb, transa, transb):
+    ashape = (matrix_l, matrix_n) if transa else (matrix_n, matrix_l)
+    bshape = (matrix_m, matrix_l) if transb else (matrix_l, matrix_m)
+    input1_data = te.placeholder(ashape, name="input1_data", dtype=dtype)
+    input2_data = te.placeholder(bshape, name="input2_data", dtype=dtype)
+    matmul_result = lib.matmul(input1_data, input2_data, transa, transb)
+    final_result = te.compute(
+        matmul_result.shape, lambda i, j: matmul_result[i, j] + bias, name="final_result"
+    )
+    s = te.create_schedule(final_result.op)
+
+    def get_numpy(a, b, matrix_bias, transa, transb):
         if transa:
             a = a.transpose()
         if transb:
             b = b.transpose()
-        return np.dot(a, b) + bb
+        return np.dot(a, b) + matrix_bias
 
-    def compile(f, name="test_matmul_add", ext=".so"):
+    def compiling(f, name="test_matmul_add", ext=".so"):
         path = name + ext
         f.export_library(path)
         mod = tvm.runtime.load_module(path)
@@ -58,16 +64,18 @@ def verify_matmul_add(m, l, n, lib, transa=False, transb=False, dtype="float32")
             return
         dev = tvm.cpu(0)
         name = "test_matmul_add"
-        f = tvm.build(s, [A, B, D, bias], target, name=name)
+        f = tvm.build(s, [input1_data, input2_data, final_result, bias], target, name=name)
         if target == "c":
-            f = compile(f, name)
-        a = tvm.nd.array(np.random.uniform(size=ashape).astype(A.dtype), dev)
-        b = tvm.nd.array(np.random.uniform(size=bshape).astype(B.dtype), dev)
-        d = tvm.nd.array(np.zeros((n, m), dtype=D.dtype), dev)
-        bb = 10.0
-        f(a, b, d, bb)
+            f = compiling(f, name)
+        matrix_input1 = tvm.nd.array(np.random.uniform(size=ashape).astype(input1_data.dtype), dev)
+        matrix_input2 = tvm.nd.array(np.random.uniform(size=bshape).astype(input2_data.dtype), dev)
+        matrix_result = tvm.nd.array(np.zeros((matrix_n, matrix_m), dtype=final_result.dtype), dev)
+        matrix_bias = 10.0
+        f(matrix_input1, matrix_input2, matrix_result, matrix_bias)
         tvm.testing.assert_allclose(
-            d.numpy(), get_numpy(a.numpy(), b.numpy(), bb, transa, transb), rtol=1e-5
+            matrix_result.numpy(),
+            get_numpy(matrix_input1.numpy(), matrix_input2.numpy(), matrix_bias, transa, transb),
+            rtol=1e-5,
         )
 
     verify("llvm")
@@ -75,6 +83,7 @@ def verify_matmul_add(m, l, n, lib, transa=False, transb=False, dtype="float32")
 
 
 def test_matmul_add():
+    """Tests of matmul+add op"""
     verify_matmul_add(235, 128, 1024, cblas)
     verify_matmul_add(235, 128, 1024, cblas, True, False)
     verify_matmul_add(235, 128, 1024, cblas, False, True)
@@ -101,27 +110,30 @@ def test_matmul_add():
     verify_matmul_add(1, 16, 3, dnnl, True, True)
 
 
-def verify_quantized_matmul_add(m, l, n, transa=False, transb=False):
+def verify_quantized_matmul_add(matrix_m, matrix_l, matrix_n, transa=False, transb=False):
+    """Tests quantized matmul+add op"""
     if not tvm.get_global_func("tvm.contrib.mkl.matmul_u8s8s32", True):
         pytest.skip("Quantized dense is supported only for MKL. TVM GPU CI 
uses openblas")
     data_dtype = "uint8"
     kernel_dtype = "int8"
     out_dtype = "int32"
     bias = te.var("bias", dtype=out_dtype)
-    ashape = (l, n) if transa else (n, l)
-    bshape = (m, l) if transb else (l, m)
-    A = te.placeholder(ashape, name="A", dtype=data_dtype)
-    B = te.placeholder(bshape, name="B", dtype=kernel_dtype)
-    C = mkl.matmul_u8s8s32(A, B, transa, transb, dtype=out_dtype)
-    D = te.compute(C.shape, lambda i, j: C[i, j] + bias, name="D")
-    s = te.create_schedule(D.op)
-
-    def get_numpy(a, b, bb, transa, transb):
+    ashape = (matrix_l, matrix_n) if transa else (matrix_n, matrix_l)
+    bshape = (matrix_m, matrix_l) if transb else (matrix_l, matrix_m)
+    input1_data = te.placeholder(ashape, name="input1_data", dtype=data_dtype)
+    input2_data = te.placeholder(bshape, name="input2_data", dtype=kernel_dtype)
+    matmul_result = mkl.matmul_u8s8s32(input1_data, input2_data, transa, transb, dtype=out_dtype)
+    final_result = te.compute(
+        matmul_result.shape, lambda i, j: matmul_result[i, j] + bias, name="final_result"
+    )
+    s = te.create_schedule(final_result.op)
+
+    def get_numpy(a, b, matrix_bias, transa, transb):
         if transa:
             a = a.transpose()
         if transb:
             b = b.transpose()
-        return np.dot(a, b) + bb
+        return np.dot(a, b) + matrix_bias
 
     def verify(target="llvm"):
         if not tvm.testing.device_enabled(target):
@@ -131,15 +143,25 @@ def verify_quantized_matmul_add(m, l, n, transa=False, transb=False):
             print("skip because extern function is not available")
             return
         dev = tvm.cpu(0)
-        f = tvm.build(s, [A, B, D, bias], target)
-        a = tvm.nd.array(np.random.randint(low=0, high=50, size=ashape).astype(A.dtype), dev)
-        b = tvm.nd.array(np.random.randint(low=0, high=50, size=bshape).astype(B.dtype), dev)
-        d = tvm.nd.array(np.zeros((n, m), dtype=D.dtype), dev)
-        bb = 10
-        f(a, b, d, bb)
+        f = tvm.build(s, [input1_data, input2_data, final_result, bias], target)
+        matrix_input1 = tvm.nd.array(
+            np.random.randint(low=0, high=50, size=ashape).astype(input1_data.dtype), dev
+        )
+        matrix_input2 = tvm.nd.array(
+            np.random.randint(low=0, high=50, size=bshape).astype(input2_data.dtype), dev
+        )
+        matrix_result = tvm.nd.array(np.zeros((matrix_n, matrix_m), dtype=final_result.dtype), dev)
+        matrix_bias = 10
+        f(matrix_input1, matrix_input2, matrix_result, matrix_bias)
         tvm.testing.assert_allclose(
-            d.numpy(),
-            get_numpy(a.numpy().astype("int32"), b.numpy().astype("int32"), 
bb, transa, transb),
+            matrix_result.numpy(),
+            get_numpy(
+                matrix_input1.numpy().astype("int32"),
+                matrix_input2.numpy().astype("int32"),
+                matrix_bias,
+                transa,
+                transb,
+            ),
             rtol=1e-5,
         )
 
@@ -147,6 +169,7 @@ def verify_quantized_matmul_add(m, l, n, transa=False, transb=False):
 
 
 def test_quantized_matmul_add():
+    """Tests of quantized matmul+add op"""
     verify_quantized_matmul_add(235, 128, 1024)
     verify_quantized_matmul_add(235, 128, 1024, True, False)
     verify_quantized_matmul_add(235, 128, 1024, False, True)
@@ -158,16 +181,27 @@ def test_quantized_matmul_add():
 
 
 def verify_batch_matmul(
-    batch_a, batch_b, m, l, n, lib, transa=False, transb=False, iterative=False, dtype="float32"
+    batch_a,
+    batch_b,
+    matrix_m,
+    matrix_l,
+    matrix_n,
+    lib,
+    transa=False,
+    transb=False,
+    dtype="float32",
 ):
+    """Tests matmul op where matrices are in batch"""
     batch = max(batch_a, batch_b)
-    ashape = (batch_a, l, n) if transa else (batch_a, n, l)
-    bshape = (batch_b, m, l) if transb else (batch_b, l, m)
-    A = te.placeholder(ashape, name="A", dtype=dtype)
-    B = te.placeholder(bshape, name="B", dtype=dtype)
-    C = lib.batch_matmul(A, B, transa, transb)
-    D = te.compute(C.shape, lambda k, i, j: C[k, i, j], name="D")
-    s = te.create_schedule(D.op)
+    ashape = (batch_a, matrix_l, matrix_n) if transa else (batch_a, matrix_n, matrix_l)
+    bshape = (batch_b, matrix_m, matrix_l) if transb else (batch_b, matrix_l, matrix_m)
+    input1_data = te.placeholder(ashape, name="input1_data", dtype=dtype)
+    input2_data = te.placeholder(bshape, name="input2_data", dtype=dtype)
+    matmul_result = lib.batch_matmul(input1_data, input2_data, transa, transb)
+    final_result = te.compute(
+        matmul_result.shape, lambda k, i, j: matmul_result[k, i, j], name="final_result"
+    )
+    s = te.create_schedule(final_result.op)
 
     def get_numpy(a, b, transa, transb):
         if transa:
@@ -176,7 +210,7 @@ def verify_batch_matmul(
             b = b.transpose(0, 2, 1)
         return tvm.topi.testing.batch_matmul(a, b)
 
-    def compile(f, name="test_batch_matmul", ext=".so"):
+    def compiling(f, name="test_batch_matmul", ext=".so"):
         path = name + ext
         f.export_library(path)
         mod = tvm.runtime.load_module(path)
@@ -192,15 +226,19 @@ def verify_batch_matmul(
             return
         dev = tvm.cpu(0)
         name = "test_batch_matmul"
-        f = tvm.build(s, [A, B, D], target, name=name)
+        f = tvm.build(s, [input1_data, input2_data, final_result], target, name=name)
         if target == "c":
-            f = compile(f, name)
-        a = tvm.nd.array(np.random.uniform(size=ashape).astype(A.dtype), dev)
-        b = tvm.nd.array(np.random.uniform(size=bshape).astype(B.dtype), dev)
-        d = tvm.nd.array(np.zeros((batch, n, m), dtype=D.dtype), dev)
-        f(a, b, d)
+            f = compiling(f, name)
+        matrix_input1 = tvm.nd.array(np.random.uniform(size=ashape).astype(input1_data.dtype), dev)
+        matrix_input2 = tvm.nd.array(np.random.uniform(size=bshape).astype(input2_data.dtype), dev)
+        matrix_result = tvm.nd.array(
+            np.zeros((batch, matrix_n, matrix_m), dtype=final_result.dtype), dev
+        )
+        f(matrix_input1, matrix_input2, matrix_result)
         tvm.testing.assert_allclose(
-            d.numpy(), get_numpy(a.numpy(), b.numpy(), transa, transb), rtol=1e-5
+            matrix_result.numpy(),
+            get_numpy(matrix_input1.numpy(), matrix_input2.numpy(), transa, transb),
+            rtol=1e-5,
         )
 
     verify("llvm")
@@ -208,6 +246,7 @@ def verify_batch_matmul(
 
 
 def test_batch_matmul():
+    """Tests of matmul op where matrices are in batch"""
     verify_batch_matmul(16, 16, 235, 128, 1024, cblas)
     verify_batch_matmul(16, 16, 235, 128, 1024, cblas, True, False)
     verify_batch_matmul(16, 16, 235, 128, 1024, cblas, False, True)
@@ -218,22 +257,22 @@ def test_batch_matmul():
     verify_batch_matmul(16, 16, 235, 128, 1024, mkl, True, True)
     verify_batch_matmul(16, 1, 235, 128, 1024, cblas)
     verify_batch_matmul(1, 16, 235, 128, 1024, cblas)
-    verify_batch_matmul(16, 1, 235, 128, 1024, cblas, iterative=True)
-    verify_batch_matmul(1, 16, 235, 128, 1024, cblas, iterative=True)
+    verify_batch_matmul(16, 1, 235, 128, 1024, cblas)
+    verify_batch_matmul(1, 16, 235, 128, 1024, cblas)
+    verify_batch_matmul(16, 1, 235, 128, 1024, mkl)
+    verify_batch_matmul(1, 16, 235, 128, 1024, mkl)
     verify_batch_matmul(16, 1, 235, 128, 1024, mkl)
     verify_batch_matmul(1, 16, 235, 128, 1024, mkl)
-    verify_batch_matmul(16, 1, 235, 128, 1024, mkl, iterative=True)
-    verify_batch_matmul(1, 16, 235, 128, 1024, mkl, iterative=True)
     verify_batch_matmul(1, 1, 1, 16, 3, cblas)
     verify_batch_matmul(1, 1, 1, 16, 3, cblas, True, False)
     verify_batch_matmul(1, 1, 1, 16, 3, cblas, False, False)
     verify_batch_matmul(1, 1, 1, 16, 3, cblas, True, True)
-    verify_batch_matmul(1, 1, 1, 16, 3, cblas, iterative=True)
+    verify_batch_matmul(1, 1, 1, 16, 3, cblas)
     verify_batch_matmul(1, 1, 1, 16, 3, mkl)
     verify_batch_matmul(1, 1, 1, 16, 3, mkl, True, False)
     verify_batch_matmul(1, 1, 1, 16, 3, mkl, False, False)
     verify_batch_matmul(1, 1, 1, 16, 3, mkl, True, True)
-    verify_batch_matmul(1, 1, 1, 16, 3, mkl, iterative=True)
+    verify_batch_matmul(1, 1, 1, 16, 3, mkl)
 
 
 if __name__ == "__main__":
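
The diff above applies a few recurring pylint fixes throughout the file:
a module docstring and per-test docstrings (C0114/C0116), descriptive
snake_case names in place of single-letter ones (C0103), regrouped imports,
renaming the inner compile helper to compiling so it no longer shadows the
builtin (W0622), and removing the unused iterative keyword argument. A
condensed before/after illustration of the naming pattern (hypothetical
code, not taken from the commit):

    # Before: trips invalid-name (C0103), missing-function-docstring (C0116)
    # and redefined-builtin (W0622)
    def f(m, l, n):
        def compile(value):
            return value
        return compile(m + l + n)

    # After: descriptive names, a docstring, and no shadowed builtin
    def add_dims(matrix_m, matrix_l, matrix_n):
        """Sum the three dimension sizes."""
        def compiling(value):
            return value
        return compiling(matrix_m + matrix_l + matrix_n)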
