jcf94 commented on a change in pull request #8336:
URL: https://github.com/apache/tvm/pull/8336#discussion_r659451517



##########
File path: python/tvm/topi/gpu/dense.py
##########
@@ -0,0 +1,218 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# pylint: disable=invalid-name, unused-argument
+"""Schedule for dense operator"""
+
+import logging
+
+from tvm import autotvm, te
+from tvm.autotvm.task.space import SplitEntity
+
+from .. import nn
+from ..utils import traverse_inline, get_const_tuple
+
+logger = logging.getLogger("topi")
+
+
+@autotvm.register_topi_compute("dense_small_batch.gpu")
+def dense_small_batch(cfg, data, weight, bias=None, out_dtype=None):
+    """Dense operator on CUDA"""

Review comment:
    ```suggestion
    """Dense operator on gpu."""
    ```

##########
File path: tests/python/topi/python/test_topi_dense.py
##########
@@ -43,108 +54,115 @@
 }
 
 
-def verify_dense(batch, in_dim, out_dim, use_bias=True):
-    A = te.placeholder((batch, in_dim), name="A")
-    B = te.placeholder((out_dim, in_dim), name="B")
-    C = te.placeholder((out_dim,), name="C")
-    dtype = A.dtype
-
-    # use memoize to pickle the test data for next time use
-    @memoize("topi.tests.test_topi_dense")
-    def get_ref_data():
-        a_np = np.random.uniform(size=(batch, in_dim)).astype(dtype)
-        b_np = np.random.uniform(size=(out_dim, in_dim)).astype(dtype)
-        c_np = np.random.uniform(size=(out_dim,)).astype(dtype)
-        if use_bias:
-            d_np = np.maximum(np.dot(a_np, b_np.T) + c_np, 0.0)
-        else:
-            d_np = np.maximum(np.dot(a_np, b_np.T), 0.0)
-        return (a_np, b_np, c_np, d_np)
-
-    # get the test data
-    a_np, b_np, c_np, d_np = get_ref_data()
-
-    def check_device(device, dev):
-        print("Running on target: %s" % device)
-        for fcompute, fschedule in tvm.topi.testing.dispatch(device, _dense_implement):
-            with tvm.target.Target(device):
-                D = fcompute(A, B, C if use_bias else None)
-                D = topi.nn.relu(D)
-                s = fschedule([D])
-            a = tvm.nd.array(a_np, dev)
-            b = tvm.nd.array(b_np, dev)
-            c = tvm.nd.array(c_np, dev)
-            d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=dtype), dev)
-            f = tvm.build(s, [A, B, C, D], device, name="dense")
-            f(a, b, c, d)
-            tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-5)
-
-    for device, dev in tvm.testing.enabled_targets():
-        check_device(device, dev)
-
-
-def verify_dense_int8(batch, in_dim, out_dim, use_bias=True):
-    dtype = "int8"
-    out_dtype = "int32"
-    A = te.placeholder((batch, in_dim), name="A", dtype=dtype)
-    B = te.placeholder((out_dim, in_dim), name="B", dtype=dtype)
+@tvm.testing.fixture(cache_return_value=True)
+def dense_ref_data(batch_size, in_dim, out_dim, use_bias, in_dtype, out_dtype):
+    if "float" in in_dtype:
+        a_np = np.random.uniform(size=(batch_size, in_dim)).astype(in_dtype)
+        b_np = np.random.uniform(size=(out_dim, in_dim)).astype(in_dtype)
+        c_np = np.random.uniform(size=(out_dim,)).astype(out_dtype)
+    elif in_dtype == "int8":
+        a_np = np.random.randint(low=-128, high=127, size=(batch_size, in_dim)).astype(in_dtype)
+        b_np = np.random.randint(low=-128, high=127, size=(out_dim, in_dim)).astype(in_dtype)
+        c_np = np.random.randint(low=-128, high=127, size=(out_dim,)).astype(out_dtype)
+    else:
+        raise ValueError("No method to generate test data for data type '{}'".format(in_dtype))
+
+    matmul = np.dot(a_np.astype(out_dtype), b_np.T.astype(out_dtype))
+
+    if use_bias:
+        matmul += c_np
+
+    d_np = np.maximum(matmul, 0)
+    return (a_np, b_np, c_np, d_np)
+
+
+def test_dense(

Review comment:
       I'm wondering whether this will actually test the different targets as you expect.

       Shouldn't this function be decorated with `@tvm.testing.parametrize_targets`?
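
       Roughly what I have in mind is something like the sketch below (just an illustration, assuming the `dense_ref_data` fixture defined above; the parameter plumbing is not meant to be the final code):

       ```python
       # Rough sketch only: with the decorator, pytest runs the test once per
       # enabled target and injects a ready-made `target` string and `dev` device.
       import tvm
       import tvm.testing


       @tvm.testing.parametrize_targets
       def test_dense(target, dev, dense_ref_data):
           a_np, b_np, c_np, d_np = dense_ref_data
           # ... build the dense op under tvm.target.Target(target), run it on
           # `dev`, and compare against d_np, as the old check_device() helper did ...
       ```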

##########
File path: python/tvm/topi/gpu/dense.py
##########
@@ -0,0 +1,218 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# pylint: disable=invalid-name, unused-argument
+"""Schedule for dense operator"""
+
+import logging
+
+from tvm import autotvm, te
+from tvm.autotvm.task.space import SplitEntity
+
+from .. import nn
+from ..utils import traverse_inline, get_const_tuple
+
+logger = logging.getLogger("topi")
+
+
+@autotvm.register_topi_compute("dense_small_batch.gpu")
+def dense_small_batch(cfg, data, weight, bias=None, out_dtype=None):
+    """Dense operator on CUDA"""
+    return nn.dense(data, weight, bias, out_dtype)
+
+
+@autotvm.register_topi_schedule("dense_small_batch.gpu")
+def schedule_dense_small_batch(cfg, outs):
+    """Schedule float32/64 dense with small batch size"""
+    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
+    s = te.create_schedule([x.op for x in outs])
+
+    def _callback(op):
+        if op.tag == "dense":
+            _schedule_dense_small_batch(cfg, s, op.output(0))
+
+    traverse_inline(s, outs[0].op, _callback)
+    return s
+
+
+def _schedule_dense_small_batch(cfg, s, C):
+    A, weights = C.op.input_tensors
+    _, in_dim_weights = get_const_tuple(weights.shape)
+    _, in_dim_A = get_const_tuple(A.shape)
+
+    if isinstance(in_dim_A, int):
+        in_dim = in_dim_A
+    elif isinstance(in_dim_weights, int):
+        in_dim = in_dim_weights
+    else:
+        in_dim = None
+
+    if in_dim is not None:
+        cfg.define_split("tile_k", in_dim, num_outputs=2)
+        if cfg.is_fallback:
+            cfg["tile_k"] = SplitEntity([-1, 64] if in_dim > 64 else [1, 64])
+        _, kf = cfg["tile_k"].apply(s, C, C.op.reduce_axis[0])
+    else:
+        tile_k = 64
+        _, kf = s[C].split(C.op.reduce_axis[0], tile_k)
+
+    CF = s.rfactor(C, kf)
+
+    if C.op in s.outputs:
+        Out = C
+    else:
+        Out = s.outputs[0].output(0)
+        s[C].compute_at(s[Out], s[Out].op.axis[1])
+    s[Out].bind(s[Out].op.axis[0], te.thread_axis("blockIdx.y"))
+    s[Out].bind(s[Out].op.axis[1], te.thread_axis("blockIdx.x"))
+
+    tx = s[C].op.reduce_axis[0]
+    thread_x = te.thread_axis("threadIdx.x")
+    s[C].bind(tx, thread_x)
+    s[CF].compute_at(s[C], tx)
+    s[C].set_store_predicate(thread_x.var.equal(0))
+    s[Out].set_store_predicate(thread_x.var.equal(0))
+
+
+@autotvm.register_topi_compute("dense_large_batch.gpu")
+def dense_large_batch(cfg, data, weight, bias=None, out_dtype=None):
+    """Dense operator on CUDA"""

Review comment:
       Ditto: this docstring should also say "gpu" rather than "CUDA".



