Shawn-Inspur commented on a change in pull request #5099: [TOPI][Tensor Core] Conv2d and Dense ops support on Tensor Core
URL: https://github.com/apache/incubator-tvm/pull/5099#discussion_r397237931
########## File path: topi/python/topi/cuda/conv2d_nhwc_tensorcore.py ##########
@@ -0,0 +1,361 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# pylint: disable=invalid-name, too-many-locals, too-many-arguments
+# pylint: disable=too-many-statements, unused-argument
+"""Tensorcore template for cuda backend"""
+import numpy as np
+import tvm
+from tvm import te
+from tvm import autotvm
+from ..util import get_const_tuple, traverse_inline, simplify
+from ..nn.pad import pad
+from ..nn.util import get_pad_tuple
+from .tensor_intrin import intrin_wmma_load_matrix_A
+from .tensor_intrin import intrin_wmma_load_matrix_W
+from .tensor_intrin import intrin_wmma_store_matrix
+
+
+def intrin_wmma_gemm(strides_A, strides_W, strides_Conv, shape, out_dtype):
+    """Intrin for wmma fill_fragment and mma_sync"""
+    wmma_m, wmma_n, wmma_k = shape
+    A = te.placeholder((wmma_m, 1, 1, wmma_k), name='A', dtype='float16')
+    B = te.placeholder((wmma_k, wmma_n), name='B', dtype='float16')
+    k = te.reduce_axis((0, wmma_k), name="k")
+    C = te.compute((wmma_m, 1, 1, wmma_n),
+                   lambda ii, t0, t1, jj:
+                   te.sum(A[ii, t0, t1, k].astype(out_dtype) *
+                          B[k, jj].astype(out_dtype), axis=k),
+                   name='C')
+    BA = tvm.tir.decl_buffer(A.shape, A.dtype, name='BA',
+                             scope='wmma.matrix_a', data_alignment=32,
+                             offset_factor=8, strides=strides_A)
+    BB = tvm.tir.decl_buffer(B.shape, B.dtype, name='BB',
+                             scope='wmma.matrix_b', data_alignment=32,
+                             offset_factor=8, strides=strides_W)
+    BC = tvm.tir.decl_buffer(C.shape, C.dtype, name='BC',
+                             scope='wmma.accumulator', data_alignment=32,
+                             offset_factor=8, strides=strides_Conv)
+
+    def intrin_func(ins, outs):
+        BA, BB = ins
+        BC, = outs
+
+        def warp_index(offset, row, col):
+            # Convert a flat element offset into a warp fragment index.
+            # `row` is reused below to hold the fragment size (row * col).
+            row = row * col
+            return offset // row + offset % row // col
+
+        warp_index_A = warp_index(BA.elem_offset, wmma_m, wmma_k)
+        warp_index_B = warp_index(BB.elem_offset, wmma_k, wmma_n)
+        warp_index_C = warp_index(BC.elem_offset, wmma_m, wmma_n)
+
+        def init():
+            ib = tvm.tir.ir_builder.create()
+            ib.emit(
+                tvm.tir.call_intrin('handle', 'tvm_fill_fragment',
+                                    BC.data, wmma_m, wmma_n, wmma_k,
+                                    warp_index_C, 0.0))
+            return ib.get()
+
+        def update():
+            ib = tvm.tir.ir_builder.create()
+            ib.emit(tvm.tir.call_intrin('handle', 'tvm_mma_sync',
+                                        BC.data, warp_index_C,
+                                        BA.data, warp_index_A,
+                                        BB.data, warp_index_B,
+                                        BC.data, warp_index_C))
+            return ib.get()
+
+        return update(), init(), update()
+
+    return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, B: BB, C: BC})
+
+
+def nhwc_tensorcore_cuda(cfg, Input, Filter, stride, padding, dilation, out_dtype):
+    """Compute declaration for tensorcore"""
+    assert isinstance(stride, int) or len(stride) == 2
+    assert isinstance(dilation, int) or len(dilation) == 2
+
+    if isinstance(stride, int):
+        stride_h = stride_w = stride
+    else:
+        stride_h, stride_w = stride
+
+    if isinstance(dilation, int):
+        dilation_h = dilation_w = dilation
+    else:
+        dilation_h, dilation_w = dilation
+
+    batch, in_height, in_width, in_channel = Input.shape
+    kernel_h, kernel_w, _, num_filter = Filter.shape
+    # compute the output shape
+    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
+    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
+    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
+        padding, (dilated_kernel_h, dilated_kernel_w))
+    out_channel = num_filter
+    out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
+    out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
+    pad_before = [0, pad_top, pad_left, 0]
+    pad_after = [0, pad_down, pad_right, 0]
+    PaddedInput = pad(Input, pad_before, pad_after, name="PaddedInput")
+    rc = te.reduce_axis((0, in_channel), name='rc')
+    ry = te.reduce_axis((0, kernel_h), name='ry')
+    rx = te.reduce_axis((0, kernel_w), name='rx')
+    # convert data type of input feature maps and weights
+    TransPaddedInput = te.compute(
+        PaddedInput.shape,
+        lambda n, h, w, c: PaddedInput[n, h, w, c].astype('float16'))
+    TransFilter = te.compute(
+        Filter.shape, lambda h, w, i, o: Filter[h, w, i, o].astype('float16'))

Review comment:

This is a very good suggestion! Regarding this pass, we think it should implement the following two functions (see the sketch below):

1. Pre-quantizing weights from fp32 to fp16, so that the weights are not converted at runtime.
2. Removing coupled conversions, such as a cast from fp16 to fp32 immediately followed by a cast from fp32 back to fp16.

With these rules, the pass can automatically insert or remove conversion operations in the graph and keep the cost of data type conversions as low as possible. Beyond these two functions, there is an open question: should this pass live in the quantization module? The pass is complex enough that we would like to submit it as a separate Tensor Core optimization in the future. The current submission focuses on supporting Conv2d and Dense on Tensor Core so that CNNs can be computed on it, and even with these data type conversions the performance on Tensor Core is fairly good.
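To make the intended rewrite concrete, here is a minimal, self-contained sketch of the two rules on a toy expression graph. Everything in it (`Tensor`, `Const`, `Cast`, `fold_casts`, `LOSSLESS_ROUND_TRIPS`) is a hypothetical illustration, not a TVM API; a real implementation would operate on Relay/TIR nodes instead.

```python
from dataclasses import dataclass


@dataclass
class Tensor:
    """A leaf value (e.g. an activation) with a known dtype."""
    name: str
    dtype: str


@dataclass
class Const(Tensor):
    """A constant (e.g. conv weights) that can be converted at compile time."""


@dataclass
class Cast:
    """Cast `value` to `dtype` at runtime."""
    value: object
    dtype: str


# (narrow, wide) pairs for which narrow -> wide -> narrow is an exact identity.
LOSSLESS_ROUND_TRIPS = {("float16", "float32")}


def fold_casts(expr):
    """Apply the two rewrite rules bottom-up."""
    if not isinstance(expr, Cast):
        return expr
    inner = fold_casts(expr.value)
    if inner.dtype == expr.dtype:
        # Casting to the dtype the value already has: drop the cast.
        return inner
    if isinstance(inner, Const):
        # Rule 1: pre-quantize constants (fp32 -> fp16) once at compile
        # time instead of casting the weights on every run.
        return Const(inner.name, expr.dtype)
    if (isinstance(inner, Cast) and inner.value.dtype == expr.dtype
            and (expr.dtype, inner.dtype) in LOSSLESS_ROUND_TRIPS):
        # Rule 2: a coupled fp16 -> fp32 -> fp16 pair restores every value
        # exactly (the widening cast is lossless), so remove both casts.
        return inner.value
    return Cast(inner, expr.dtype)


# Weights are cast fp32 -> fp16; activations round-trip fp16 -> fp32 -> fp16.
w = Cast(Const("weight", "float32"), "float16")
x = Cast(Cast(Tensor("act", "float16"), "float32"), "float16")
assert fold_casts(w) == Const("weight", "float16")
assert fold_casts(x) == Tensor("act", "float16")
```

In TVM itself this would most naturally be a graph-level rewrite, which is exactly why the open question above is whether it belongs next to the existing quantization passes.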
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]

With regards,
Apache Git Services