fzi-peccia commented on code in PR #13770: URL: https://github.com/apache/tvm/pull/13770#discussion_r1149198727
########## gallery/tutorial/micro_gemmini_conv2d.py: ########## @@ -0,0 +1,221 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" +Running TVM on the Gemmini accelerator - A single 2d convolutional layer example +====================================================================================== +**Author**: +`Federico Peccia <https://fPecc.github.io/>`_ + +This tutorial shows how a quantized 2d convolution layer can be compiled to be executed on the Gemmini accelerator. The generated baremetal C code is then tested on the Spike RISC-V ISA simulator. Before starting this tutorial, you should have downloaded the Chipyard repository and installed the Spike simulator with the Gemmini extension. + +""" + +import tensorflow as tf +from tensorflow import keras +from tensorflow.keras import layers +import numpy as np +import os +import tvm.contrib.gemmini as gemmini +from tvm import relay +import tvm + +################################## +# Pre-requisites +# -------------------------------- +# +# After the installation of the Chipyard development tools, you should have an env.sh file in your Chipyard home directory. This file needs to be sourced before running this tutorial: +# +# .. 
code-block:: bash +# +# source <your chipyard home path>/env.sh +# +# WARNING: if you have installed TVM in a virtual environment, FIRST activate the Chipyard environment, and THEN activate the tvm environment. + +################################## +# Baseline generation +# -------------------------------- +# +# In this section, we will generate the baseline input and expected output, which we are going to use to compare with the actual obtained output after running on the Gemmini accelerator. + +# Then we define the parameters of the layer we want to test. In this case: +input_height = 16 +input_width = 16 +input_channels = 16 +output_channels = 16 +kernel_size = 3 +stride = 1 +padding = "valid" +activation = None +bias = True + +# We can add a max pooling layer after the convolution. This can be merged by the integration and can be executed together with the convolution on the Gemmini accelerator. +pool_size = 1 +pool_stride = 1 +pool_padding = "valid" +use_pool = False + +# We will generate a prequantized TFLite model, because for now the Gemmini integration only supports models that were quantized with specific flags as input. 
+ +layer_sequence = [ + layers.Conv2D( + output_channels, + kernel_size=kernel_size, + padding=padding, + activation=activation, + use_bias=True, + bias_initializer="ones", + input_shape=(input_height, input_width, input_channels), + strides=stride, + ) +] +if use_pool: + layer_sequence.append( + layers.MaxPool2D(pool_size=pool_size, strides=pool_stride, padding=pool_padding) + ) + +model = keras.Sequential(layer_sequence) + +# Convert the concrete functions using TFLiteConverter +converter = tf.lite.TFLiteConverter.from_keras_model(model) + + +def representative_data_gen(): + dataset = [ + np.array( + np.random.randint(0, 10, size=(100, input_height, input_width, input_channels)), + dtype=np.float32, + ) + for s in range(10) + ] + for input_value in dataset: + # Model has only one input so each data point has one element. + yield [input_value] + + +converter.optimizations = [tf.lite.Optimize.DEFAULT] +converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] +converter.inference_input_type = tf.uint8 +converter.inference_output_type = tf.int8 +converter.representative_dataset = representative_data_gen +converter._experimental_disable_per_channel = True + +tflite_model = converter.convert() + +# Save the model. +with open("conv.tflite", "wb") as f: + f.write(tflite_model) + +# Now that we have created the model, we import the model and run it. We store the output, in order to compare it with the output that will be later obtained from the Gemmini accelerator. + +os.system("rm -rf model.tar dev/ include/ generated-project/") Review Comment: Thanks, I applied this suggestion for all examples for all temporary files. ########## python/tvm/contrib/gemmini/__init__.py: ########## @@ -0,0 +1,31 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" +Gemmini package is a TVM backend extension to support the Gemmini hardware accelerator +===================== +**Author**: `Federico Peccia <https://fPecc.github.io/>`_ +""" + +import tvm._ffi.base + +from tvm.relay.backend.contrib.gemmini import * +from .environment import Environment +from .build_module import build_config, lower, build, preprocess_pass +from .helpers import create_header_file +from .utils import * + +__version__ = "0.1.0" Review Comment: Removed version -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected]
