maheshambule commented on a change in pull request #5052: URL: https://github.com/apache/incubator-tvm/pull/5052#discussion_r453334865
########## File path: python/tvm/contrib/target/onnx.py ########## @@ -0,0 +1,899 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines, redefined-builtin +"""Relay to ONNX codegen """ + +import os +import struct +import copy +import numpy +import onnx +import onnx.utils +from onnx import numpy_helper, OperatorSetIdProto, defs +import tvm +from tvm import relay +import tvm._ffi +from tvm.relay.expr_functor import ExprVisitor +from tvm.relay.ty import TupleType, TensorType + +ONNX_OPSET_VERSONS_SUPPORTED = [11] + + +def tvm_array_to_list(arr): + return tuple(x.value for x in arr) + + +def get_onnx_version(): + return onnx.__version__ + + +def infer_type(node): + """A method to infer the type of a relay expression.""" + mod = tvm.IRModule.from_expr(node) + mod = relay.transform.InferType()(mod) + entry = mod["main"] + return entry if isinstance(node, relay.Function) else entry.body + + +def call_node_infer_type(node): + """infer the output types of call node""" + infer_out = infer_type(node) + out_type = infer_out._checked_type_ + if isinstance(out_type, TensorType): + types = [out_type] + elif isinstance(out_type, TupleType): + types = 
list(out_type.fields) + else: + raise RuntimeError("Unsupported output type %s in operator %s" + % (type(out_type), node.op.name)) + + return types + + +def add_input(data, name, model_container): + dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[data.dtype] + tensor_value_info = onnx.helper.make_tensor_value_info(name, dtype, shape=data.shape) + model_container.add_inputs([tensor_value_info]) + data_tensor = numpy_helper.from_array(data, name) + model_container.add_initializers([data_tensor]) + + +class OpConverter(object): + """ Operator converter Base Class. + """ + + @classmethod + def convert_attributes(cls, attrs): + """convert Relay attributes to ONNX attributes. + The derived classes should implement this method + if attributes are required by the operator + otherwise by default no attributes are passed + """ + return {} + + @classmethod + def convert(cls, node_entry, model_container, node_dict): + attrs = cls.convert_attributes(node_entry['relay_node'].attrs) + onnx_node = onnx.helper.make_node(cls.__name__, + node_entry['input_names'], + node_entry['output_names'], + **attrs) + model_container.add_nodes([onnx_node]) + + +def rename(op_name): + """ This method creates dynamic operator of name op_name with empty attributes + """ + return type(op_name, (OpConverter,), {}) + + +class Reshape(object): + """ Operator converter for Reshape. + """ + + @classmethod + def convert(cls, node_entry, model_container, node_dict): + """Converts Relay operator Reshape to ONNX operator. + Relay operator accepts shape as attribute but ONNX operator + accepts it as an input. 
+ """ + + shape = numpy.asarray([a.value for a in node_entry['relay_node'].attrs.newshape], + dtype=numpy.int64) + input_name = 'shape{}'.format(node_entry['name']) + node = onnx.helper.make_node(cls.__name__, [node_entry['input_names'][0], input_name], + node_entry['output_names']) + model_container.add_nodes([node]) + add_input(shape, input_name, model_container) + + +class Conv(OpConverter): + """ Operator converter for Conv. + """ + + @classmethod + def convert_attributes(cls, attrs): + return { + 'group': attrs.get_int("groups"), + 'pads': attrs.get_int_tuple("padding"), + 'strides': attrs.get_int_tuple("strides"), + 'dilations': attrs.get_int_tuple("dilation"), + 'kernel_shape': attrs.get_int_tuple("kernel_size"), + } + + +class MaxPool(OpConverter): + """ Operator converter for MaxPool. + """ + + @classmethod + def convert_attributes(cls, attrs): + return { + 'pads': attrs.get_int_tuple("padding"), + 'strides': attrs.get_int_tuple("strides"), + 'kernel_shape': attrs.get_int_tuple("pool_size"), + } + + +class Transpose(OpConverter): + """ Operator converter for Transpose. + """ + + @classmethod + def convert_attributes(cls, attrs): + return {'perm': attrs.get_int_tuple("axes")} if attrs["axes"] else {} + + +class MatMul(OpConverter): + """ Operator converter for MatMul. + """ + + @classmethod + def convert(cls, node_entry, model_container, node_dict): + inter_output_name = 'inter{}'.format(node_entry['name']) + transpose_node = onnx.helper.make_node(Transpose.__name__, + [node_entry['input_names'][1]], + [inter_output_name], + perm=(1, 0)) + model_container.add_nodes([transpose_node]) + + inputs = [node_entry['input_names'][0], inter_output_name] + matmul_node = onnx.helper.make_node(cls.__name__, inputs, node_entry['output_names']) + model_container.add_nodes([matmul_node]) + + +class Flatten(OpConverter): + """ Operator converter for Flatten. 
+ """ + + @classmethod + def convert_attributes(cls, attrs): + return { + 'axis': 1, + } + + +class BatchNormalization(OpConverter): + """ Operator converter for BatchNormalization. + """ + + @classmethod + def convert_attributes(cls, attrs): + return { + 'epsilon': float(attrs.get_str('epsilon')), + 'axis': float(attrs.get_int('axis')), + } + + @classmethod + def convert(cls, node_entry, model_container, node_dict): + """Converts Relay operator batch_norm to ONNX operator. + Relay operator has property axis to handle data in NHWC format. + """ + attrs = cls.convert_attributes(node_entry['relay_node'].attrs) + transpose_out_name = node_entry['input_names'][0] + inter_output_names = [node_entry['output_names'][0]] + # axis==3 means channel is specified along the 3rd axis + if attrs['axis'] == 3: + transpose_out_name = 'transpose_{}'.format(node_entry['name']) + node_transposed = onnx.helper.make_node(Transpose.__name__, + [node_entry['input_names'][0]], + [transpose_out_name], + perm=[0, 3, 1, 2]) + model_container.add_nodes([node_transposed]) + inter_output_names = ['batch_norm_{}'.format(node_entry['name'])] + + input_names = [transpose_out_name] + node_entry['input_names'][1:] + batch_norm_node = onnx.helper.make_node(cls.__name__, + input_names, + inter_output_names, + epsilon=attrs['epsilon']) + model_container.add_nodes([batch_norm_node]) + + if attrs['axis'] == 3: + node_transposed = onnx.helper.make_node(Transpose.__name__, + inter_output_names, + [node_entry['output_names'][0]], + perm=[0, 2, 3, 1]) + model_container.add_nodes([node_transposed]) + + +class Dropout(OpConverter): + """ Operator converter for Dropout. + """ + + @classmethod + def convert_attributes(cls, attrs): + return { + 'ratio': float(attrs.get_str('rate')), + } + + +class AveragePool(MaxPool): + """ Operator converter for AveragePool. + """ + + +class Concat(OpConverter): + """ Operator converter for Concat. 
+ """ + + @classmethod + def convert_attributes(cls, attrs): + return { + 'axis': attrs.get_int("axis"), + } + + +class BiasAdd(OpConverter): + """ Operator converter for BiasAdd. + """ + + @classmethod + def convert(cls, node_entry, model_container, node_dict): + input_node = node_dict[node_entry['inputs'][0]] + assert len(input_node) == 1, "input node_entry can not be a Tuple" + input_node = input_node[0] + data_ndim = len(input_node['types'][0].shape) + axis = node_entry['relay_node'].attrs.get_int("axis") + if axis < 0: + axis = axis + data_ndim + new_axes = data_ndim - axis - 1 + if new_axes: + inter_output_name = 'inter{}'.format(node_entry['name']) + unsqueeze_node = onnx.helper.make_node('Unsqueeze', + [node_entry['input_names'][1]], + [inter_output_name], + axes=tuple(range(1, new_axes + 1))) + model_container.add_nodes([unsqueeze_node]) + else: + inter_output_name = node_entry['input_names'][1] + + inputs = [node_entry['input_names'][0], inter_output_name] + matmul_node = onnx.helper.make_node('Add', inputs, node_entry['output_names']) + model_container.add_nodes([matmul_node]) + + +class ReduceMean(OpConverter): + """ Operator converter for ReduceMean. 
+ """ + + @classmethod + def convert_attributes(cls, attrs): + return { + 'axes': attrs.axis, + 'keepdims': 0 if bool(attrs.get_int("keepdims", 0)) is False else 1 + } + + @classmethod + def convert(cls, node_entry, model_container, node_dict): + input_node = node_dict[node_entry['inputs'][0]] + assert len(input_node) == 1, "input node can not be a Tuple" + input_node = input_node[0] + shape = input_node['types'][0].shape + axis = node_entry['relay_node'].attrs.axis + axis = list(range(shape.size())) if not axis else tvm_array_to_list(axis) + exclude = 0 if not bool(node_entry['relay_node'].attrs.exclude) else 1 + keepdims = 0 if not bool(node_entry['relay_node'].attrs.keepdims) else 1 + if exclude: + all_axis = list(range(len(shape))) + axis = set(all_axis) - set(axis) + + node = onnx.helper.make_node(cls.__name__, + node_entry['input_names'], + node_entry['output_names'], + axes=axis, + keepdims=keepdims) + model_container.add_nodes([node]) + + +class Pad(OpConverter): + """ Operator converter for Pad. + """ + + @classmethod + def convert_attributes(cls, attrs): + before = [] + after = [] + for axis_pads in attrs.pad_width: + before.append(axis_pads[0]) + after.append(axis_pads[1]) + pads = before + after + pads = numpy.asarray(pads, dtype=pads[0].dtype) + return { + 'pads': pads, + 'mode': attrs.get_str('pad_mode'), + 'constant_value': attrs.pad_value + } + + @classmethod + def convert(cls, node_entry, model_container, node_dict): + """Converts Relay operator Pad to ONNX operator. + Relay operator accepts pads as attribute but ONNX operator + accepts it as a input. 
+ """ + attrs = cls.convert_attributes(node_entry['relay_node'].attrs) + + name = node_entry['name'] + data = numpy.asarray(attrs['pads'], dtype=attrs['pads'][0].dtype).astype(numpy.int64) + input_name = 'pads_{}'.format(name) + value = numpy.dtype(node_entry['types'][0].dtype).type(attrs['constant_value']) + input_value_name = 'value_{}'.format(name) + add_input(data, input_name, model_container) + add_input(value, input_value_name, model_container) + + input_names = [node_entry['input_names'][0], input_name, input_value_name] + node = onnx.helper.make_node(cls.__name__, input_names, node_entry['output_names']) + model_container.add_nodes([node]) + + +class Softmax(OpConverter): + """ Operator converter for SoftMax. + """ + + @classmethod + def convert_attributes(cls, attrs): + return { + 'axis': attrs.axis, + } + + +class Squeeze(OpConverter): + """ Operator converter for Squeeze. + """ + + @classmethod + def convert_attributes(cls, attrs): + return { + 'axes': attrs.axis, + } + + @classmethod + def convert(cls, node_entry, model_container, node_dict): + input_node = node_dict[node_entry['inputs'][0]] + assert len(input_node) == 1, "input node can not be a Tuple" + input_node = input_node[0] + shape = input_node['types'][0].shape + axis = node_entry['relay_node'].attrs.get_int("axis") + if not axis: + axis = [] + for axis_idx, val in enumerate(shape): + if val.value == 1: + axis.append(axis_idx) + else: + axis = node_entry['relay_node'].attrs.get_int_tuple("axis") + + node = onnx.helper.make_node(cls.__name__, + node_entry['input_names'], + node_entry['output_names'], + axes=axis) + model_container.add_nodes([node]) + + +class Slice(OpConverter): + """ Operator converter for Slice. 
+ """ + + @classmethod + def convert_attributes(cls, attrs): + return { + 'starts': attrs.get_int_tuple('begin'), + 'ends': attrs.get_int_tuple('end'), + 'steps': attrs.get_int_tuple('strides'), + 'slice_mode': attrs.get_str('slice_mode') + } + + @classmethod + def convert(cls, node_entry, model_container, node_dict): + attrs = cls.convert_attributes(node_entry['relay_node'].attrs) + + name = node_entry['name'] + input_node = node_dict[node_entry['inputs'][0]] + assert len(input_node) == 1, "input node can not be a Tuple" + input_node = input_node[0] + shape = input_node['types'][0].shape + + starts = list(attrs['starts']) + ends = list(attrs['ends']) + steps = list(attrs['steps']) + starts += [0] * (len(shape) - len(starts)) + ends += [shape[i] + 1 for i in range(len(ends), len(shape))] + axes = list(range(len(shape))) + + if attrs['slice_mode'] == 'size': + ends = [starts[i] + (shape[i] + 1 if ends[i] < 0 else ends[i]) + for i in range(len(shape))] + steps = [1] * len(shape) + else: + steps += [1] * (len(shape) - len(steps)) + + def _add_input(val, input_name): + val_arr = numpy.asarray(val).astype(numpy.int64) + input_name = '{}_{}'.format(name, input_name) + add_input(val_arr, input_name, model_container) + return input_name + + input_names = [] + input_names.append(_add_input(starts, 'starts')) + input_names.append(_add_input(ends, 'ends')) + input_names.append(_add_input(axes, 'axes')) + input_names.append(_add_input(steps, 'steps')) + + input_names = [node_entry['input_names'][0]] + input_names + + slice_node = onnx.helper.make_node(cls.__name__, + input_names, + node_entry['output_names']) + model_container.add_nodes([slice_node]) + + +class Split(OpConverter): + """ Operator converter for Split. 
+ """ + + @classmethod + def convert_attributes(cls, attrs): + indices_or_sections = attrs['indices_or_sections'] + + if isinstance(indices_or_sections, (list, tvm.ir.container.Array)): + indices_or_sections = attrs.get_int_tuple('indices_or_sections') + if isinstance(indices_or_sections, tvm.ir.PrimExpr): + indices_or_sections = indices_or_sections.value + + return { + 'indices_or_section': indices_or_sections, + 'axis': attrs.get_int('axis'), + } + + @classmethod + def convert(cls, node_entry, model_container, node_dict): + attrs = cls.convert_attributes(node_entry['relay_node'].attrs) + + input_node = node_dict[node_entry['inputs'][0]] + assert len(input_node) == 1, "input node can not be a Tuple" + input_node = input_node[0] + shape = input_node['types'][0].concrete_shape + + indices_or_sect = attrs["indices_or_section"] + axis = attrs["axis"] + axis_length = shape[axis] + + if isinstance(indices_or_sect, int): + split = [axis_length // indices_or_sect] * indices_or_sect + else: + split = [] + for i in range(len(indices_or_sect) + 1): + if i == 0: + split.append(indices_or_sect[0]) + elif i == len(indices_or_sect): + split.append(axis_length - indices_or_sect[-1]) + else: + split.append(indices_or_sect[i] - indices_or_sect[i - 1]) + + slice_node = onnx.helper.make_node(cls.__name__, + node_entry['input_names'], + node_entry['output_names'], + split=split, + axis=axis) + model_container.add_nodes([slice_node]) + + +class ConstantOfShapeZeros(OpConverter): + """ Operator converter for ConstantOfShape. 
+ """ + + @classmethod + def convert_attributes(cls, attrs): + return { + 'value': 0 + } + + @classmethod + def convert(cls, node_entry, model_container, node_dict): + attrs = cls.convert_attributes(node_entry['relay_node'].attrs) + input_node = node_dict[node_entry['inputs'][0]] + assert len(input_node) == 1, "input node can not be a Tuple" + input_node = input_node[0] + dtype = input_node['relay_node'].type_annotation.dtype + input_shape_name = 'shape_{}'.format(node_entry['name']) + shape = [val.value for val in input_node['relay_node'].type_annotation.shape] + shape = numpy.asarray(shape).astype(numpy.int64) + add_input(shape, input_shape_name, model_container) + + dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(dtype)] + tensor_value = onnx.helper.make_tensor("value", dtype, + [1], [attrs['value']]) + + node = onnx.helper.make_node('ConstantOfShape', + [input_shape_name], + node_entry['output_names'], + value=tensor_value) + model_container.add_nodes([node]) + + +class ConstantOfShapeOnes(ConstantOfShapeZeros): + """ Operator converter for ConstantOfShape. 
+ """ + + @classmethod + def convert_attributes(cls, attrs): + return { + 'value': 1 + } + + +relay_to_onnx_op_mapping = { + 'reshape': Reshape, + 'nn.conv2d': Conv, + 'add': rename('Add'), + 'nn.relu': rename('Relu'), + 'transpose': Transpose, + 'nn.dense': MatMul, + 'nn.max_pool2d': MaxPool, + 'nn.batch_flatten': Flatten, + 'multiply': rename('Mul'), + 'nn.bias_add': BiasAdd, + 'nn.batch_norm': BatchNormalization, + 'nn.global_avg_pool2d': rename('GlobalAveragePool'), + 'concatenate': Concat, + 'nn.dropout': Dropout, + 'nn.avg_pool2d': AveragePool, + 'divide': rename('Div'), + 'mean': ReduceMean, + 'nn.pad': Pad, + 'nn.softmax': Softmax, + 'squeeze': Squeeze, + 'strided_slice': Slice, + 'greater': rename('Greater'), + 'less': rename('Less'), + 'equal': rename('Equal'), + 'zeros_like': ConstantOfShapeZeros, + 'ones_like': ConstantOfShapeOnes, + 'subtract': rename('Sub'), + 'split': Split +} + + +class ModelContainer(object): + """ A container class to hold different attributes of ONNX model graph + """ + + def __init__(self, name, opset_version): + self._name = name + self._opset_version = opset_version + self._inputs = [] + self._outputs = [] + self._nodes = [] + self._initializers = [] + + def add_inputs(self, inputs): + self._inputs.extend(inputs) + + def add_outputs(self, outputs): + self._outputs.extend(outputs) + + def add_nodes(self, nodes): + self._nodes.extend(nodes) + + def add_initializers(self, initializers): + self._initializers.extend(initializers) + + def _get_opsets(self): + opsets = [] + imp = OperatorSetIdProto() + imp.version = self._opset_version + opsets.append(imp) + return opsets + + def make_model(self): + """ Creates the onnx model from the graph """ + onnx_graph = onnx.helper.make_graph( + self._nodes, + self._name, + self._inputs, + self._outputs, + self._initializers + ) + kwargs = {} + kwargs["opset_imports"] = self._get_opsets() + kwargs["producer_name"] = 'TVM Relay' + kwargs["producer_version"] = tvm.__version__ + + return 
onnx.helper.make_model(onnx_graph, **kwargs) + + +class RelayToONNXConverter(ExprVisitor): + """A helper class to traverse the Relay graph and convert Relay nodes to ONNX model + + Parameters + ---------- + name : str + name of the model + + params : dict + dict of the parameter names and NDarray values + + opset_version : int + target onnx opset version + + """ + + def __init__(self, name, params, opset_version): + super().__init__() + self._name = {} + self._mc = ModelContainer(name, opset_version) + self._params = params + self._node_dict = {} + self._node_count = 0 + self.last_node = None + + @classmethod + def _get_node_entry(cls, relay_node, name): + return {"relay_node": relay_node, + "inputs": [relay_node], # inputs in the form of relay nodes + "types": [], # output types in case of call nodes else self type + "name": name, # name of the node + "input_names": [name], # input names in case of call nodes else self name + "output_names": [name], # output names in case of call nodes else self name + "op": None, # op name in case of call node else None + } + + def convert_to_onnx(self, func): + """ Traverse Relay graph and generate a ONNX model""" + + self.visit(func) + self._add_output(self._node_dict[self.last_node]) + model = self._mc.make_model() + polished_model = onnx.utils.polish_model(model) + return polished_model + + def visit(self, expr): + self._node_count += 1 + super().visit(expr) + + def visit_constant(self, const): + node_index = self._node_count + name = "Constant_" + str(node_index) + node_entry = self._get_node_entry(const, name) + node_entry["types"] = [const.checked_type] + + self._add_constant_input(node_entry, node_index) + self._node_dict[const] = [node_entry] + + def visit_var(self, var): + node_index = self._node_count + node_entry = self._get_node_entry(var, var.name_hint) + node_entry["types"] = [var.type_annotation] + + self._add_input(node_entry, node_index) + self._node_dict[var] = [node_entry] + + def visit_tuple(self, tup): + 
self._node_dict[tup] = [] + for f in tup.fields: + self.visit(f) + self._node_dict[tup].extend(self._node_dict[f]) + + self.last_node = tup + + def visit_tuple_getitem(self, t): + self.visit(t.tuple_value) + tup_node = self._node_dict[t.tuple_value] + if len(tup_node) > 1: + self._node_dict[t] = tup_node[t.index] + else: + node_entry = copy.deepcopy(tup_node[0]) + output_names = [node_entry["output_names"][t.index]] + node_entry["output_names"] = output_names + self._node_dict[t] = [node_entry] + self.last_node = t + + def visit_call(self, call): + node_index = self._node_count + op = call.op + name = "{}_{}".format(op, node_index) + node_entry = self._get_node_entry(call, name) + + node_entry["op"] = op + node_entry["input_names"] = [] + node_entry["inputs"] = [] + node_entry["output_names"] = None + for input_arg in call.args: + self.visit(input_arg) + input_names = [] + for arg_node_entry in self._node_dict[input_arg]: + input_names.extend(arg_node_entry["output_names"]) + node_entry["input_names"].extend(input_names) + node_entry["inputs"].extend([input_arg]) + + node_entry['types'] = call_node_infer_type(call) + node_entry["output_names"] = [] + for i in range(len(node_entry['types'])): + node_entry["output_names"].append(name + str(i)) + self.last_node = call + self._add_node(node_entry, node_index) + self._node_dict[call] = [node_entry] + + def _add_node(self, node_entry, idx): + """Convert Relay operator node to ONNX operator and add it to container nodes list""" + if node_entry['op'].name not in relay_to_onnx_op_mapping: + raise NotImplementedError("Currently the operator '{0}' is " + "not supported.".format(node_entry['op'].name)) + + converter = relay_to_onnx_op_mapping[node_entry['op'].name]() + + return converter.convert(node_entry, self._mc, self._node_dict) + + def _add_params(self, node_entry, idx): + """Add param value to initializer and name to inputs""" + param_name = node_entry['name'] + assert param_name in self._params, "The parameter {0} is 
not present" \ + "in params dict provided.".format(param_name) + value = self._params[param_name] + numpy_array = value.asnumpy() + tensor = numpy_helper.from_array(numpy_array, param_name) + self._mc.add_initializers([tensor]) + dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy_array.dtype] + input = onnx.helper.make_tensor_value_info(param_name, + dtype, + shape=numpy_array.shape) + self._mc.add_inputs([input]) + + def _add_constant_input(self, node_entry, idx): + """Create named input for constant and add it to container inputs. + If input is a parameter then add to param + """ + node = node_entry['relay_node'] + param_name = node_entry['name'] + self._params[param_name] = node.data + self._add_params(node_entry, idx) + + def _add_input(self, node_entry, idx): + """Add input node to container inputs. If input is a parameter then add to param""" + if node_entry['name'] in self._params: + self._add_params(node_entry, idx) + else: + type = node_entry['types'][0] Review comment: @alexwong, While converting the module I faced issue related to type inference for ConstantOfShapeZeros op, I have fixed that issue. After fixing the issue, found an issue with Relay module itself. It seems arguments provided to reshape op are not correct. Below is the error I am getting. Could you please look into it? 
``` test_onnx_model.py:36: in func_to_onnx onnx_model = to_onnx(mod, params, name, path=None) ../../../python/tvm/contrib/target/onnx.py:845: in to_onnx onnx_model = converter.convert_to_onnx(func) ../../../python/tvm/contrib/target/onnx.py:678: in convert_to_onnx self.visit(func) ../../../python/tvm/contrib/target/onnx.py:686: in visit super().visit(expr) ../../../python/tvm/relay/expr_functor.py:44: in visit res = self.visit_function(expr) ../../../python/tvm/relay/expr_functor.py:153: in visit_function self.visit(f.body) ../../../python/tvm/contrib/target/onnx.py:686: in visit super().visit(expr) ../../../python/tvm/relay/expr_functor.py:56: in visit res = self.visit_tuple(expr) ../../../python/tvm/contrib/target/onnx.py:708: in visit_tuple self.visit(f) ../../../python/tvm/contrib/target/onnx.py:686: in visit super().visit(expr) ../../../python/tvm/relay/expr_functor.py:46: in visit res = self.visit_call(expr) ../../../python/tvm/contrib/target/onnx.py:736: in visit_call self.visit(input_arg) ../../../python/tvm/contrib/target/onnx.py:686: in visit super().visit(expr) ../../../python/tvm/relay/expr_functor.py:46: in visit res = self.visit_call(expr) ../../../python/tvm/contrib/target/onnx.py:736: in visit_call self.visit(input_arg) ../../../python/tvm/contrib/target/onnx.py:686: in visit super().visit(expr) ../../../python/tvm/relay/expr_functor.py:46: in visit res = self.visit_call(expr) ../../../python/tvm/contrib/target/onnx.py:743: in visit_call node_entry['types'] = call_node_infer_type(call) ../../../python/tvm/contrib/target/onnx.py:54: in call_node_infer_type infer_out = infer_type(node) ../../../python/tvm/contrib/target/onnx.py:46: in infer_type mod = tvm.IRModule.from_expr(node) ../../../python/tvm/ir/module.py:222: in from_expr return _ffi_api.Module_FromExpr(expr, funcs, defs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <tvm.runtime.packed_func.PackedFunc object at 0x118015780> args = 
(CallNode(Op(reshape), [CallNode(Op(concatenate), [Tuple([CallNode(Op(nn.batch_flatten), [CallNode(Op(transpose), [Cal...1 21])], relay.attrs.ReshapeAttrs(0x7fc0c2626908), [TensorType([1, 128772], float32), TensorType([3], int32)]), {}, {}) temp_args = [{}, {}] values = <tvm._ffi._ctypes.packed_func.TVMValue_Array_3 object at 0x11e962c40> tcodes = <tvm._ffi._ctypes.packed_func.c_int_Array_3 object at 0x11e962040> def __call__(self, *args): """Call the function with positional arguments args : list The positional arguments to the function call. """ temp_args = [] values, tcodes, num_args = _make_tvm_args(args, temp_args) ret_val = TVMValue() ret_tcode = ctypes.c_int() if _LIB.TVMFuncCall( self.handle, values, tcodes, ctypes.c_int(num_args), ctypes.byref(ret_val), ctypes.byref(ret_tcode)) != 0: > raise get_last_ffi_error() E tvm._ffi.base.TVMError: Traceback (most recent call last): E [bt] (8) 9 libtvm.dylib 0x00000001189cff09 tvm::relay::TypeInferencer::GetType(tvm::RelayExpr const&) + 297 E [bt] (7) 8 libtvm.dylib 0x00000001189d855a tvm::relay::ExprFunctor<tvm::Type (tvm::RelayExpr const&)>::VisitExpr(tvm::RelayExpr const&) + 138 E [bt] (6) 7 libtvm.dylib 0x00000001189de80f tvm::NodeFunctor<tvm::Type (tvm::runtime::ObjectRef const&, tvm::relay::ExprFunctor<tvm::Type (tvm::RelayExpr const&)>*)>::operator()(tvm::runtime::ObjectRef const&, tvm::relay::ExprFunctor<tvm::Type (tvm::RelayExpr const&)>*) const + 255 E [bt] (5) 6 libtvm.dylib 0x00000001189dfe58 tvm::relay::ExprFunctor<tvm::Type (tvm::RelayExpr const&)>::InitVTable()::'lambda4'(tvm::runtime::ObjectRef const&, tvm::relay::ExprFunctor<tvm::Type (tvm::RelayExpr const&)>*)::__invoke(tvm::runtime::ObjectRef const&, tvm::relay::ExprFunctor<tvm::Type (tvm::RelayExpr const&)>*) + 24 E [bt] (4) 5 libtvm.dylib 0x00000001189d977d tvm::relay::TypeInferencer::VisitExpr_(tvm::relay::CallNode const*) + 717 E [bt] (3) 4 libtvm.dylib 0x00000001189e23b2 tvm::relay::TypeInferencer::GeneralCall(tvm::relay::CallNode const*, 
tvm::runtime::Array<tvm::Type, void>) + 2866 E [bt] (2) 3 libtvm.dylib 0x00000001189e00ba tvm::relay::TypeInferencer::ReportFatalError(tvm::runtime::ObjectRef const&, tvm::Error const&) + 154 E [bt] (1) 2 libtvm.dylib 0x00000001181a9850 tvm::ErrorReporter::RenderErrors(tvm::IRModule const&, bool) + 5296 E [bt] (0) 1 libtvm.dylib 0x0000000118086781 dmlc::LogMessageFatal::~LogMessageFatal() + 113 E File "/Users/demo/git/tvm/src/ir/error.cc", line 132 E TVMError: E Error(s) have occurred. The program has been annotated with them: E E In `main`: E v0.0.4 E fn (%cv22_0_i0: Tensor[(1, 3, 512, 512), float32]) { E %0 = nn.conv2d(%cv22_0_i0, meta[relay.Constant][0], strides=[2, 2], padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3]); E %1 = nn.batch_norm(%0, meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3], meta[relay.Constant][4]); E %2 = %1.0; E %3 = nn.relu(%2); E %4 = nn.conv2d(%3, meta[relay.Constant][5], padding=[1, 1, 1, 1], groups=32, channels=32, kernel_size=[3, 3]); E %5 = nn.batch_norm(%4, meta[relay.Constant][6], meta[relay.Constant][7], meta[relay.Constant][8], meta[relay.Constant][9]); E %6 = %5.0; E %7 = nn.relu(%6); E %8 = nn.conv2d(%7, meta[relay.Constant][10], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]); E %9 = nn.batch_norm(%8, meta[relay.Constant][11], meta[relay.Constant][12], meta[relay.Constant][13], meta[relay.Constant][14]); E %10 = %9.0; E %11 = nn.relu(%10); E %12 = nn.conv2d(%11, meta[relay.Constant][15], strides=[2, 2], padding=[1, 1, 1, 1], groups=64, channels=64, kernel_size=[3, 3]); E %13 = nn.batch_norm(%12, meta[relay.Constant][16], meta[relay.Constant][17], meta[relay.Constant][18], meta[relay.Constant][19]); E %14 = %13.0; E %15 = nn.relu(%14); E %16 = nn.conv2d(%15, meta[relay.Constant][20], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]); E %17 = nn.batch_norm(%16, meta[relay.Constant][21], meta[relay.Constant][22], meta[relay.Constant][23], meta[relay.Constant][24]); E %18 = %17.0; E %19 = 
nn.relu(%18); E %20 = nn.conv2d(%19, meta[relay.Constant][25], padding=[1, 1, 1, 1], groups=128, channels=128, kernel_size=[3, 3]); E %21 = nn.batch_norm(%20, meta[relay.Constant][26], meta[relay.Constant][27], meta[relay.Constant][28], meta[relay.Constant][29]); E %22 = %21.0; E %23 = nn.relu(%22); E %24 = nn.conv2d(%23, meta[relay.Constant][30], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]); E %25 = nn.batch_norm(%24, meta[relay.Constant][31], meta[relay.Constant][32], meta[relay.Constant][33], meta[relay.Constant][34]); E %26 = %25.0; E %27 = nn.relu(%26); E %28 = nn.conv2d(%27, meta[relay.Constant][35], strides=[2, 2], padding=[1, 1, 1, 1], groups=128, channels=128, kernel_size=[3, 3]); E %29 = nn.batch_norm(%28, meta[relay.Constant][36], meta[relay.Constant][37], meta[relay.Constant][38], meta[relay.Constant][39]); E %30 = %29.0; E %31 = nn.relu(%30); E %32 = nn.conv2d(%31, meta[relay.Constant][40], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]); E %33 = nn.batch_norm(%32, meta[relay.Constant][41], meta[relay.Constant][42], meta[relay.Constant][43], meta[relay.Constant][44]); E %34 = %33.0; E %35 = nn.relu(%34); E %36 = nn.conv2d(%35, meta[relay.Constant][45], padding=[1, 1, 1, 1], groups=256, channels=256, kernel_size=[3, 3]); E %37 = nn.batch_norm(%36, meta[relay.Constant][46], meta[relay.Constant][47], meta[relay.Constant][48], meta[relay.Constant][49]); E %38 = %37.0; E %39 = nn.relu(%38); E %40 = nn.conv2d(%39, meta[relay.Constant][50], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]); E %41 = nn.batch_norm(%40, meta[relay.Constant][51], meta[relay.Constant][52], meta[relay.Constant][53], meta[relay.Constant][54]); E %42 = %41.0; E %43 = nn.relu(%42); E %44 = nn.conv2d(%43, meta[relay.Constant][55], strides=[2, 2], padding=[1, 1, 1, 1], groups=256, channels=256, kernel_size=[3, 3]); E %45 = nn.batch_norm(%44, meta[relay.Constant][56], meta[relay.Constant][57], meta[relay.Constant][58], meta[relay.Constant][59]); E %46 = %45.0; E 
%47 = nn.relu(%46); E %48 = nn.conv2d(%47, meta[relay.Constant][60], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]); E %49 = nn.batch_norm(%48, meta[relay.Constant][61], meta[relay.Constant][62], meta[relay.Constant][63], meta[relay.Constant][64]); E %50 = %49.0; E %51 = nn.relu(%50); E %52 = nn.conv2d(%51, meta[relay.Constant][65], padding=[1, 1, 1, 1], groups=512, channels=512, kernel_size=[3, 3]); E %53 = nn.batch_norm(%52, meta[relay.Constant][66], meta[relay.Constant][67], meta[relay.Constant][68], meta[relay.Constant][69]); E %54 = %53.0; E %55 = nn.relu(%54); E %56 = nn.conv2d(%55, meta[relay.Constant][70], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]); E %57 = nn.batch_norm(%56, meta[relay.Constant][71], meta[relay.Constant][72], meta[relay.Constant][73], meta[relay.Constant][74]); E %58 = %57.0; E %59 = nn.relu(%58); E %60 = nn.conv2d(%59, meta[relay.Constant][75], padding=[1, 1, 1, 1], groups=512, channels=512, kernel_size=[3, 3]); E %61 = nn.batch_norm(%60, meta[relay.Constant][76], meta[relay.Constant][77], meta[relay.Constant][78], meta[relay.Constant][79]); E %62 = %61.0; E %63 = nn.relu(%62); E %64 = nn.conv2d(%63, meta[relay.Constant][80], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]); E %65 = nn.batch_norm(%64, meta[relay.Constant][81], meta[relay.Constant][82], meta[relay.Constant][83], meta[relay.Constant][84]); E %66 = %65.0; E %67 = nn.relu(%66); E %68 = nn.conv2d(%67, meta[relay.Constant][85], padding=[1, 1, 1, 1], groups=512, channels=512, kernel_size=[3, 3]); E %69 = nn.batch_norm(%68, meta[relay.Constant][86], meta[relay.Constant][87], meta[relay.Constant][88], meta[relay.Constant][89]); E %70 = %69.0; E %71 = nn.relu(%70); E %72 = nn.conv2d(%71, meta[relay.Constant][90], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]); E %73 = nn.batch_norm(%72, meta[relay.Constant][91], meta[relay.Constant][92], meta[relay.Constant][93], meta[relay.Constant][94]); E %74 = %73.0; E %75 = nn.relu(%74); E %76 = 
nn.conv2d(%75, meta[relay.Constant][95], padding=[1, 1, 1, 1], groups=512, channels=512, kernel_size=[3, 3]); E %77 = nn.batch_norm(%76, meta[relay.Constant][96], meta[relay.Constant][97], meta[relay.Constant][98], meta[relay.Constant][99]); E %78 = %77.0; E %79 = nn.relu(%78); E %80 = nn.conv2d(%79, meta[relay.Constant][100], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]); E %81 = nn.batch_norm(%80, meta[relay.Constant][101], meta[relay.Constant][102], meta[relay.Constant][103], meta[relay.Constant][104]); E %82 = %81.0; E %83 = nn.relu(%82); E %84 = nn.conv2d(%83, meta[relay.Constant][105], padding=[1, 1, 1, 1], groups=512, channels=512, kernel_size=[3, 3]); E %85 = nn.batch_norm(%84, meta[relay.Constant][106], meta[relay.Constant][107], meta[relay.Constant][108], meta[relay.Constant][109]); E %86 = %85.0; E %87 = nn.relu(%86); E %88 = nn.conv2d(%87, meta[relay.Constant][110], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]); E %89 = nn.batch_norm(%88, meta[relay.Constant][111], meta[relay.Constant][112], meta[relay.Constant][113], meta[relay.Constant][114]); E %90 = %89.0; E %91 = nn.relu(%90); E %92 = nn.conv2d(%91, meta[relay.Constant][115], padding=[1, 1, 1, 1], channels=84, kernel_size=[3, 3]); E %93 = nn.bias_add(%92, meta[relay.Constant][116]); E %94 = transpose(%93, axes=[0, 2, 3, 1]); E %95 = nn.batch_flatten(%94); E %96 = nn.conv2d(%91, meta[relay.Constant][117], strides=[2, 2], padding=[1, 1, 1, 1], groups=512, channels=512, kernel_size=[3, 3]); E %97 = nn.batch_norm(%96, meta[relay.Constant][118], meta[relay.Constant][119], meta[relay.Constant][120], meta[relay.Constant][121]); E %98 = %97.0; E %99 = nn.relu(%98); E %100 = nn.conv2d(%99, meta[relay.Constant][122], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]); E %101 = nn.batch_norm(%100, meta[relay.Constant][123], meta[relay.Constant][124], meta[relay.Constant][125], meta[relay.Constant][126]); E %102 = %101.0; E %103 = nn.relu(%102); E %104 = nn.conv2d(%103, 
meta[relay.Constant][127], padding=[1, 1, 1, 1], groups=1024, channels=1024, kernel_size=[3, 3]); E %105 = nn.batch_norm(%104, meta[relay.Constant][128], meta[relay.Constant][129], meta[relay.Constant][130], meta[relay.Constant][131]); E %106 = %105.0; E %107 = nn.relu(%106); E %108 = nn.conv2d(%107, meta[relay.Constant][132], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]); E %109 = nn.batch_norm(%108, meta[relay.Constant][133], meta[relay.Constant][134], meta[relay.Constant][135], meta[relay.Constant][136]); E %110 = %109.0; E %111 = nn.relu(%110); E %112 = nn.conv2d(%111, meta[relay.Constant][137], padding=[1, 1, 1, 1], channels=126, kernel_size=[3, 3]); E %113 = nn.bias_add(%112, meta[relay.Constant][138]); E %114 = transpose(%113, axes=[0, 2, 3, 1]); E %115 = nn.batch_flatten(%114); E %116 = nn.conv2d(%111, meta[relay.Constant][139], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]); E %117 = nn.batch_norm(%116, meta[relay.Constant][140], meta[relay.Constant][141], meta[relay.Constant][142], meta[relay.Constant][143], epsilon=0.001f, scale=False); E %118 = %117.0; E %119 = nn.relu(%118); E %120 = nn.conv2d(%119, meta[relay.Constant][144], strides=[2, 2], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]); E %121 = nn.batch_norm(%120, meta[relay.Constant][145], meta[relay.Constant][146], meta[relay.Constant][147], meta[relay.Constant][148], epsilon=0.001f, scale=False); E %122 = %121.0; E %123 = nn.relu(%122); E %124 = nn.conv2d(%123, meta[relay.Constant][149], padding=[1, 1, 1, 1], channels=126, kernel_size=[3, 3]); E %125 = nn.bias_add(%124, meta[relay.Constant][150]); E %126 = transpose(%125, axes=[0, 2, 3, 1]); E %127 = nn.batch_flatten(%126); E %128 = nn.conv2d(%123, meta[relay.Constant][151], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]); E %129 = nn.batch_norm(%128, meta[relay.Constant][152], meta[relay.Constant][153], meta[relay.Constant][154], meta[relay.Constant][155], epsilon=0.001f, scale=False); E %130 = %129.0; E %131 
= nn.relu(%130); E %132 = nn.conv2d(%131, meta[relay.Constant][156], strides=[2, 2], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]); E %133 = nn.batch_norm(%132, meta[relay.Constant][157], meta[relay.Constant][158], meta[relay.Constant][159], meta[relay.Constant][160], epsilon=0.001f, scale=False); E %134 = %133.0; E %135 = nn.relu(%134); E %136 = nn.conv2d(%135, meta[relay.Constant][161], padding=[1, 1, 1, 1], channels=126, kernel_size=[3, 3]); E %137 = nn.bias_add(%136, meta[relay.Constant][162]); E %138 = transpose(%137, axes=[0, 2, 3, 1]); E %139 = nn.batch_flatten(%138); E %140 = nn.conv2d(%135, meta[relay.Constant][163], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]); E %141 = nn.batch_norm(%140, meta[relay.Constant][164], meta[relay.Constant][165], meta[relay.Constant][166], meta[relay.Constant][167], epsilon=0.001f, scale=False); E %142 = %141.0; E %143 = nn.relu(%142); E %144 = nn.conv2d(%143, meta[relay.Constant][168], strides=[2, 2], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]); E %145 = nn.batch_norm(%144, meta[relay.Constant][169], meta[relay.Constant][170], meta[relay.Constant][171], meta[relay.Constant][172], epsilon=0.001f, scale=False); E %146 = %145.0; E %147 = nn.relu(%146); E %148 = nn.conv2d(%147, meta[relay.Constant][173], padding=[1, 1, 1, 1], channels=84, kernel_size=[3, 3]); E %149 = nn.bias_add(%148, meta[relay.Constant][174]); E %150 = transpose(%149, axes=[0, 2, 3, 1]); E %151 = nn.batch_flatten(%150); E %152 = nn.conv2d(%147, meta[relay.Constant][175], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]); E %153 = nn.batch_norm(%152, meta[relay.Constant][176], meta[relay.Constant][177], meta[relay.Constant][178], meta[relay.Constant][179], epsilon=0.001f, scale=False); E %154 = %153.0; E %155 = nn.relu(%154); E %156 = nn.conv2d(%155, meta[relay.Constant][180], strides=[2, 2], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]); E %157 = nn.batch_norm(%156, meta[relay.Constant][181], 
meta[relay.Constant][182], meta[relay.Constant][183], meta[relay.Constant][184], epsilon=0.001f, scale=False); E %158 = %157.0; E %159 = nn.relu(%158); E %160 = nn.conv2d(%159, meta[relay.Constant][185], padding=[1, 1, 1, 1], channels=84, kernel_size=[3, 3]); E %161 = nn.bias_add(%160, meta[relay.Constant][186]); E %162 = transpose(%161, axes=[0, 2, 3, 1]); E %163 = nn.batch_flatten(%162); E %164 = (%95, %115, %127, %139, %151, %163); E %165 = concatenate(%164, axis=1); E reshape(%165, meta[relay.Constant][187], newshape=[0, -1, 21]) the function is provided too many arguments expected 1, found 2; E } E // meta data omitted. you can use show_meta_data=True to include meta data ``` ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: [email protected]
