spidyDev commented on a change in pull request #11213: [MXNET-533] MXNet-ONNX export
URL: https://github.com/apache/incubator-mxnet/pull/11213#discussion_r194868703
 
 

 ##########
 File path: python/mxnet/contrib/onnx/_export/op_translations.py
 ##########
 @@ -0,0 +1,1667 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# Based on
+#  https://github.com/NVIDIA/mxnet_to_onnx/blob/master/mx2onnx_converter/
+# mx2onnx_converter_functions.py
+#  Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+#
+#  Redistribution and use in source and binary forms, with or without
+#  modification, are permitted provided that the following conditions
+#  are met:
+#  * Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#  * Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#  * Neither the name of NVIDIA CORPORATION nor the names of its
+#    contributors may be used to endorse or promote products derived
+#    from this software without specific prior written permission.
+#
+#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
+#  EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+#  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+#  PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+#  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+#  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+#  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+#  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+#  OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# coding: utf-8
+# pylint: disable=too-many-locals,no-else-return,too-many-lines
+# pylint: disable=eval-used
+"""
+Conversion Functions for common layers.
+Add new functions here with a decorator.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+import re
+import numpy as np
+
+from onnx import helper, numpy_helper, mapping
+from .export_onnx import MXNetGraph as mx_op
+
+
+@mx_op.register("null")
+def convert_weights_and_inputs(node, **kwargs):
+    """Helper function to convert weights and inputs.
+    """
+    name = node["name"]
+
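+    # Weights become ONNX initializers (with a matching value info entry),
+    # while actual graph inputs only need the value info entry.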
+    if kwargs["is_input"] is False:
+        weights = kwargs["weights"]
+        initializer = kwargs["initializer"]
+        np_arr = weights[name]
+        data_type = mapping.NP_TYPE_TO_TENSOR_TYPE[np_arr.dtype]
+        dims = np.shape(np_arr)
+
+        tensor_node = helper.make_tensor_value_info(name, data_type, dims)
+
+        initializer.append(
+            helper.make_tensor(
+                name=name,
+                data_type=data_type,
+                dims=dims,
+                vals=np_arr.flatten().tolist(),
+                raw=False,
+            )
+        )
+
+        return [tensor_node]
+    else:
+        tval_node = helper.make_tensor_value_info(name, kwargs["in_type"], kwargs["in_shape"])
+        return [tval_node]
+
+
+def parse_helper(attrs, attrs_name, alt_value=None):
+    """Helper function to parse operator attributes in required format."""
+    tuple_re = re.compile(r'\([0-9L|,| ]+\)')
+    if attrs is None:
+        return alt_value
+    attrs_str = str(attrs.get(attrs_name))
+    if attrs_str is None:
+        return alt_value
+    attrs_match = tuple_re.search(attrs_str)
+    if attrs_match is not None:
+        if attrs_match.span() == (0, len(attrs_str)):
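+            # The regex above only admits digits, 'L', '|', commas and spaces,
+            # so the eval below cannot execute arbitrary code.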
+            dims = eval(attrs_str)
+            return dims
+        else:
+            raise AttributeError("Malformed %s dimensions: %s" % (attrs_name, str(attrs_str)))
+    return alt_value
+
+
+@mx_op.register("Convolution")
+def convert_convolution(node, **kwargs):
+    """Map MXNet's convolution operator attributes to onnx's Conv operator
+    and return the created node.
+    """
+    name = node["name"]
+    inputs = node["inputs"]
+
+    num_inputs = len(inputs)
+
+    proc_nodes = kwargs["proc_nodes"]
+    input_node = proc_nodes[kwargs["index_lookup"][inputs[0][0]]].name
+    weights_node = proc_nodes[kwargs["index_lookup"][inputs[1][0]]].name
+
+    if num_inputs > 2:
+        bias_node = proc_nodes[kwargs["index_lookup"][inputs[2][0]]].name
+
+    attrs = node.get("attrs")
+
+    kernel_dims = list(parse_helper(attrs, "kernel"))
+    stride_dims = list(parse_helper(attrs, "stride", [1, 1]))
+    pad_dims = list(parse_helper(attrs, "pad", [0, 0]))
+    num_group = int(attrs.get("num_group", 1))
+    dilations = list(parse_helper(attrs, "dilate", [1, 1]))
+
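+    # ONNX expects pads as all begin values followed by all end values, while
+    # MXNet's symmetric pad gives one value per spatial axis, so repeat it.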
+    pad_dims = pad_dims + pad_dims
+
+    input_nodes = [input_node, weights_node]
+    if num_inputs > 2:
+        input_nodes.append(bias_node)
+
+    conv_node = helper.make_node(
+        "Conv",
+        inputs=input_nodes,
+        outputs=[name],
+        kernel_shape=kernel_dims,
+        strides=stride_dims,
+        dilations=dilations,
+        pads=pad_dims,
+        group=num_group,
+        name=name
+    )
+
+    return [conv_node]
+
+
+@mx_op.register("FullyConnected")
+def convert_fully_connected(node, **kwargs):
+    """Map MXNet's FullyConnected operator attributes to onnx's Gemm operator
+    and return the created node.
+    """
+    name = node["name"]
+    inputs = node["inputs"]
+    input_node_id = kwargs["index_lookup"][inputs[0][0]]
+    weight_node_id = kwargs["index_lookup"][inputs[1][0]]
+    bias_node_id = kwargs["index_lookup"][inputs[2][0]]
+    proc_nodes = kwargs["proc_nodes"]
+    input_node = proc_nodes[input_node_id]
+    weights_node = proc_nodes[weight_node_id]
+    bias_node = proc_nodes[bias_node_id]
+
+    input_name = input_node.name
+    weights_name = weights_node.name
+    bias_name = bias_node.name
+
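+    # MXNet FullyConnected computes X * W^T + bias, hence transB=True below.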
+    node = helper.make_node(
+        "Gemm",
+        [input_name, weights_name, bias_name],  # input (A, B, C) - C can be in place
+        [name],  # output
+        alpha=1.0,
+        beta=1.0,
+        transA=False,
+        transB=True,
+        name=name
+    )
+
+    return [node]
+
+
+@mx_op.register("BatchNorm")
+def convert_batchnorm(node, **kwargs):
+    """Map MXNet's BatchNorm operator attributes to onnx's BatchNormalization 
operator
+    and return the created node.
+    """
+    name = node["name"]
+    proc_nodes = kwargs["proc_nodes"]
+    inputs = node["inputs"]
+
+    attrs = node["attrs"]
+    momentum = float(attrs.get("momentum", 0.9))
+    eps = float(attrs.get("eps", 0.001))
+
+    data_idx = kwargs["index_lookup"][inputs[0][0]]
+    gamma_idx = kwargs["index_lookup"][inputs[1][0]]
+    beta_idx = kwargs["index_lookup"][inputs[2][0]]
+    moving_mean_idx = kwargs["index_lookup"][inputs[3][0]]
+    moving_var_idx = kwargs["index_lookup"][inputs[4][0]]
+
+    data_node = proc_nodes[data_idx].name
+    gamma_node = proc_nodes[gamma_idx].name
+    beta_node = proc_nodes[beta_idx].name
+
+    mov_mean_node = proc_nodes[moving_mean_idx].name
+    mov_var_node = proc_nodes[moving_var_idx].name
+
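+    # spatial=1: mean/variance are per channel, shared across spatial dims.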
+    bn_node = helper.make_node(
+        "BatchNormalization",
+        [data_node,
+         gamma_node,  # scale
+         beta_node,  # bias
+         mov_mean_node,
+         mov_var_node
+        ],
+        [name],
+        name=name,
+        epsilon=eps,
+        momentum=momentum,
+        spatial=1
+    )
+    return [bn_node]
+
+
+@mx_op.register("tanh")
+def convert_tanh(node, **kwargs):
+    """Map MXNet's tanh operator attributes to onnx's Tanh operator
+    and return the created node.
+    """
+    name = node["name"]
+    inputs = node["inputs"]
+    input_node_idx = kwargs["index_lookup"][inputs[0][0]]
+    proc_nodes = kwargs["proc_nodes"]
+    input_node = proc_nodes[input_node_idx].name
+
+    node = helper.make_node(
+        'Tanh',
+        [input_node],
+        [name],
+        name=name
+    )
+    return [node]
+
+# Basic neural network functions
+@mx_op.register("sigmoid")
+def convert_sigmoid(node, **kwargs):
+    """Map MXNet's sigmoid operator attributes to onnx's Sigmoid operator
+    and return the created node.
+    """
+    name = node["name"]
+    inputs = node["inputs"]
+    input_node_idx = kwargs["index_lookup"][inputs[0][0]]
+    proc_nodes = kwargs["proc_nodes"]
+    input_node = proc_nodes[input_node_idx].name
+
+    node = helper.make_node(
+        'Sigmoid',
+        [input_node],
+        [name],
+        name=name
+    )
+    return [node]
+
+@mx_op.register("relu")
+def convert_relu(node, **kwargs):
+    """Map MXNet's relu operator attributes to onnx's Relu operator
+    and return the created node.
+    """
+    name = node["name"]
+    inputs = node["inputs"]
+    input_node_idx = kwargs["index_lookup"][inputs[0][0]]
+    proc_nodes = kwargs["proc_nodes"]
+    input_node = proc_nodes[input_node_idx].name
+
+    node = helper.make_node(
+        'Relu',
+        [input_node],
+        [name],
+        name=name
+    )
+
+    return [node]
+
+@mx_op.register("Activation")
+def convert_activation(node, **kwargs):
+    """Map MXNet's Activation operator attributes to onnx's Tanh/Relu operator
+    and return the created node.
+    """
+    name = node["name"]
+
+    proc_nodes = kwargs["proc_nodes"]
+    attrs = node["attrs"]
+    act_type = attrs["act_type"]
+
+    inputs = node["inputs"]
+    input_node_idx = kwargs["index_lookup"][inputs[0][0]]
+    input_node = proc_nodes[input_node_idx].output[0]
+
+    # Creating a dictionary here, but if this titlecase pattern holds for
+    # every activation, mxnet_name.title() could replace the lookup.
+    act_types = {
+        "tanh": "Tanh",
+        "relu": "Relu"
+    }
+
+    act_name = act_types.get(act_type)
+    if act_name:
+        node = helper.make_node(
+            act_name,
+            [input_node],
+            [name],
+            name=name
+        )
+    else:
+        raise AttributeError(
+            "Activation %s not implemented or recognized in the converter" % 
act_type
+        )
+
+    return [node]
+
+def transform_padding(pad_width):
+    """Helper function to convert padding format for pad operator.
+    """
+    num_pad_values = len(pad_width)
+    onnx_pad_width = [0]*num_pad_values
+
+    start_index = 0
+    end_index = int(num_pad_values/2)
 
 Review comment:
   MXNet pad values in the pad op (https://mxnet.incubator.apache.org/api/python/symbol/symbol.html#mxnet.symbol.pad) always come in multiples of two - one (before, after) pair per axis. Will add a comment to clarify.
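
   For reference, a minimal sketch of how the rest of transform_padding could
   regroup MXNet's interleaved (before, after) pairs into ONNX's
   all-begins-then-all-ends layout. The loop body is an assumption based on the
   start_index/end_index initialization visible above, not the code under review:

       def transform_padding(pad_width):
           """Regroup MXNet's (before_1, after_1, before_2, after_2, ...) pads
           into ONNX's (begin_1, begin_2, ..., end_1, end_2, ...) ordering."""
           num_pad_values = len(pad_width)      # always a multiple of two
           onnx_pad_width = [0] * num_pad_values
           start_index = 0                      # next slot for a 'before' value
           end_index = int(num_pad_values / 2)  # next slot for an 'after' value
           for idx in range(num_pad_values):
               if idx % 2 == 0:                 # even positions hold 'before' pads
                   onnx_pad_width[start_index] = pad_width[idx]
                   start_index += 1
               else:                            # odd positions hold 'after' pads
                   onnx_pad_width[end_index] = pad_width[idx]
                   end_index += 1
           return onnx_pad_width

       # e.g. transform_padding((0, 0, 0, 0, 1, 2, 3, 4)) -> [0, 0, 1, 3, 0, 0, 2, 4]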

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
