anirudhacharya commented on a change in pull request #10605: [MXNET-310] [ONNX-MXNet] API to import ONNX models into Gluon.
URL: https://github.com/apache/incubator-mxnet/pull/10605#discussion_r182850613
 
 

 ##########
 File path: tests/python-pytest/onnx/gluon_backend.py
 ##########
 @@ -0,0 +1,148 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# coding: utf-8
+"""Gluon backend wrapper for onnx test infrastructure"""
+import mxnet as mx
+from mxnet import nd
+from mxnet.contrib.onnx._import.import_onnx import GraphProto
+import numpy as np
+try:
+    from onnx import helper, TensorProto
+    from onnx.backend.base import Backend
+except ImportError:
+    raise ImportError("Onnx and protobuf need to be installed. Instructions to"
+                      + " install - https://github.com/onnx/onnx#installation")
+from gluon_backend_rep import GluonBackendRep
+
+# The GluonBackend class takes an ONNX model with inputs, performs a
+# computation, and then returns the output.
+# Implemented by following onnx docs guide:
+# https://github.com/onnx/onnx/blob/master/docs/ImplementingAnOnnxBackend.md
+
+class GluonBackend(Backend):
+    """Gluon backend for ONNX"""
+
+    @staticmethod
+    def make_graph(node, inputs):
+        """ Created ONNX GraphProto from node"""
+        initializer = []
+        tensor_input_info = []
+        tensor_output_info = []
+
+        # Adding input tensor info.
+        for index in range(len(node.input)):
+            tensor_input_info.append(
+                helper.make_tensor_value_info(str(node.input[index]),
+                                              TensorProto.FLOAT, [1]))
+
+            # Creating an initializer for weight params.
+            # Assumes that the weight param is named 'W'.
+            if node.input[index] == 'W':
+                dim = inputs[index].shape
+                param_tensor = helper.make_tensor(
+                    name=node.input[index],
+                    data_type=TensorProto.FLOAT,
+                    dims=dim,
+                    vals=inputs[index].flatten())
+
+                initializer.append(param_tensor)
+
+        # Adding output tensor info.
+        for index in range(len(node.output)):
+            tensor_output_info.append(
+                helper.make_tensor_value_info(str(node.output[index]),
+                                              TensorProto.FLOAT, [1]))
+
+        # Creating the graph proto object.
+        graph_proto = helper.make_graph(
+            [node],
+            "test",
+            tensor_input_info,
+            tensor_output_info,
+            initializer=initializer)
+
+        return graph_proto
+
+    @classmethod
+    def run_node(cls, node, inputs, device='CPU'):
+        """Running individual node inference on gluon backend and
+        return the result to onnx test infrastructure.
+
+        Parameters
+        ----------
+        node   : onnx node object
+            loaded onnx node (individual layer)
+        inputs : numpy array
+            input to run a node on
+        device : str
+            device to run the node on (only 'CPU' is used in these tests)
+
+        Returns
+        -------
+        params : numpy array
+            result obtained after running the operator
+        """
+        graph = GraphProto()
+        net = graph.graph_to_gluon(GluonBackend.make_graph(node, inputs))
+
+        # create module, passing cpu context
+        if device == 'CPU':
+            ctx = mx.cpu()
+        else:
+            raise NotImplementedError("Only CPU context is supported for now")
 
 Review comment:
   ONNX is implementation independent. Here we are running a particular ONNX
model using Gluon, so we need to specify the context. In the CI pipeline these
tests run on a CPU, hence the assignment above. Saying "GPU is not implemented"
is misleading; I will correct this in the code.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
