samskalicky commented on a change in pull request #15886: Graph Partition API
URL: https://github.com/apache/incubator-mxnet/pull/15886#discussion_r317711711
 
 

 ##########
 File path: tests/python/unittest/test_subgraph_op.py
 ##########
 @@ -146,11 +146,137 @@ def get_executor(sym, subgraph_backend=None, 
op_names=None, original_exec=None):
         for i in range(len(outputs1)):
             assert_almost_equal((outputs1[i] - 
outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
 
+    def set_random_inputs(exe1, input_names):
+        """Sets random values to exe1's args and auxs"""
+        for name in input_names:
+            if name in exe1.arg_dict:
+                exe1.arg_dict[name][:] = 
mx.nd.random.uniform(shape=exe1.arg_dict[name].shape)
+            else:
+                assert name in exe1.aux_dict
+                exe1.aux_dict[name][:] = 
mx.nd.random.uniform(shape=exe1.aux_dict[name].shape)
+
+    def copy_inputs_between_executors(exe1, exe2, input_names):
+        """Copies values of args and auxs from exe1 to exe2"""
+        for name in input_names:
+            if name in exe2.arg_dict:
+                exe2.arg_dict[name][:] = exe1.arg_dict[name]
+            else:
+                assert name in exe2.aux_dict
+                exe2.aux_dict[name][:] = exe1.aux_dict[name]
+
+    def _check_subgraph_exe5(sym, subgraph_backend, op_names):
+        """Call optimize_for to trigger graph partitioning without infer 
shapes/types before,
+        then simple_bind and compare results of the partitioned sym and the 
original sym."""
+        # simple_bind
+        exe1 = sym.simple_bind(ctx=mx.current_context(), grad_req='null')
+        input_names = sym.list_inputs()
+        set_random_inputs(exe1, input_names)
+        exe1.forward()
+
+        # partition before simple_bind
+        check_call(_LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend), 
mx_uint(len(op_names)),
+                                                     c_str_array(op_names)))
+        part_sym = sym.optimize_for(subgraph_backend)
+        
check_call(_LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
+
+        exe2 = part_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
+        copy_inputs_between_executors(exe1, exe2, input_names)
+        exe2.forward()
+
+        # compare outputs
+        outputs1 = exe1.outputs
+        outputs2 = exe2.outputs
+        assert len(outputs1) == len(outputs2)
+        for i in range(len(outputs1)):
+            assert_almost_equal((outputs1[i] - 
outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
+
+    def _check_subgraph_exe6(sym, subgraph_backend, op_names):
+        """Call optimize_for with arg arrays supplied so that shapes/types are 
inferred during graph partitioning,
+        then simple_bind and compare results of the partitioned sym and the 
original sym."""
+        # simple_bind
+        exe1 = sym.simple_bind(ctx=mx.current_context(), grad_req='null')
+        input_names = sym.list_inputs()
+        set_random_inputs(exe1, input_names)
+        exe1.forward()
+
+        # infer shape/type before partition before simple_bind
+        check_call(_LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend), 
mx_uint(len(op_names)),
+                                                     c_str_array(op_names)))
+        part_sym = sym.optimize_for(subgraph_backend, exe1.arg_dict)
+        
check_call(_LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
+
+        exe2 = part_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
+        copy_inputs_between_executors(exe1, exe2, input_names)
+        exe2.forward()
+
+        # compare outputs
+        outputs1 = exe1.outputs
+        outputs2 = exe2.outputs
+        assert len(outputs1) == len(outputs2)
+        for i in range(len(outputs1)):
+            assert_almost_equal((outputs1[i] - 
outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
+
+    def _check_subgraph_exe7(sym, subgraph_backend, op_names):
+        """Call optimize_for to trigger graph partitioning without infer 
shapes/types before,
+        then bind and compare results of the partitioned sym and the original 
sym."""
+        # bind
+        arg_shapes, _, aux_shapes = sym.infer_shape()
+        arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes]
+        aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
+        exe1 = sym.bind(ctx=mx.current_context(), args=arg_array, 
aux_states=aux_array, grad_req='null')
+        exe1.forward()
+
+        # partition before bind
+        check_call(_LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend), 
mx_uint(len(op_names)),
+                                                     c_str_array(op_names)))
+        part_sym = sym.optimize_for(subgraph_backend)
+        
check_call(_LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
+
+        exe2 = part_sym.bind(ctx=mx.current_context(), args=arg_array, 
aux_states=aux_array, grad_req='null')
+        exe2.forward()
+
+        # compare outputs
+        outputs1 = exe1.outputs
+        outputs2 = exe2.outputs
+        assert len(outputs1) == len(outputs2)
+        for i in range(len(outputs1)):
+            assert_almost_equal((outputs1[i] - 
outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
+
+    def _check_subgraph_exe8(sym, subgraph_backend, op_names):
+        """Call optimize_for to infer shapes and types followed by graph 
partitioning,
+        then bind and compare results of the partitioned sym and the original 
sym."""
+        # bind
+        arg_shapes, _, aux_shapes = sym.infer_shape()
+        arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes]
+        aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
+        exe1 = sym.bind(ctx=mx.current_context(), args=arg_array, 
aux_states=aux_array, grad_req='null')
+        exe1.forward()
+
+        # infer shape/type before partition before bind
+        check_call(_LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend), 
mx_uint(len(op_names)),
+                                                     c_str_array(op_names)))
+        part_sym = sym.optimize_for(subgraph_backend, arg_array)
+        
check_call(_LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
+
+        exe2 = part_sym.bind(ctx=mx.current_context(), args=arg_array, 
aux_states=aux_array, grad_req='null')
+        exe2.forward()
+
+        # compare outputs
+        outputs1 = exe1.outputs
+        outputs2 = exe2.outputs
+        assert len(outputs1) == len(outputs2)
+        for i in range(len(outputs1)):
+            assert_almost_equal((outputs1[i] - 
outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
+
     def check_subgraph_exe(sym, subgraph_backend, op_names):
         _check_subgraph_exe1(sym, subgraph_backend, op_names)
         _check_subgraph_exe2(sym, subgraph_backend, op_names)
         _check_subgraph_exe3(sym, subgraph_backend, op_names)
         _check_subgraph_exe4(sym, subgraph_backend, op_names)
+        _check_subgraph_exe5(sym, subgraph_backend, op_names)
+        _check_subgraph_exe6(sym, subgraph_backend, op_names)
+        _check_subgraph_exe7(sym, subgraph_backend, op_names)
+        _check_subgraph_exe8(sym, subgraph_backend, op_names)
 
Review comment:
   Currently no subgraph property supports the excluded_ops option, so testing 
it now would not be helpful. However, we do plan to create a test for this in a 
follow-up PR, where we will implement support for subgraph properties in 
libraries.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

Reply via email to