This is an automated email from the ASF dual-hosted git repository.

jxie pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new b96affb  Fix output names of nn operators. (#10410)
b96affb is described below

commit b96affb251a72f4f10895c8adced1acbca19edb0
Author: Da Zheng <zhengda1...@gmail.com>
AuthorDate: Thu Apr 12 13:01:57 2018 -0700

    Fix output names of nn operators. (#10410)
    
    * Fix output names of nn operators.
    
    * Fix typos.
    
    * add tests.
    
    * add pooling output names.
    
    * add checks.
    
    * Fix tests.
    
    * Print tests.
    
    * Fix a bug in the test.
    
    * add test for pooling outputs.
    
    * Update test_operator.py
---
 src/executor/graph_executor.cc         |  1 +
 src/operator/nn/activation.cc          |  4 ++++
 src/operator/nn/concat.cc              |  4 ++++
 src/operator/nn/pooling.cc             |  6 +++++-
 src/operator/nn/softmax.cc             |  4 ++++
 src/operator/nn/softmax_activation.cc  |  4 ++++
 src/operator/nn/upsampling.cc          |  4 ++++
 tests/python/unittest/test_operator.py | 19 +++++++++++++++++++
 8 files changed, 45 insertions(+), 1 deletion(-)
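
In short: each of the nn operators listed above now registers an nnvm::FListOutputNames attribute, so its outputs carry explicit names instead of whatever fallback the caller invents. On the Python side the '<node name>_<output name>' convention is already visible through Symbol.list_outputs(); a minimal sketch, assuming a build containing this commit:

    import mxnet as mx

    data = mx.sym.Variable('data')
    act = mx.sym.Activation(data, act_type='relu', name='act')
    # Activation's single output is now explicitly named 'output', so the
    # symbol's output list reads '<node name>_<output name>':
    print(act.list_outputs())  # -> ['act_output']

The monitor-callback path fixed in the first hunk below is where the missing registrations actually surfaced as numeric fallbacks.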

diff --git a/src/executor/graph_executor.cc b/src/executor/graph_executor.cc
index 9108bae..4d24f55 100644
--- a/src/executor/graph_executor.cc
+++ b/src/executor/graph_executor.cc
@@ -1457,6 +1457,7 @@ void GraphExecutor::ExecuteMonCallback(size_t nid) {
       output_names.emplace_back(std::to_string(i));
     }
   }
+  CHECK_EQ(opnode.exec->out_array.size(), output_names.size());
   for (index_t i = 0; i < opnode.exec->out_array.size(); ++i) {
     NDArray *cpy = new NDArray(opnode.exec->out_array[i]);
     std::string name = inode.source->attrs.name + "_" + output_names[i];
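
The new CHECK_EQ makes the invariant explicit: ExecuteMonCallback passes one name per output array to the monitor callback, and when FListOutputNames is not registered it falls back to bare indices (the std::to_string(i) branch above), yielding names like 'act_0' instead of 'act_output'. A minimal sketch of observing these names from Python, modeled on the check_name helper in the test suite (simple_bind, set_monitor_callback, and forward are the existing executor APIs; the callback body is illustrative):

    import mxnet as mx
    from mxnet.base import py_str

    names = []

    def record_name(name, arr):
        # 'name' arrives as bytes in the form '<node name>_<output name>'
        names.append(py_str(name))

    data = mx.sym.Variable('data')
    act = mx.sym.Activation(data, act_type='relu', name='act')
    exe = act.simple_bind(ctx=mx.cpu(), data=(2, 3), grad_req='null')
    exe.set_monitor_callback(record_name)
    exe.forward()
    print(names)  # with this fix: ['act_output'] rather than ['act_0']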
diff --git a/src/operator/nn/activation.cc b/src/operator/nn/activation.cc
index 0802826..382efeb 100644
--- a/src/operator/nn/activation.cc
+++ b/src/operator/nn/activation.cc
@@ -150,6 +150,10 @@ The following activation functions are supported:
 )code" ADD_FILELINE)
 .set_attr_parser(ParamParser<ActivationParam>)
 .set_attr<FInferStorageType>("FInferStorageType", ActivationStorageType)
+.set_attr<nnvm::FListOutputNames>("FListOutputNames",
+    [](const NodeAttrs& attrs) {
+    return std::vector<std::string>{"output"};
+})
 .set_attr<FCompute>("FCompute<cpu>", ActivationCompute<cpu>)
 #if MXNET_USE_MKLDNN == 1
 .set_attr<FComputeEx>("FComputeEx<cpu>", ActivationComputeExCPU)
diff --git a/src/operator/nn/concat.cc b/src/operator/nn/concat.cc
index 81dc95f..a7fcb1c 100644
--- a/src/operator/nn/concat.cc
+++ b/src/operator/nn/concat.cc
@@ -248,6 +248,10 @@ Example::
   }
   return ret;
 })
+.set_attr<nnvm::FListOutputNames>("FListOutputNames",
+    [](const NodeAttrs& attrs) {
+    return std::vector<std::string>{"output"};
+})
 #if MXNET_USE_MKLDNN == 1
 .set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& n) {
   return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
diff --git a/src/operator/nn/pooling.cc b/src/operator/nn/pooling.cc
index 7ee655f..9e4063e 100644
--- a/src/operator/nn/pooling.cc
+++ b/src/operator/nn/pooling.cc
@@ -365,7 +365,11 @@ height, width)*.
 })
 .set_attr<nnvm::FListOutputNames>("FListOutputNames",
     [](const NodeAttrs& attrs) {
-  return std::vector<std::string>{"output"};
+  const PoolingParam &param = nnvm::get<PoolingParam>(attrs.parsed);
+  if (GetNumOutputs(param) == 2)
+    return std::vector<std::string>{"output", "workspace"};
+  else
+    return std::vector<std::string>{"output"};
 })
 .set_attr_parser(PoolingParamParser)
 .set_attr<FInferStorageType>("FInferStorageType", PoolingStorageType)
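
Pooling is the one operator in this change with a variable number of outputs: when GetNumOutputs(param) reports two, the second array is the pooling workspace, so the name list has to grow with it or the new CHECK_EQ in graph_executor.cc would fire. The workspace is not among the symbol's visible outputs, but it does reach the monitor callback. A sketch of the common single-output case (which path is taken depends on the build and pooling configuration, e.g. an MKLDNN max-pooling build may take the two-output path; treat that trigger as an assumption):

    import mxnet as mx
    from mxnet.base import py_str

    names = []
    data = mx.sym.Variable('data')
    pool = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg', name='pool')
    exe = pool.simple_bind(ctx=mx.cpu(), data=(1, 1, 4, 4), grad_req='null')
    exe.set_monitor_callback(lambda name, arr: names.append(py_str(name)))
    exe.forward()
    # Single-output path: ['pool_output']. On a two-output build the monitor
    # would also report 'pool_workspace'.
    print(names)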
diff --git a/src/operator/nn/softmax.cc b/src/operator/nn/softmax.cc
index 0f55947..f8cc6fe 100644
--- a/src/operator/nn/softmax.cc
+++ b/src/operator/nn/softmax.cc
@@ -96,6 +96,10 @@ Example::
 
 )code" ADD_FILELINE)
 .set_attr_parser(ParamParser<SoftmaxParam>)
+.set_attr<nnvm::FListOutputNames>("FListOutputNames",
+    [](const NodeAttrs& attrs) {
+    return std::vector<std::string>{"output"};
+})
 .set_attr<FCompute>("FCompute<cpu>", SoftmaxCompute<cpu, mxnet_op::softmax_fwd>)
 #if MXNET_USE_MKLDNN == 1
 .set_attr<FComputeEx>("FComputeEx<cpu>", SoftmaxComputeExCPU)
diff --git a/src/operator/nn/softmax_activation.cc b/src/operator/nn/softmax_activation.cc
index bdfd8b0..8a28243 100644
--- a/src/operator/nn/softmax_activation.cc
+++ b/src/operator/nn/softmax_activation.cc
@@ -58,6 +58,10 @@ Example::
 
 )code" ADD_FILELINE)
 .set_attr_parser(ParamParser<SoftmaxActivationParam>)
+.set_attr<nnvm::FListOutputNames>("FListOutputNames",
+    [](const NodeAttrs& attrs) {
+    return std::vector<std::string>{"output"};
+})
 .set_attr<FCompute>("FCompute<cpu>", SoftmaxActivationCompute<cpu>)
 .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseOut{"_backward_SoftmaxActivation"})
 .add_arguments(SoftmaxActivationParam::__FIELDS__());
diff --git a/src/operator/nn/upsampling.cc b/src/operator/nn/upsampling.cc
index 44b619a..5aa111e 100644
--- a/src/operator/nn/upsampling.cc
+++ b/src/operator/nn/upsampling.cc
@@ -132,6 +132,10 @@ NNVM_REGISTER_OP(UpSampling)
     [](const NodeAttrs& attrs) {
   return ListArguments(nnvm::get<UpSamplingParam>(attrs.parsed));
 })
+.set_attr<nnvm::FListOutputNames>("FListOutputNames",
+    [](const NodeAttrs& attrs) {
+    return std::vector<std::string>{"output"};
+})
 .set_attr<nnvm::FInferShape>("FInferShape", UpSamplingShape)
 .set_attr<nnvm::FInferType>("FInferType", UpSamplingType)
 .set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& n) {
diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py
index 2bd5dcc..5d38222 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -5706,6 +5706,25 @@ def test_op_output_names_monitor():
     lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
     check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm'])
 
+    act_sym = mx.sym.Activation(data, act_type='relu', name='act')
+    check_name(act_sym, ['act_output'])
+
+    cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
+    check_name(cc_sym, ['concat_output'])
+
+    sm_sym = mx.sym.softmax(data, name='softmax')
+    check_name(sm_sym, ['softmax_output'])
+
+    sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
+    check_name(sa_sym, ['softmax_output'])
+
+    us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
+                               name='upsampling')
+    check_name(us_sym, ['upsampling_output'])
+
+    pool_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
+                              name='pooling')
+    check_name(pool_sym, ['pooling_output'])
 
 if __name__ == '__main__':
     import nose
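
These assertions run as part of test_op_output_names_monitor; given the file's nose entry point, a command along the lines of "nosetests tests/python/unittest/test_operator.py:test_op_output_names_monitor" exercises just this case.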
