This is an automated email from the ASF dual-hosted git repository.
anirudh2290 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/master by this push:
new c8c3b04  [MXNET-853] Fix for smooth_l1 operator scalar default value (#12284)
c8c3b04 is described below
commit c8c3b04996113bc05c1384e597b6a80df544177b
Author: Sam Skalicky <[email protected]>
AuthorDate: Mon Sep 10 15:56:21 2018 -0700
[MXNET-853] Fix for smooth_l1 operator scalar default value (#12284)
* changed the smooth_l1 operator implementation to stop using the helper macros, since they do not support checking whether an argument was supplied or setting a custom default value (a usage sketch follows after this list)
* added a test case for the smooth_l1 operator's scalar default value
* fixed whitespace
* added curly braces for if/else to match mxnet style
* added more curly braces
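
As a usage sketch of the behavior this commit enables (illustrative only; the expected values come from the docstring example in the diff below, and mx.nd.smooth_l1 is the NDArray front end for the registered operator):

    import mxnet as mx

    x = mx.nd.array([1, 2, 3, 4])
    # Previously the attr parser unconditionally called
    # std::stod(attrs->dict["scalar"]), which fails when the key is
    # absent; it now falls back to 1.0.
    print(mx.nd.smooth_l1(x))              # [0.5, 1.5, 2.5, 3.5]
    print(mx.nd.smooth_l1(x, scalar=1.0))  # same result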
---
.../tensor/elemwise_binary_scalar_op_extended.cc | 35 ++++++++++++++++++----
tests/python/unittest/test_operator.py | 4 +++
2 files changed, 33 insertions(+), 6 deletions(-)
diff --git a/src/operator/tensor/elemwise_binary_scalar_op_extended.cc b/src/operator/tensor/elemwise_binary_scalar_op_extended.cc
index 9870342..a0c4149 100644
--- a/src/operator/tensor/elemwise_binary_scalar_op_extended.cc
+++ b/src/operator/tensor/elemwise_binary_scalar_op_extended.cc
@@ -83,7 +83,7 @@ MXNET_OPERATOR_REGISTER_BINARY(_backward_hypot_scalar)
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<
cpu, mshadow_op::hypot_grad_left>);
-MXNET_OPERATOR_REGISTER_BINARY_SCALAR(smooth_l1)
+NNVM_REGISTER_OP(smooth_l1)
.describe(R"code(Calculate Smooth L1 Loss(lhs, scalar) by summing
.. math::
@@ -98,17 +98,40 @@ where :math:`x` is an element of the tensor *lhs* and :math:`\sigma` is the scalar
Example::
+ smooth_l1([1, 2, 3, 4]) = [0.5, 1.5, 2.5, 3.5]
smooth_l1([1, 2, 3, 4], scalar=1) = [0.5, 1.5, 2.5, 3.5]
)code" ADD_FILELINE)
-.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<
- cpu, mshadow_op::smooth_l1_loss>)
+.set_num_inputs(1)
+.set_num_outputs(1)
+.set_attr_parser([](NodeAttrs* attrs) {
+ if (attrs->dict.find("scalar") != attrs->dict.end()) {
+ attrs->parsed = std::stod(attrs->dict["scalar"]);
+ } else {
+ attrs->parsed = 1.0;
+ }
+ })
+.set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<1, 1>)
+.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)
+.set_attr<nnvm::FInplaceOption>("FInplaceOption",
+ [](const NodeAttrs& attrs){
+ return std::vector<std::pair<int, int> >{{0, 0}};
+ })
+.add_argument("data", "NDArray-or-Symbol", "source input")
+.add_argument("scalar", "float", "scalar input")
+.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<cpu, mshadow_op::smooth_l1_loss>)
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{ "_backward_smooth_l1" });
MXNET_OPERATOR_REGISTER_BINARY(_backward_smooth_l1)
-.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
-.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<
- cpu, mshadow_op::smooth_l1_gradient>);
+ .set_attr_parser([](NodeAttrs *attrs) {
+ if (attrs->dict.find("scalar") != attrs->dict.end()) {
+ attrs->parsed = std::stod(attrs->dict["scalar"]);
+ } else {
+ attrs->parsed = 1.0;
+ }
+})
+.set_attr<FCompute>("FCompute<cpu>",
+ BinaryScalarOp::Backward<cpu, mshadow_op::smooth_l1_gradient>);
} // namespace op
} // namespace mxnet
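
The second hunk above gives _backward_smooth_l1 the same fallback, so the gradient pass also works when `scalar` is omitted. A minimal autograd sketch (illustrative, not part of the commit):

    import mxnet as mx
    from mxnet import autograd

    x = mx.nd.array([1, 2, 3, 4])
    x.attach_grad()
    with autograd.record():
        y = mx.nd.smooth_l1(x)  # forward and backward both default to scalar=1.0
    y.backward()                # head gradient defaults to ones
    print(x.grad)               # sign(x) for |x| >= 1/sigma^2: [1, 1, 1, 1]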
diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py
index 9842a69..55a46ca 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -5956,6 +5956,10 @@ def test_unary_math_operators():
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
+ 'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
+ lambda x: np_smooth_l1(x, 1.),
+ lambda x: np_smooth_l1_grad(x, 1.),
+ -2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
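
For reference, the np_smooth_l1 / np_smooth_l1_grad helpers used in these test entries compute the smooth L1 formula against which the operator is checked. A minimal NumPy sketch of that formula, assuming the standard piecewise definition from the operator docstring (not necessarily the exact helper code in test_operator.py):

    import numpy as np

    def np_smooth_l1(x, sigma):
        # 0.5 * (sigma * x)^2   if |x| < 1 / sigma^2
        # |x| - 0.5 / sigma^2   otherwise
        b = 1.0 / sigma ** 2
        return np.where(np.abs(x) < b, 0.5 * (sigma * x) ** 2, np.abs(x) - 0.5 * b)

    def np_smooth_l1_grad(x, sigma):
        b = 1.0 / sigma ** 2
        return np.where(np.abs(x) < b, sigma ** 2 * x, np.sign(x))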