This is an automated email from the ASF dual-hosted git repository.

nswamy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 6ebec87  Softsign Activation Function (#9851)
6ebec87 is described below

commit 6ebec87127a7dfb10e902df20282434a1612e35f
Author: Naveen Swamy <mn.nav...@gmail.com>
AuthorDate: Thu Feb 22 16:54:21 2018 -0800

    Softsign Activation Function (#9851)
    
    * Add SoftSign Activation function
---
 src/operator/mshadow_op.h                      |  4 ++++
 src/operator/nn/activation-inl.h               | 11 ++++++++++-
 src/operator/nn/activation.cc                  |  1 +
 src/operator/operator_tune.cc                  |  2 ++
 src/operator/tensor/elemwise_unary_op_basic.cc | 17 +++++++++++++++++
 src/operator/tensor/elemwise_unary_op_basic.cu |  8 ++++++++
 tests/python/unittest/test_operator.py         | 19 +++++++++++++++++++
 7 files changed, 61 insertions(+), 1 deletion(-)

diff --git a/src/operator/mshadow_op.h b/src/operator/mshadow_op.h
index af7ef51..1d4284e 100644
--- a/src/operator/mshadow_op.h
+++ b/src/operator/mshadow_op.h
@@ -111,6 +111,10 @@ MXNET_UNARY_MATH_OP(sigmoid, 1.0f / (1.0f + math::exp(-a)));
 
 MXNET_UNARY_MATH_OP(sigmoid_grad, math::id(a) * (1.0f - math::id(a)));
 
+MXNET_UNARY_MATH_OP(softsign, a / (1.0f + math::fabs(a)));
+
+MXNET_UNARY_MATH_OP(softsign_grad, 1.0f /  math::sqr(1.0f + math::fabs(a)));
+
 MXNET_UNARY_MATH_OP_NC(relu, a > DType(0) ? a : DType(0));
 
 MXNET_UNARY_MATH_OP_NC(relu_grad, a > DType(0) ? DType(1) : DType(0));
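
For reference, a minimal NumPy sketch of the two formulas added above (the
function names here are illustrative, not part of the patch):

    import numpy as np

    def softsign(a):
        # forward: y = a / (1 + |a|), matching mshadow_op::softsign
        return a / (1.0 + np.abs(a))

    def softsign_grad(a):
        # derivative w.r.t. the input a: 1 / (1 + |a|)^2,
        # matching mshadow_op::softsign_grad
        return 1.0 / np.square(1.0 + np.abs(a))

    x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
    print(softsign(x))       # outputs stay inside (-1, 1)
    print(softsign_grad(x))  # peaks at 1 for a = 0, decays as |a| grows
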
diff --git a/src/operator/nn/activation-inl.h b/src/operator/nn/activation-inl.h
index a440f97..89a369c 100644
--- a/src/operator/nn/activation-inl.h
+++ b/src/operator/nn/activation-inl.h
@@ -47,7 +47,7 @@ namespace activation {
 enum ActivationOpInputs {kData};
 enum ActivationOpOutputs {kOut};
 enum ActivationOpResource {kTempSpace};
-enum ActivationOpType {kReLU, kSigmoid, kTanh, kSoftReLU};
+enum ActivationOpType {kReLU, kSigmoid, kTanh, kSoftReLU, kSoftSign};
 }  // activation
 
 struct ActivationParam : public dmlc::Parameter<ActivationParam> {
@@ -59,6 +59,7 @@ struct ActivationParam : public dmlc::Parameter<ActivationParam> {
     .add_enum("sigmoid", activation::kSigmoid)
     .add_enum("tanh", activation::kTanh)
     .add_enum("softrelu", activation::kSoftReLU)
+    .add_enum("softsign", activation::kSoftSign)
     .describe("Activation function to be applied.");
   }
 
@@ -140,6 +141,10 @@ void ActivationComputeImpl(const ActivationParam &param, const OpContext &ctx,
         ActivationForward<xpu, mshadow_op::softrelu, mshadow_op::softrelu_grad, DType>(
             ctx, input, req, output);
         break;
+      case activation::kSoftSign:
+        ActivationForward<xpu, mshadow_op::softsign, mshadow_op::softsign_grad, DType>(
+                ctx, input, req, output);
+            break;
       default:
         LOG(FATAL) << "unknown activation type";
     }
@@ -168,6 +173,10 @@ void ActivationGradComputeImpl(const ActivationParam &param, const OpContext &ct
         ActivationBackward<xpu, mshadow_op::softrelu, mshadow_op::softrelu_grad, DType>(
             ctx, out_grad, out_data, req, output);
         break;
+      case activation::kSoftSign:
+        ActivationBackward<xpu, mshadow_op::softsign, mshadow_op::softsign_grad, DType>(
+                ctx, out_grad, out_data, req, output);
+            break;
       default:
         LOG(FATAL) << "unknown activation type";
     }
diff --git a/src/operator/nn/activation.cc b/src/operator/nn/activation.cc
index 7b79a34..8905932 100644
--- a/src/operator/nn/activation.cc
+++ b/src/operator/nn/activation.cc
@@ -149,6 +149,7 @@ The following activation functions are supported:
 - `sigmoid`: :math:`y = \frac{1}{1 + exp(-x)}`
 - `tanh`: Hyperbolic tangent, :math:`y = \frac{exp(x) - exp(-x)}{exp(x) + exp(-x)}`
 - `softrelu`: Soft ReLU, or SoftPlus, :math:`y = log(1 + exp(x))`
+- `softsign`: :math:`y = \frac{x}{1 + abs(x)}`
 
 )code" ADD_FILELINE)
 .set_attr_parser(ParamParser<ActivationParam>)
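
With the new enum value registered, `softsign` becomes a valid `act_type` for the
Activation operator from the Python API as well. A small usage sketch (imperative
NDArray API; illustrative only):

    import mxnet as mx

    x = mx.nd.array([[-2.0, -0.5, 0.0, 0.5, 2.0]])
    # 'softsign' is now accepted alongside 'relu', 'sigmoid', 'tanh', 'softrelu'
    y = mx.nd.Activation(data=x, act_type='softsign')
    print(y)  # each element squashed into (-1, 1)
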
diff --git a/src/operator/operator_tune.cc b/src/operator/operator_tune.cc
index e0f8306..c13f1ac 100644
--- a/src/operator/operator_tune.cc
+++ b/src/operator/operator_tune.cc
@@ -213,6 +213,8 @@ IMPLEMENT_UNARY_WORKLOAD_FWD(mxnet::op::mshadow_op::reciprocal);  // NOLINT()
 IMPLEMENT_UNARY_WORKLOAD_BWD(mxnet::op::mshadow_op::reciprocal_grad);  // NOLINT()
 IMPLEMENT_UNARY_WORKLOAD_FWD(mxnet::op::mshadow_op::sigmoid);  // NOLINT()
 IMPLEMENT_UNARY_WORKLOAD_BWD(mxnet::op::mshadow_op::sigmoid_grad);  // NOLINT()
+IMPLEMENT_UNARY_WORKLOAD_FWD(mxnet::op::mshadow_op::softsign);  // NOLINT()
+IMPLEMENT_UNARY_WORKLOAD_BWD(mxnet::op::mshadow_op::softsign_grad);  // NOLINT()
 IMPLEMENT_UNARY_WORKLOAD_FWD(mxnet::op::mshadow_op::relu);  // NOLINT()
 IMPLEMENT_UNARY_WORKLOAD_BWD(mxnet::op::mshadow_op::relu_grad);  // NOLINT()
 IMPLEMENT_UNARY_WORKLOAD_FWD(mxnet::op::mshadow_op::tanh);  // NOLINT()
diff --git a/src/operator/tensor/elemwise_unary_op_basic.cc b/src/operator/tensor/elemwise_unary_op_basic.cc
index 95fd3bc..acd8f7b 100644
--- a/src/operator/tensor/elemwise_unary_op_basic.cc
+++ b/src/operator/tensor/elemwise_unary_op_basic.cc
@@ -106,6 +106,23 @@ The storage type of ``sigmoid`` output is always dense
 
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(_backward_sigmoid,
                                                unary_bwd<mshadow_op::sigmoid_grad>);
+// softsign
+MXNET_OPERATOR_REGISTER_UNARY(softsign)
+MXNET_ADD_SPARSE_OP_ALIAS(softsign)
+.describe(R"code(Computes softsign of x element-wise.
+
+.. math::
+   y = x / (1 + abs(x))
+
+The storage type of ``softsign`` output is always dense
+
+)code" ADD_FILELINE)
+  .set_attr<FCompute>("FCompute<cpu>", UnaryOp::Compute<cpu, 
mshadow_op::softsign>)
+  .set_attr<nnvm::FGradient>("FGradient", 
ElemwiseGradUseIn{"_backward_softsign"});
+
+MXNET_OPERATOR_REGISTER_BINARY(_backward_softsign)
+.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu,
+  unary_bwd<mshadow_op::softsign_grad> >);
 
 // copy
 static void CopyEx(const nnvm::NodeAttrs& attrs,
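
The registration above also exposes a standalone `softsign` operator, with its
`_backward_softsign` gradient wired up via ElemwiseGradUseIn. A quick sanity
check one could run against the NumPy formulas, assuming the autograd API
(illustrative, not part of the patch):

    import mxnet as mx
    import numpy as np

    x = mx.nd.array([-2.0, -0.5, 0.0, 0.5, 2.0])
    x.attach_grad()
    with mx.autograd.record():
        y = mx.nd.softsign(x)   # standalone operator registered above
    y.backward()                # head gradient defaults to ones

    xa = x.asnumpy()
    # forward matches x / (1 + |x|); gradient matches 1 / (1 + |x|)^2
    np.testing.assert_allclose(y.asnumpy(), xa / (1.0 + np.abs(xa)),
                               rtol=1e-5, atol=1e-6)
    np.testing.assert_allclose(x.grad.asnumpy(), 1.0 / np.square(1.0 + np.abs(xa)),
                               rtol=1e-5, atol=1e-6)
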
diff --git a/src/operator/tensor/elemwise_unary_op_basic.cu b/src/operator/tensor/elemwise_unary_op_basic.cu
index 41eef90..8dfa9af 100644
--- a/src/operator/tensor/elemwise_unary_op_basic.cu
+++ b/src/operator/tensor/elemwise_unary_op_basic.cu
@@ -40,6 +40,14 @@ NNVM_REGISTER_OP(_backward_sigmoid)
 .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
   gpu, unary_bwd<mshadow_op::sigmoid_grad>>);
 
+// softsign
+NNVM_REGISTER_OP(softsign)
+.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, 
mshadow_op::softsign>);
+
+NNVM_REGISTER_OP(_backward_softsign)
+.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
+  gpu, unary_bwd<mshadow_op::softsign_grad>>);
+
 // copy
 NNVM_REGISTER_OP(_copy)
 .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py
index 63b7c66..7889e08 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -486,6 +486,21 @@ def test_sigmoid():
     check_symbolic_forward(y, [xa], [ya])
     check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
 
+@with_seed()
+def test_softsign():
+    def fsoftsign(a):
+        return np.divide(a, (1.0 + np.abs(a)))
+    def fsoftsign_grad(a):
+        return np.divide(1.0, np.square((1.0 + np.abs(a))))
+    shape = (3, 4)
+    x = mx.symbol.Variable("x")
+    y = mx.sym.softsign(x)
+    xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
+    ya = fsoftsign(xa)
+    ya_grad = fsoftsign_grad(xa)
+    check_numeric_gradient(y, [xa], numeric_eps=1E-3)
+    check_symbolic_forward(y, [xa], [ya])
+    check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
 
 @with_seed()
 def test_binary_logic():
@@ -4814,6 +4829,10 @@ def test_unary_math_operators():
                     lambda x: 1. / (np.exp(-x) + 1.),
                     lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
                     -3.0, 3.0],
+        'softsign': [lambda x: mx.sym.softsign(x),
+                    lambda x: x / (1. + np.abs(x)),
+                    lambda x: 1. / np.square(1. + np.abs(x)),
+                    -3.0, 3.0],
         'sin': [lambda x: mx.sym.sin(x),
                 lambda x: np.sin(x),
                 lambda x: np.cos(x),
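
The new 'softsign' entry in test_unary_math_operators pairs the symbol with NumPy
references for the forward value and its derivative. A quick finite-difference
check of that derivative formula (plain NumPy, illustrative only):

    import numpy as np

    def softsign(x):
        return x / (1.0 + np.abs(x))

    x = np.linspace(-3.0, 3.0, 13)
    eps = 1e-5
    # central difference approximation of d softsign / dx
    numeric = (softsign(x + eps) - softsign(x - eps)) / (2.0 * eps)
    analytic = 1.0 / np.square(1.0 + np.abs(x))
    # atol allows for the reduced central-difference accuracy at x = 0,
    # where |x| has a kink
    assert np.allclose(numeric, analytic, atol=1e-4)
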

-- 
To stop receiving notification emails like this one, please contact
nsw...@apache.org.
