piiswrong closed pull request #10639: [MXNET-350] Logical NOT Operator
URL: https://github.com/apache/incubator-mxnet/pull/10639
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

diff --git a/src/operator/mshadow_op.h b/src/operator/mshadow_op.h
index 5606c64369a..2f5dd97d7b6 100644
--- a/src/operator/mshadow_op.h
+++ b/src/operator/mshadow_op.h
@@ -303,6 +303,8 @@ MXNET_BINARY_MATH_OP(maximum, a > b ? a : b);
 /*! \brief used for generate element of minimum */
 MXNET_BINARY_MATH_OP_NC(minimum, a < b ? a : b);
 
+MXNET_UNARY_MATH_OP_NC(nt, a != DType(0) ? DType(0) : DType(1));
+
 MXNET_BINARY_MATH_OP_NC(ge, a >= b ? DType(1) : DType(0));
 
 MXNET_BINARY_MATH_OP_NC(gt, a > b ? DType(1) : DType(0));
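
Note: the new `nt` kernel maps any non-zero input to 0 and zero to 1,
preserving the input dtype. A minimal NumPy sketch of the same
elementwise rule (illustrative only, not part of the PR):

    import numpy as np

    def nt(a):
        # Matches mshadow_op::nt: non-zero -> 0, zero -> 1, same dtype.
        a = np.asarray(a)
        return np.where(a != 0, a.dtype.type(0), a.dtype.type(1))

    print(nt(np.array([-2., 0., 1.], dtype=np.float32)))  # [0. 1. 0.]
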
diff --git a/src/operator/operator_tune.cc b/src/operator/operator_tune.cc
index c48d83a3be8..47db78bc188 100644
--- a/src/operator/operator_tune.cc
+++ b/src/operator/operator_tune.cc
@@ -282,6 +282,8 @@ IMPLEMENT_UNARY_WORKLOAD_FWD(mxnet::op::mshadow_op::degrees);  // NOLINT()
 IMPLEMENT_UNARY_WORKLOAD_BWD(mxnet::op::mshadow_op::degrees_grad);  // NOLINT()
 IMPLEMENT_UNARY_WORKLOAD_FWD(mxnet::op::mshadow_op::radians);  // NOLINT()
 IMPLEMENT_UNARY_WORKLOAD_BWD(mxnet::op::mshadow_op::radians_grad);  // NOLINT()
+IMPLEMENT_UNARY_WORKLOAD_FWD(mxnet::op::mshadow_op::nt);  // NOLINT()
+IMPLEMENT_UNARY_WORKLOAD_BWD(mxnet::op::mshadow_op::nt);  // NOLINT()
 IMPLEMENT_BINARY_WORKLOAD_FWD(mxnet::op::mshadow_op::clip);  // NOLINT()
 IMPLEMENT_BINARY_WORKLOAD_BWD(mxnet::op::mshadow_op::clip);  // NOLINT()
 IMPLEMENT_BINARY_WORKLOAD_FWD(mxnet::op::mshadow_op::plus);  // NOLINT()
diff --git a/src/operator/tensor/elemwise_unary_op_basic.cc b/src/operator/tensor/elemwise_unary_op_basic.cc
index e711148898f..f02f884188f 100644
--- a/src/operator/tensor/elemwise_unary_op_basic.cc
+++ b/src/operator/tensor/elemwise_unary_op_basic.cc
@@ -828,5 +828,15 @@ The storage type of ``gammaln`` output is always dense
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_gammaln,
                                                   unary_bwd<mshadow_op::gammaln_grad>);
 
+MXNET_OPERATOR_REGISTER_UNARY(logical_not)
+.describe(R"code(Returns the result of logical NOT (!) function
+
+Example:
+  logical_not([-2., 0., 1.]) = [0., 1., 0.]
+
+)code")
+.set_attr<FCompute>("FCompute<cpu>", UnaryOp::Compute<cpu, mshadow_op::nt>)
+.set_attr<nnvm::FGradient>("FGradient", MakeZeroGradNodes);
+
 }  // namespace op
 }  // namespace mxnet
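
Note: with this CPU registration in place, the operator is exposed
through the usual NDArray and Symbol front ends. A minimal usage
sketch (assuming an MXNet build that includes this PR):

    import mxnet as mx

    a = mx.nd.array([-2., 0., 1.])
    print(mx.nd.logical_not(a).asnumpy())  # [0. 1. 0.]
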
diff --git a/src/operator/tensor/elemwise_unary_op_basic.cu b/src/operator/tensor/elemwise_unary_op_basic.cu
index 8dfa9af74ce..30916522255 100644
--- a/src/operator/tensor/elemwise_unary_op_basic.cu
+++ b/src/operator/tensor/elemwise_unary_op_basic.cu
@@ -246,5 +246,9 @@ NNVM_REGISTER_OP(_backward_gammaln)
 .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
   gpu, unary_bwd<mshadow_op::gammaln_grad> >);
 
+// logical not
+NNVM_REGISTER_OP(logical_not)
+.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::nt>);
+
 }  // namespace op
 }  // namespace mxnet
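
Note: the GPU registration reuses the same mshadow_op::nt kernel
through UnaryOp::Compute<gpu, ...>, so the call is identical apart
from the context. A sketch, assuming a CUDA-enabled build with at
least one visible GPU:

    import mxnet as mx

    a = mx.nd.array([-2., 0., 1.], ctx=mx.gpu(0))
    print(mx.nd.logical_not(a).asnumpy())  # [0. 1. 0.]
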
diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py
index d0089e6f3dd..a581e32762e 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -657,6 +657,22 @@ def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
                 x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
 
 
+@with_seed()
+def test_unary_logic():
+    def reference(a, dtype):
+        return np.logical_not(a).astype(dtype)
+    shape = (3, 4)
+    xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
+    mx_xa = mx.nd.array(xa)
+    mx_out = mx.nd.logical_not(mx_xa)
+    assert_almost_equal(mx_out.asnumpy(), reference(xa, dtype=xa.dtype))
+    x = mx.sym.Variable('x')
+    y = mx.sym.logical_not(data=x)
+    exe = y.simple_bind(ctx=default_context(), x=shape)
+    sym_out = exe.forward(is_train=True, x=mx_xa)[0]
+    assert_almost_equal(sym_out.asnumpy(), reference(xa, dtype=xa.dtype))
+
+
 @with_seed()
 def test_embedding():
     in_dim = 10
@@ -2095,14 +2111,14 @@ def test_dot():
                     c = mx.sym.dot(a, b)
                     exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
                     outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
-                    assert_almost_equal(outputs[0].asnumpy(), c_npy, 
+                    assert_almost_equal(outputs[0].asnumpy(), c_npy,
                                         rtol=1e-2 if data_type == 'float16' else 1e-3,
                                         atol=1e-2 if data_type == 'float16' else 1e-3)
                     exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
-                    assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy, 
+                    assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
                                         rtol=1e-2 if data_type == 'float16' else 1e-3,
                                         atol=1e-2 if data_type == 'float16' else 1e-3)
-                    assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy, 
+                    assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
                                         rtol=1e-2 if data_type == 'float16' else 1e-3,
                                         atol=1e-2 if data_type == 'float16' else 1e-3)
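
Note: the CPU registration wires FGradient to MakeZeroGradNodes, so
logical_not is treated as non-differentiable and backpropagates zeros.
A sketch checking that behaviour (shapes and names are illustrative,
not taken from the PR's tests):

    import mxnet as mx

    x = mx.sym.Variable('x')
    y = mx.sym.logical_not(data=x)
    exe = y.simple_bind(ctx=mx.cpu(), x=(3, 4))
    exe.forward(is_train=True, x=mx.nd.ones((3, 4)))
    exe.backward(out_grads=[mx.nd.ones((3, 4))])
    print(exe.grad_dict['x'].asnumpy())  # expected: all zeros
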
 


 
