This is an automated email from the ASF dual-hosted git repository.

taolv pushed a commit to branch v1.7.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.7.x by this push:
     new 802e5af  [v1.7.x] Backport PRs of numpy features (#18653)
802e5af is described below

commit 802e5af5d0133af88c3c166f6e4fe99508eff42b
Author: Xingjian Shi <xsh...@connect.ust.hk>
AuthorDate: Wed Jul 1 22:41:24 2020 -0700

    [v1.7.x] Backport PRs of numpy features (#18653)
    
    * add zero grad for npi_unique (#18080)
    
    * fix np.clip scalar input case (#17788)
    
    * fix true_divide (#18393)
    
    Co-authored-by: Hao Jin <hjjn.a...@gmail.com>
    Co-authored-by: Xi Wang <xid...@gmail.com>
---
 python/mxnet/numpy/multiarray.py        |  5 +++++
 src/operator/numpy/np_true_divide-inl.h | 16 ++++++++--------
 src/operator/numpy/np_unique_op.cc      |  1 +
 tests/python/unittest/test_numpy_op.py  | 12 +++++++++++-
 4 files changed, 25 insertions(+), 9 deletions(-)

diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py
index fceaaf3..9a803d4 100644
--- a/python/mxnet/numpy/multiarray.py
+++ b/python/mxnet/numpy/multiarray.py
@@ -6174,6 +6174,11 @@ def clip(a, a_min, a_max, out=None):
     >>> np.clip(a, 3, 6, out=a)
     array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.], dtype=float32)
     """
+    from numbers import Number
+    if isinstance(a, Number):
+        # In case input is a scalar, the computation would fall back to native numpy.
+        # The value returned would be a python scalar.
+        return _np.clip(a, a_min, a_max, out=None)
     return _mx_nd_np.clip(a, a_min, a_max, out=out)
 
 
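For context, the new scalar path in np.clip simply defers to official NumPy instead of the MXNet kernel. A minimal sketch of the resulting behaviour (not part of the patch; it assumes an MXNet build from this branch with the numpy interface available):

    from mxnet import np           # MXNet's numpy-compatible interface

    # Scalar input: handled by official NumPy, returns a plain scalar.
    print(np.clip(7.5, 3, 6))      # -> 6.0

    # Array input: still dispatched to MXNet's own clip implementation.
    a = np.arange(10)
    print(np.clip(a, 3, 6))        # -> [3. 3. 3. 3. 4. 5. 6. 6. 6. 6.]
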
diff --git a/src/operator/numpy/np_true_divide-inl.h b/src/operator/numpy/np_true_divide-inl.h
index 0bc60a0..be2ce51 100644
--- a/src/operator/numpy/np_true_divide-inl.h
+++ b/src/operator/numpy/np_true_divide-inl.h
@@ -121,7 +121,7 @@ void TrueDivideElemwiseCompute(const nnvm::NodeAttrs &attrs,
     // Case when types of the 2 input tensors are different
     if (common::is_float(lhs.type_flag_) && common::is_float(rhs.type_flag_)) {
       // both lhs and rhs are float types, output type is the more precise one
-      LOG(ERROR) << "not implemented yet...";
+      LOG(FATAL) << "not implemented yet...";
    } else if (common::is_float(lhs.type_flag_) || common::is_float(rhs.type_flag_)) {
      // one is float type, the other is integer type, the output type should be the same as float
       CHECK_EQ(out.type_flag_,
@@ -150,14 +150,14 @@ void TrueDivideElemwiseCompute(const nnvm::NodeAttrs &attrs,
       }
     } else {
       // lhs is integer type, rhs is integer type, output type should be float
-      LOG(ERROR) << "not implemented yet...";
+      LOG(FATAL) << "not implemented yet...";
     }
 #else
     // Windows case: using temp space for casting the type
     // Case when types of the 2 input tensors are different
     if (common::is_float(lhs.type_flag_) && common::is_float(rhs.type_flag_)) {
       // both lhs and rhs are float types, output type is the more precise one
-      LOG(ERROR) << "not implemented yet...";
+      LOG(FATAL) << "not implemented yet...";
    } else if (common::is_float(lhs.type_flag_) || common::is_float(rhs.type_flag_)) {
      // lhs is float type, rhs is integer type, the output type should be the same as lhs
       CHECK_EQ(out.type_flag_,
@@ -187,7 +187,7 @@ void TrueDivideElemwiseCompute(const nnvm::NodeAttrs &attrs,
       }
     } else {
       // lhs is integer type, rhs is integer type, output type should be float
-      LOG(ERROR) << "not implemented yet...";
+      LOG(FATAL) << "not implemented yet...";
     }
 #endif
   }
@@ -241,7 +241,7 @@ void TrueDivideBroadcastCompute(const nnvm::NodeAttrs& attrs,
       } else {
        if (common::is_float(lhs.type_flag_) && common::is_float(rhs.type_flag_)) {
          // lhs and rhs have different float types, the output is the more precise one
-          LOG(ERROR) << "not implemented yet...";
+          LOG(FATAL) << "not implemented yet...";
        } else if (common::is_float(lhs.type_flag_) || common::is_float(rhs.type_flag_)) {
          // one of lhs and rhs is float, the output is the same type as the float one
           if (common::is_float(lhs.type_flag_)) {
@@ -269,7 +269,7 @@ void TrueDivideBroadcastCompute(const nnvm::NodeAttrs& attrs,
           }
         } else {
           // lhs and rhs have different integer types, the output is float type
-          LOG(ERROR) << "not implemented yet...";
+          LOG(FATAL) << "not implemented yet...";
         }
       }
     });
@@ -302,7 +302,7 @@ void TrueDivideBroadcastCompute(const nnvm::NodeAttrs& attrs,
     } else {
      if (common::is_float(lhs.type_flag_) && common::is_float(rhs.type_flag_)) {
        // lhs and rhs have different float types, the output is the more precise one
-        LOG(ERROR) << "not implemented yet...";
+        LOG(FATAL) << "not implemented yet...";
      } else if (common::is_float(lhs.type_flag_) || common::is_float(rhs.type_flag_)) {
        // one of lhs and rhs is float, the output is the same type as the float one
         TBlob temp_tblob;
@@ -333,7 +333,7 @@ void TrueDivideBroadcastCompute(const nnvm::NodeAttrs& attrs,
         }
       } else {
         // lhs and rhs have different integer types, the output is float type
-        LOG(ERROR) << "not implemented yet...";
+        LOG(FATAL) << "not implemented yet...";
       }
     }
 #endif
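
The np_true_divide changes above only raise the severity of the "not implemented yet" branches from LOG(ERROR) to LOG(FATAL), so the unsupported mixed-type combinations (two different float dtypes, or two different integer dtypes) now abort the operator instead of logging and continuing with an undefined result. The combinations that are implemented keep working; a small sketch of those paths, assuming an MXNet build from this branch:

    from mxnet import np

    # Both operands share an integer dtype: the output is promoted to a float dtype.
    x = np.array([1, 2, 3], dtype='int32')
    y = np.array([2, 2, 2], dtype='int32')
    print(np.true_divide(x, y))      # expected: [0.5 1.  1.5]

    # One float operand and one integer operand: the output follows the float
    # operand, which is the mixed-type case these kernels do implement.
    z = np.array([1., 2., 3.], dtype='float32')
    print(np.true_divide(z, y))      # expected: [0.5 1.  1.5] as float32
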
diff --git a/src/operator/numpy/np_unique_op.cc b/src/operator/numpy/np_unique_op.cc
index 2f57733..7a299cd 100644
--- a/src/operator/numpy/np_unique_op.cc
+++ b/src/operator/numpy/np_unique_op.cc
@@ -375,6 +375,7 @@ NNVM_REGISTER_OP(_npi_unique)
   [](const NodeAttrs& attrs) {
     return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
   })
+.set_attr<nnvm::FGradient>("FGradient", MakeZeroGradNodes)
 .add_argument("data", "NDArray-or-Symbol", "The input array")
 .add_arguments(NumpyUniqueParam::__FIELDS__());
 
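The one-line registration above gives _npi_unique a trivial backward pass: with MakeZeroGradNodes, backpropagating through np.unique produces an all-zero gradient for the input rather than a missing-gradient error. A rough illustration of the effect (a sketch, not the project's test code):

    from mxnet import np, autograd

    x = np.array([1., 1., 2., 3., 3.])
    x.attach_grad()
    with autograd.record():
        y = np.unique(x)
        loss = (y * 2).sum()
    loss.backward()
    print(x.grad)    # expected: all zeros, since unique contributes no gradient
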
diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py
index 15e9bd4..0f35dec 100644
--- a/tests/python/unittest/test_numpy_op.py
+++ b/tests/python/unittest/test_numpy_op.py
@@ -3644,6 +3644,16 @@ def test_np_clip():
 
         def hybrid_forward(self, F, x):
             return x.clip(self._a_min, self._a_max)
+    
+    # Test scalar case
+    for _, a_min, a_max, throw_exception in workloads:
+        a = _np.random.uniform() # A scalar
+        if throw_exception:
+            # No need to test the exception case here.
+            continue
+        mx_ret = np.clip(a, a_min, a_max)
+        np_ret = _np.clip(a, a_min, a_max)
+        assert_almost_equal(mx_ret, np_ret, atol=1e-4, rtol=1e-3, use_broadcast=False)
 
     for shape, a_min, a_max, throw_exception in workloads:
         for dtype in dtypes:
@@ -6549,7 +6559,7 @@ def test_np_unique():
         ((5, 3, 4), True, True, True, 1),
     ]
     for dtype in ['float32', 'float64', 'int8', 'uint8', 'int32', 'int64']:
-        for hybridize in [False]:
+        for hybridize in [False, True]:
             for config in configs:
                 test_unique = TestUnique(*config[1:])
                 if hybridize:

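The last hunk also runs the unique tests in hybridized mode, exercising the symbolic path of the operator together with the new zero gradient. For reference only, a hybridized block wrapping unique might look roughly like this (illustrative names, not the test's own code):

    from mxnet import np, npx
    from mxnet.gluon import HybridBlock

    npx.set_np()

    class UniqueBlock(HybridBlock):
        def hybrid_forward(self, F, x):
            return F.np.unique(x)

    block = UniqueBlock()
    block.hybridize()
    print(block(np.array([3., 1., 1., 2.])))   # expected: [1. 2. 3.]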