This is an automated email from the ASF dual-hosted git repository.

haibin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new bc570db  [OP] Add a new arange_like operator to contrib (#15400)
bc570db is described below

commit bc570dbd948f23873388bbe9fb84de50679dbef9
Author: Tao Lv <[email protected]>
AuthorDate: Sat Jul 6 14:14:04 2019 +0800

    [OP] Add a new arange_like operator to contrib (#15400)
    
    * add contrib.arange_like operator
    
    * add parameter to template
    
    * fix lint complains
    
    * inum -> num_in
    
    * add unit tests
---
 src/operator/tensor/init_op.cc         | 38 +++++++++++++++++++-
 src/operator/tensor/init_op.cu         |  5 ++-
 src/operator/tensor/init_op.h          | 64 +++++++++++++++++++++++++++++++---
 tests/python/unittest/test_operator.py | 22 ++++++++++++
 4 files changed, 123 insertions(+), 6 deletions(-)

diff --git a/src/operator/tensor/init_op.cc b/src/operator/tensor/init_op.cc
index a58498b..0cbdaa4 100644
--- a/src/operator/tensor/init_op.cc
+++ b/src/operator/tensor/init_op.cc
@@ -32,6 +32,7 @@ DMLC_REGISTER_PARAMETER(InitOpParam);
 DMLC_REGISTER_PARAMETER(InitOpWithScalarParam);
 DMLC_REGISTER_PARAMETER(InitOpWithoutDTypeParam);
 DMLC_REGISTER_PARAMETER(RangeParam);
+DMLC_REGISTER_PARAMETER(RangeLikeParam);
 DMLC_REGISTER_PARAMETER(EyeParam);
 DMLC_REGISTER_PARAMETER(LinspaceParam);
 
@@ -97,9 +98,44 @@ NNVM_REGISTER_OP(_arange)
 .set_attr_parser(RangeParamParser)
 .set_attr<mxnet::FInferShape>("FInferShape", RangeShape)
 .set_attr<nnvm::FInferType>("FInferType", InitType<RangeParam>)
-.set_attr<FCompute>("FCompute<cpu>", RangeCompute<cpu>)
+.set_attr<FCompute>("FCompute<cpu>", RangeCompute<cpu, RangeParam>)
 .add_arguments(RangeParam::__FIELDS__());
 
+NNVM_REGISTER_OP(_contrib_arange_like)
+.describe(R"code(Return an array with evenly spaced values. If axis is not given, the output will
+have the same shape as the input array. Otherwise, the output will be a 1-D array with size of
+the specified axis in input shape.
+
+Examples::
+
+  x = [[0.14883883 0.7772398  0.94865847 0.7225052 ]
+       [0.23729339 0.6112595  0.66538996 0.5132841 ]
+       [0.30822644 0.9912457  0.15502319 0.7043658 ]]
+       <NDArray 3x4 @cpu(0)>
+
+  out = mx.nd.contrib.arange_like(x, start=0)
+
+    [[ 0.  1.  2.  3.]
+     [ 4.  5.  6.  7.]
+     [ 8.  9. 10. 11.]]
+     <NDArray 3x4 @cpu(0)>
+
+  out = mx.nd.contrib.arange_like(x, start=0, axis=-1)
+
+    [0. 1. 2. 3.]
+    <NDArray 4 @cpu(0)>
+)code")
+.set_num_inputs(1)
+.set_num_outputs(1)
+.set_attr_parser(ParamParser<RangeLikeParam>)
+.set_attr<mxnet::FInferShape>("FInferShape", RangeLikeShape)
+.set_attr<nnvm::FInferType>("FInferType", InitType<RangeLikeParam, 1>)
+.set_attr<nnvm::FIgnoreInputs>("FIgnoreInputs",
+    [](const NodeAttrs& attrs) { return std::vector<uint32_t>(1, 0); })
+.set_attr<FCompute>("FCompute<cpu>", RangeCompute<cpu, RangeLikeParam>)
+.set_attr<nnvm::FGradient>("FGradient", MakeZeroGradNodes)
+.add_argument("data", "NDArray-or-Symbol", "The input");
+
 NNVM_REGISTER_OP(_linspace)
 .describe("Return evenly spaced numbers over a specified interval. Similar to Numpy")
 .set_num_inputs(0)
diff --git a/src/operator/tensor/init_op.cu b/src/operator/tensor/init_op.cu
index 5829ff2..ee6963b 100644
--- a/src/operator/tensor/init_op.cu
+++ b/src/operator/tensor/init_op.cu
@@ -62,7 +62,10 @@ NNVM_REGISTER_OP(_full)
 .set_attr<FCompute>("FCompute<gpu>", InitFillWithScalarCompute<gpu>);
 
 NNVM_REGISTER_OP(_arange)
-.set_attr<FCompute>("FCompute<gpu>", RangeCompute<gpu>);
+.set_attr<FCompute>("FCompute<gpu>", RangeCompute<gpu, RangeParam>);
+
+NNVM_REGISTER_OP(_contrib_arange_like)
+.set_attr<FCompute>("FCompute<gpu>", RangeCompute<gpu, RangeLikeParam>);
 
 NNVM_REGISTER_OP(_linspace)
 .set_attr<FCompute>("FCompute<gpu>", LinspaceCompute<gpu>);
diff --git a/src/operator/tensor/init_op.h b/src/operator/tensor/init_op.h
index c7a1054..51c8436 100644
--- a/src/operator/tensor/init_op.h
+++ b/src/operator/tensor/init_op.h
@@ -174,6 +174,40 @@ struct RangeParam : public dmlc::Parameter<RangeParam> {
   }
 };
 
+struct RangeLikeParam : public dmlc::Parameter<RangeLikeParam> {
+  double start;
+  double step;
+  int repeat;
+  std::string ctx;
+  int dtype;
+  dmlc::optional<int> axis;
+
+  DMLC_DECLARE_PARAMETER(RangeLikeParam) {
+    DMLC_DECLARE_FIELD(start)
+    .set_default(0)
    .describe("Start of interval. The interval includes this value. The default start value is 0.");
+    DMLC_DECLARE_FIELD(step)
+    .set_default(1)
+    .describe("Spacing between values.");
+    DMLC_DECLARE_FIELD(repeat)
+    .set_default(1)
+    .describe("The repeating time of all elements."
+              " E.g repeat=3, the element a will be repeated three times --> a, a, a.");
+    DMLC_DECLARE_FIELD(ctx)
+    .set_default("")
+    .describe("Context of output, in format [cpu|gpu|cpu_pinned](n)."
+              "Only used for imperative calls.");
+    DMLC_DECLARE_FIELD(dtype).set_default(mshadow::kFloat32)
+    MXNET_ADD_ALL_TYPES
+    .describe("Target data type.");
+    DMLC_DECLARE_FIELD(axis)
+    .set_default(dmlc::optional<int>())
    .describe("Arange elements according to the size of a certain axis of input array."
              " The negative numbers are interpreted counting from the backward."
              " If not provided, will arange elements according to the input shape.");
+  }
+};
+
 /*! \brief Initialize and fill output with an arbitrary value */
 struct InitOpWithScalarParam : dmlc::Parameter<InitOpWithScalarParam> {
   mxnet::TShape shape;
@@ -250,12 +284,12 @@ inline bool InitShape(const nnvm::NodeAttrs& attrs,
   return shape_is_known(out_attrs->at(0));
 }
 
-template<typename ParamType>
+template<typename ParamType, int num_in = 0U>
 inline bool InitType(const nnvm::NodeAttrs& attrs,
                        std::vector<int> *in_attrs,
                        std::vector<int> *out_attrs) {
   const ParamType& param = nnvm::get<ParamType>(attrs.parsed);
-  CHECK_EQ(in_attrs->size(), 0U);
+  CHECK_EQ(in_attrs->size(), num_in);
   CHECK_EQ(out_attrs->size(), 1U);
   TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype);
   return true;
@@ -493,7 +527,7 @@ struct range_fwd {
   }
 };
 
-template<typename xpu>
+template<typename xpu, typename ParamType>
 void RangeCompute(const nnvm::NodeAttrs& attrs,
                   const OpContext& ctx,
                   const std::vector<TBlob>& inputs,
@@ -501,7 +535,7 @@ void RangeCompute(const nnvm::NodeAttrs& attrs,
                   const std::vector<TBlob>& outputs) {
   using namespace mxnet_op;
   Stream<xpu> *s = ctx.get_stream<xpu>();
-  const RangeParam& param = nnvm::get<RangeParam>(attrs.parsed);
+  const ParamType& param = nnvm::get<ParamType>(attrs.parsed);
   MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      // Force unsigned params to take two's complement form on ARM to ensure consistency with x86
      // results.  Casting negative floats to unsigned types is undefined in the CPP standard.
@@ -588,6 +622,28 @@ inline bool LinspaceShape(const nnvm::NodeAttrs& attrs,
   return true;
 }
 
+inline bool RangeLikeShape(const nnvm::NodeAttrs& attrs,
+                           mxnet::ShapeVector *in_attrs,
+                           mxnet::ShapeVector *out_attrs) {
+  const RangeLikeParam& param = nnvm::get<RangeLikeParam>(attrs.parsed);
+  CHECK_EQ(in_attrs->size(), 1U);
+  CHECK_EQ(out_attrs->size(), 1U);
+  int real_axis = -1;
+  if (param.axis.has_value()) {
+    real_axis = param.axis.value() < 0 ?
+        (param.axis.value() + (*in_attrs)[0].ndim()) : param.axis.value();
+    CHECK(real_axis >=0 && real_axis < (*in_attrs)[0].ndim())
+        << "cannot handle param.axis " << param.axis.value() << ".";
+  }
+  if (real_axis == -1) {
+    SHAPE_ASSIGN_CHECK(*out_attrs, 0, (*in_attrs)[0]);
+  } else {
+    const index_t out_size = (*in_attrs)[0][real_axis];
+    SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape({static_cast<nnvm::dim_t>(out_size)}));
+  }
+  return true;
+}
+
 }  // namespace op
 }  // namespace mxnet
 
diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py
index ca9ecc4..b550139 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -4042,11 +4042,33 @@ def test_init():
         exe.forward()
         assert_almost_equal(exe.outputs[0].asnumpy(), np.array([0,1,2,3,4]))
 
+    def test_arange_like():
+        shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
+        axis_list = [0, -1]
+        for sh in shape_list:
+            for axis in axis_list:
+                val = np.random.rand(*sh)
+                data = mx.nd.array(val)
+                nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
+                np_out = np.arange(start=0, stop=sh[axis])
+                assert_almost_equal(nd_out.asnumpy(), np_out)
+
+    def test_arange_like_without_axis():
+        shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
+        for sh in shape_list:
+            val = np.random.rand(*sh)
+            data = mx.nd.array(val)
+            nd_out = mx.nd.contrib.arange_like(data, start=0)
+            np_out = np.arange(start=0, stop=val.size)
+            assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
+
     test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
     test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
     test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
     test_arange()
     test_arange_inferstop()
+    test_arange_like()
+    test_arange_like_without_axis()
 
 
 @with_seed()

Reply via email to