This is an automated email from the ASF dual-hosted git repository.
haoj pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/master by this push:
new e3d7866 [Numpy] FFI: random.choice, take and clip (#17854)
e3d7866 is described below
commit e3d7866e6854a5c11ab2b2c8bfb63de66f79e132
Author: AntiZpvoh <[email protected]>
AuthorDate: Tue Apr 14 06:35:32 2020 +0800
[Numpy] FFI: random.choice, take and clip (#17854)
* change the header file of np.random.choice
* add np_choice_op.cc file
* add including header file
* implement the basic function of random.choice
* try to use take op in backend
* try to use take op in backend
* add take invoking function
* fix some syntax problems
* fix some problems
* complete numpy.random.choice ffi
* first commit of ffi indexing_op.cc
* add random.choice ffi benchmark
* complete take ffi
* change the implementation of random.choice
* add take op benchmark
* complete clip op ffi and fix a problem
* add clip op benchmark
* fix some sanity problems
* add space before ( and fix reimport
* fix a typo
* remove dead code and remove new operator
Co-authored-by: Ubuntu <[email protected]>
---
benchmark/python/ffi/benchmark_ffi.py | 3 +
python/mxnet/ndarray/numpy/_op.py | 10 +--
python/mxnet/ndarray/numpy/random.py | 22 ++----
.../random/{np_laplace_op.cc => np_choice_op.cc} | 57 +++++++---------
src/api/operator/numpy/random/np_laplace_op.cc | 2 +-
src/api/operator/tensor/indexing_op.cc | 78 ++++++++++++++++++++++
src/api/operator/tensor/matrix_op.cc | 71 ++++++++++++++++++++
src/operator/numpy/random/np_choice_op.h | 11 +++
src/operator/tensor/indexing_op.h | 21 ++++++
src/operator/tensor/matrix_op-inl.h | 8 +++
10 files changed, 228 insertions(+), 55 deletions(-)
diff --git a/benchmark/python/ffi/benchmark_ffi.py
b/benchmark/python/ffi/benchmark_ffi.py
index 6770102..01534f1 100644
--- a/benchmark/python/ffi/benchmark_ffi.py
+++ b/benchmark/python/ffi/benchmark_ffi.py
@@ -92,6 +92,9 @@ def prepare_workloads():
OpArgMngr.add_workload("diff", pool['2x2'], n=1, axis=-1)
OpArgMngr.add_workload("nonzero", pool['2x2'])
OpArgMngr.add_workload("tril", pool['2x2'], k=0)
+ OpArgMngr.add_workload("random.choice", pool['2'], size=(2, 2))
+ OpArgMngr.add_workload("take", pool['2'], dnp.array([1,0], dtype='int64'))
+ OpArgMngr.add_workload("clip", pool['2x2'], 0, 1)
OpArgMngr.add_workload("expand_dims", pool['2x2'], axis=0)
OpArgMngr.add_workload("broadcast_to", pool['2x2'], (2, 2, 2))
OpArgMngr.add_workload("full_like", pool['2x2'], 2)
diff --git a/python/mxnet/ndarray/numpy/_op.py
b/python/mxnet/ndarray/numpy/_op.py
index 45a71b2..e88796c 100644
--- a/python/mxnet/ndarray/numpy/_op.py
+++ b/python/mxnet/ndarray/numpy/_op.py
@@ -690,9 +690,9 @@ def take(a, indices, axis=None, mode='raise', out=None):
raise NotImplementedError(
"function take does not support mode '{}'".format(mode))
if axis is None:
- return _npi.take(_npi.reshape(a, -1), indices, 0, mode, out)
+ return _api_internal.take(_npi.reshape(a, -1), indices, 0, mode, out)
else:
- return _npi.take(a, indices, axis, mode, out)
+ return _api_internal.take(a, indices, axis, mode, out)
# pylint: enable=redefined-outer-name
@@ -4551,11 +4551,7 @@ def clip(a, a_min, a_max, out=None):
"""
if a_min is None and a_max is None:
raise ValueError('array_clip: must set either max or min')
- if a_min is None:
- a_min = float('-inf')
- if a_max is None:
- a_max = float('inf')
- return _npi.clip(a, a_min, a_max, out=out)
+ return _api_internal.clip(a, a_min, a_max, out)
@set_module('mxnet.ndarray.numpy')
diff --git a/python/mxnet/ndarray/numpy/random.py
b/python/mxnet/ndarray/numpy/random.py
index 93bc0e8..8449852 100644
--- a/python/mxnet/ndarray/numpy/random.py
+++ b/python/mxnet/ndarray/numpy/random.py
@@ -535,24 +535,16 @@ def choice(a, size=None, replace=True, p=None, ctx=None,
out=None):
"""
from ...numpy import ndarray as np_ndarray
if ctx is None:
- ctx = current_context()
+ ctx = str(current_context())
+ else:
+ ctx = str(ctx)
if size == ():
size = None
if isinstance(a, np_ndarray):
- ctx = None
- if p is None:
- indices = _npi.choice(a, a=None, size=size,
- replace=replace, ctx=ctx, weighted=False)
- return _npi.take(a, indices)
- else:
- indices = _npi.choice(a, p, a=None, size=size,
- replace=replace, ctx=ctx, weighted=True)
- return _npi.take(a, indices)
+ indices = _api_internal.choice(a, size, replace, p, ctx, out)
+ return _api_internal.take(a, indices, 0, 'raise', out)
else:
- if p is None:
- return _npi.choice(a=a, size=size, replace=replace, ctx=ctx,
weighted=False, out=out)
- else:
- return _npi.choice(p, a=a, size=size, replace=replace, ctx=ctx,
weighted=True, out=out)
+ return _api_internal.choice(a, size, replace, p, ctx, out)
def exponential(scale=1.0, size=None, ctx=None, out=None):
@@ -834,7 +826,7 @@ def beta(a, b, size=None, dtype=None, ctx=None):
# use fp64 to prevent precision loss
X = gamma(a, 1, size=size, dtype='float64', ctx=ctx)
Y = gamma(b, 1, size=size, dtype='float64', ctx=ctx)
- out = X/(X + Y)
+ out = X / (X + Y)
return out.astype(dtype)
diff --git a/src/api/operator/numpy/random/np_laplace_op.cc
b/src/api/operator/numpy/random/np_choice_op.cc
similarity index 62%
copy from src/api/operator/numpy/random/np_laplace_op.cc
copy to src/api/operator/numpy/random/np_choice_op.cc
index 40e7901..fe7b54d 100644
--- a/src/api/operator/numpy/random/np_laplace_op.cc
+++ b/src/api/operator/numpy/random/np_choice_op.cc
@@ -18,70 +18,63 @@
*/
/*!
- * \file np_laplace_op.cc
- * \brief Implementation of the API of functions in
src/operator/numpy/np_laplace_op.cc
+ * \file np_choice_op.cc
+ * \brief Implementation of the API of functions in
src/operator/numpy/np_choice_op.cc
*/
#include <mxnet/api_registry.h>
#include <mxnet/runtime/packed_func.h>
#include "../../utils.h"
-#include "../../../../operator/numpy/random/np_laplace_op.h"
+#include "../../../../operator/numpy/random/np_choice_op.h"
namespace mxnet {
-MXNET_REGISTER_API("_npi.laplace")
+MXNET_REGISTER_API("_npi.choice")
.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
using namespace runtime;
- const nnvm::Op* op = Op::Get("_npi_laplace");
+ const nnvm::Op* op = Op::Get("_npi_choice");
nnvm::NodeAttrs attrs;
- op::NumpyLaplaceParam param;
+ op::NumpyChoiceParam param;
- NDArray** inputs = new NDArray*[2]();
+ NDArray* inputs[2];
int num_inputs = 0;
- if (args[0].type_code() == kNull) {
- param.loc = dmlc::nullopt;
+ if (args[0].type_code() == kDLInt) {
+ param.a = args[0].operator int();
} else if (args[0].type_code() == kNDArrayHandle) {
- param.loc = dmlc::nullopt;
- inputs[num_inputs] = args[0].operator mxnet::NDArray *();
+ param.a = dmlc::nullopt;
+ inputs[num_inputs] = args[0].operator mxnet::NDArray*();
num_inputs++;
- } else {
- param.loc = args[0].operator double(); // convert arg to T
}
if (args[1].type_code() == kNull) {
- param.scale = dmlc::nullopt;
- } else if (args[1].type_code() == kNDArrayHandle) {
- param.scale = dmlc::nullopt;
- inputs[num_inputs] = args[1].operator mxnet::NDArray *();
- num_inputs++;
+ param.size = dmlc::nullopt;
} else {
- param.scale = args[1].operator double(); // convert arg to T
+ if (args[1].type_code() == kDLInt) {
+ param.size = mxnet::Tuple<int64_t>(1, args[1].operator int64_t());
+ } else {
+ param.size = mxnet::Tuple<int64_t>(args[1].operator ObjectRef());
+ }
}
if (args[2].type_code() == kNull) {
- param.size = dmlc::nullopt;
+ param.replace = true;
} else {
- if (args[2].type_code() == kDLInt) {
- param.size = mxnet::Tuple<int>(1, args[2].operator int64_t());
- } else {
- param.size = mxnet::Tuple<int>(args[2].operator ObjectRef());
- }
+ param.replace = args[2].operator bool();
}
if (args[3].type_code() == kNull) {
- param.dtype = mshadow::kFloat32;
- } else {
- param.dtype = String2MXNetTypeWithBool(args[3].operator std::string());
+ param.weighted = false;
+ } else if (args[0].type_code() == kNDArrayHandle) {
+ param.weighted = true;
+ inputs[num_inputs] = args[3].operator mxnet::NDArray*();
+ num_inputs++;
}
+
attrs.parsed = std::move(param);
attrs.op = op;
- SetAttrDict<op::NumpyLaplaceParam>(&attrs);
if (args[4].type_code() != kNull) {
attrs.dict["ctx"] = args[4].operator std::string();
}
-
- inputs = inputs == nullptr ? nullptr : inputs;
-
NDArray* out = args[5].operator mxnet::NDArray*();
NDArray** outputs = out == nullptr ? nullptr : &out;
int num_outputs = out != nullptr;
diff --git a/src/api/operator/numpy/random/np_laplace_op.cc
b/src/api/operator/numpy/random/np_laplace_op.cc
index 40e7901..57f770b 100644
--- a/src/api/operator/numpy/random/np_laplace_op.cc
+++ b/src/api/operator/numpy/random/np_laplace_op.cc
@@ -19,7 +19,7 @@
/*!
* \file np_laplace_op.cc
- * \brief Implementation of the API of functions in
src/operator/numpy/np_laplace_op.cc
+ * \brief Implementation of the API of functions in
src/operator/numpy/random/np_laplace_op.cc
*/
#include <mxnet/api_registry.h>
#include <mxnet/runtime/packed_func.h>
diff --git a/src/api/operator/tensor/indexing_op.cc
b/src/api/operator/tensor/indexing_op.cc
new file mode 100644
index 0000000..df19401
--- /dev/null
+++ b/src/api/operator/tensor/indexing_op.cc
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file indexing_op.cc
+ * \brief Implementation of the API of functions in
src/operator/tensor/indexing_op.cc
+ */
+#include <mxnet/api_registry.h>
+#include <mxnet/runtime/packed_func.h>
+#include "../utils.h"
+#include "../../../operator/tensor/indexing_op.h"
+
+namespace mxnet {
+
+MXNET_REGISTER_API("_npi.take")
+.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
+ using namespace runtime;
+ const nnvm::Op* op = Op::Get("_npi_take");
+ nnvm::NodeAttrs attrs;
+ op::TakeParam param;
+ NDArray* inputs[2];
+
+ if (args[0].type_code() != kNull) {
+ inputs[0] = args[0].operator mxnet::NDArray *();
+ }
+
+ if (args[1].type_code() != kNull) {
+ inputs[1] = args[1].operator mxnet::NDArray *();
+ }
+
+ if (args[2].type_code() == kDLInt) {
+ param.axis = args[2].operator int();
+ }
+
+ if (args[3].type_code() != kNull) {
+ std::string mode = args[3].operator std::string();
+ if (mode == "raise") {
+ param.mode = op::take_::kRaise;
+ } else if (mode == "clip") {
+ param.mode = op::take_::kClip;
+ } else if (mode == "wrap") {
+ param.mode = op::take_::kWrap;
+ }
+ }
+
+ attrs.parsed = param;
+ attrs.op = op;
+ SetAttrDict<op::TakeParam>(&attrs);
+
+ NDArray* out = args[4].operator mxnet::NDArray*();
+ NDArray** outputs = out == nullptr ? nullptr : &out;
+ // set the number of outputs provided by the `out` argument
+ int num_outputs = out != nullptr;
+ auto ndoutputs = Invoke(op, &attrs, 2, inputs, &num_outputs, outputs);
+ if (out) {
+ *ret = PythonArg(4);
+ } else {
+ *ret = ndoutputs[0];
+ }
+});
+
+} // namespace mxnet
diff --git a/src/api/operator/tensor/matrix_op.cc
b/src/api/operator/tensor/matrix_op.cc
new file mode 100644
index 0000000..ed91b09
--- /dev/null
+++ b/src/api/operator/tensor/matrix_op.cc
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file matrix_op.cc
+ * \brief Implementation of the API of functions in
src/operator/tensor/matrix_op.cc
+ */
+#include <mxnet/api_registry.h>
+#include <mxnet/runtime/packed_func.h>
+#include "../utils.h"
+#include "../../../operator/tensor/matrix_op-inl.h"
+
+namespace mxnet {
+
+MXNET_REGISTER_API("_npi.clip")
+.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
+ using namespace runtime;
+ const nnvm::Op* op = Op::Get("_npi_clip");
+ nnvm::NodeAttrs attrs;
+ op::ClipParam param;
+ NDArray* inputs[1];
+
+ if (args[0].type_code() != kNull) {
+ inputs[0] = args[0].operator mxnet::NDArray *();
+ }
+
+ if (args[1].type_code() != kNull) {
+ param.a_min = args[1].operator double();
+ } else {
+ param.a_min = -INFINITY;
+ }
+
+ if (args[2].type_code() != kNull) {
+ param.a_max = args[2].operator double();
+ } else {
+ param.a_max = INFINITY;
+ }
+
+ attrs.parsed = param;
+ attrs.op = op;
+ SetAttrDict<op::ClipParam>(&attrs);
+
+ NDArray* out = args[3].operator mxnet::NDArray*();
+ NDArray** outputs = out == nullptr ? nullptr : &out;
+ // set the number of outputs provided by the `out` argument
+ int num_outputs = out != nullptr;
+ auto ndoutputs = Invoke(op, &attrs, 1, inputs, &num_outputs, outputs);
+ if (out) {
+ *ret = PythonArg(3);
+ } else {
+ *ret = ndoutputs[0];
+ }
+});
+
+} // namespace mxnet
diff --git a/src/operator/numpy/random/np_choice_op.h
b/src/operator/numpy/random/np_choice_op.h
index a6a7cec..bc1e712 100644
--- a/src/operator/numpy/random/np_choice_op.h
+++ b/src/operator/numpy/random/np_choice_op.h
@@ -53,6 +53,17 @@ struct NumpyChoiceParam : public
dmlc::Parameter<NumpyChoiceParam> {
DMLC_DECLARE_FIELD(replace).set_default(true);
DMLC_DECLARE_FIELD(weighted).set_default(false);
}
+ void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
+ std::ostringstream a_s, size_s, replace_s, weighted_s;
+ a_s << a;
+ size_s << size;
+ replace_s << replace;
+ weighted_s << weighted;
+ (*dict)["a"] = a_s.str();
+ (*dict)["size"] = size_s.str();
+ (*dict)["replace"] = replace_s.str();
+ (*dict)["weighted"] = weighted_s.str();
+ }
};
inline bool NumpyChoiceOpType(const nnvm::NodeAttrs &attrs,
diff --git a/src/operator/tensor/indexing_op.h
b/src/operator/tensor/indexing_op.h
index 2b04881..cd85daa 100644
--- a/src/operator/tensor/indexing_op.h
+++ b/src/operator/tensor/indexing_op.h
@@ -680,6 +680,27 @@ struct TakeParam: public dmlc::Parameter<TakeParam> {
" \"wrap\" means to wrap around."
" \"raise\" means to raise an error when index out of range.");
}
+
+ void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
+ std::ostringstream axis_s, mode_s;
+ axis_s << axis;
+ mode_s << mode;
+ (*dict)["axis"] = axis_s.str();
+ (*dict)["mode"] = mode_s.str();
+ switch (mode) {
+ case take_::kRaise:
+ (*dict)["mode"] = "raise";
+ break;
+ case take_::kClip:
+ (*dict)["mode"] = "clip";
+ break;
+ case take_::kWrap:
+ (*dict)["mode"] = "wrap";
+ break;
+ default:
+ (*dict)["mode"] = mode_s.str();
+ }
+ }
};
inline bool TakeOpShape(const nnvm::NodeAttrs& attrs,
diff --git a/src/operator/tensor/matrix_op-inl.h
b/src/operator/tensor/matrix_op-inl.h
index 6efde79..821fa85 100644
--- a/src/operator/tensor/matrix_op-inl.h
+++ b/src/operator/tensor/matrix_op-inl.h
@@ -1605,6 +1605,14 @@ struct ClipParam : public dmlc::Parameter<ClipParam> {
DMLC_DECLARE_FIELD(a_max)
.describe("Maximum value");
}
+
+ void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
+ std::ostringstream a_min_s, a_max_s;
+ a_min_s << a_min;
+ a_max_s << a_max;
+ (*dict)["a_min"] = a_min_s.str();
+ (*dict)["a_max"] = a_max_s.str();
+ }
};