[GitHub] [incubator-mxnet] xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior random.uniform()

2019-08-12 Thread GitBox
xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior 
random.uniform()
URL: https://github.com/apache/incubator-mxnet/pull/15858#discussion_r313222153
 
 

 ##
 File path: src/operator/numpy/random/np_uniform_op.h
 ##
 @@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2019 by Contributors
+ * \file np_uniform_op.h
+ * \brief Operator for numpy sampling from uniform distributions
+ */
+#ifndef MXNET_OPERATOR_NUMPY_RANDOM_NP_UNIFORM_OP_H_
+#define MXNET_OPERATOR_NUMPY_RANDOM_NP_UNIFORM_OP_H_
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "../../elemwise_op_common.h"
+#include "../../mshadow_op.h"
+#include "../../mxnet_op.h"
+#include "../../operator_common.h"
+#include "../../tensor/elemwise_binary_broadcast_op.h"
+#include "./dist_common.h"
+
+namespace mxnet {
+namespace op {
+
+struct NumpyUniformParam : public dmlc::Parameter<NumpyUniformParam> {
+  dmlc::optional<float> low;
+  dmlc::optional<float> high;
+  std::string ctx;
+  int dtype;
+  dmlc::optional<mxnet::Tuple<int>> size;
+  DMLC_DECLARE_PARAMETER(NumpyUniformParam) {
+    DMLC_DECLARE_FIELD(low);
+    DMLC_DECLARE_FIELD(high);
+    DMLC_DECLARE_FIELD(size)
+        .set_default(dmlc::optional<mxnet::Tuple<int>>())
+        .describe(
+            "Output shape. If the given shape is, "
+            "e.g., (m, n, k), then m * n * k samples are drawn. "
+            "Default is None, in which case a single value is returned.");
+    DMLC_DECLARE_FIELD(ctx).set_default("cpu").describe(
+        "Context of output, in format [cpu|gpu|cpu_pinned](n)."
+        " Only used for imperative calls.");
+    DMLC_DECLARE_FIELD(dtype)
+        .add_enum("float32", mshadow::kFloat32)
+        .add_enum("float64", mshadow::kFloat64)
+        .add_enum("float16", mshadow::kFloat16)
+        .set_default(mshadow::kFloat32)
+        .describe(
+            "DType of the output in case this can't be inferred. "
+            "Defaults to float32 if not defined (dtype=None).");
+  }
+};
+
+inline bool NumpyUniformOpType(const nnvm::NodeAttrs &attrs,
+                               std::vector<int> *in_attrs,
+                               std::vector<int> *out_attrs) {
+  const NumpyUniformParam &param = nnvm::get<NumpyUniformParam>(attrs.parsed);
+  int otype = param.dtype;
+  if (otype != -1) {
+    (*out_attrs)[0] = otype;
+  } else {
+    (*out_attrs)[0] = mshadow::kFloat32;
+  }
+  return true;
+}
+
+namespace mxnet_op {
+template <int ndim, typename IType, typename OType>
+struct uniform_kernel {
+  MSHADOW_XINLINE static void Map(index_t i, const Shape<ndim> &lstride,
+                                  const Shape<ndim> &hstride,
+                                  const Shape<ndim> &oshape, IType *low,
+                                  IType *high, float *uniform, OType *out) {
+    Shape<ndim> coord = unravel(i, oshape);
+    auto lidx = static_cast<index_t>(dot(coord, lstride));
+    auto hidx = static_cast<index_t>(dot(coord, hstride));
+    IType low_value = low[lidx];
+    IType high_value = high[hidx];
+    out[i] = low_value + uniform[i] * (high_value - low_value);
+  }
+};
+
+template <int ndim, typename IType, typename OType>
+struct uniform_one_scalar_kernel {
+  MSHADOW_XINLINE static void Map(index_t i, int scalar_pos,
+                                  const Shape<ndim> &stride,
+                                  const Shape<ndim> &oshape, IType *array,
+                                  float scalar, float *uniform, OType *out) {
+    Shape<ndim> coord = unravel(i, oshape);
+    auto idx = static_cast<index_t>(dot(coord, stride));
+    IType low_value;
+    IType high_value;
+    if (scalar_pos == 0) {
+      low_value = scalar;
+      high_value = array[idx];
+    } else {
+      low_value = array[idx];
+      high_value = scalar;
+    }
+    out[i] = low_value + uniform[i] * (high_value - low_value);
+  }
+};
+
+template <typename OType>
+struct uniform_two_scalar_kernel {
+  MSHADOW_XINLINE static void Map(index_t i, float low, float high,
+                                  float *uniform, OType *out) {
+    out[i] = low + uniform[i] * (high - low);
+  }
+};
+}  // namespace mxnet_op
+
+template <typename xpu>
+void NumpyUniformForward(const nnvm::NodeAttrs &attrs,
+                         const OpContext &ctx,
+                         const std::vector<TBlob> &inputs,
+                         const std::vector<OpReqType> &req,
+                         const std::vector<TBlob> &outputs) {
+  using namespace mshadow;
+  using namespace mxnet_op;
+  
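The broadcast indexing used by uniform_kernel above (unravel the flat output index, then dot it with each operand's stride vector) can be sketched in plain NumPy. This is an illustrative sketch only; the helper names are not part of the PR, and strides of size-1 axes are set to 0 so the same element is reused along broadcast dimensions.

import numpy as np

def broadcast_strides(shape, oshape):
    """Element strides of `shape` when broadcast to `oshape` (0 on broadcast axes)."""
    shape = (1,) * (len(oshape) - len(shape)) + tuple(shape)
    strides, acc = [], 1
    for dim in reversed(shape):
        strides.append(acc if dim != 1 else 0)
        acc *= dim
    return tuple(reversed(strides))

def uniform_kernel(low, high, u, oshape):
    """out[i] = low[lidx] + u[i] * (high[hidx] - low[lidx]), mirroring the C++ kernel."""
    lstride = broadcast_strides(low.shape, oshape)
    hstride = broadcast_strides(high.shape, oshape)
    out = np.empty(int(np.prod(oshape)), dtype=u.dtype)
    for i in range(out.size):
        coord = np.unravel_index(i, oshape)       # unravel(i, oshape)
        lidx = int(np.dot(coord, lstride))        # dot(coord, lstride)
        hidx = int(np.dot(coord, hstride))        # dot(coord, hstride)
        out[i] = low.ravel()[lidx] + u[i] * (high.ravel()[hidx] - low.ravel()[lidx])
    return out.reshape(oshape)

low = np.array([[0.0], [10.0]])    # shape (2, 1)
high = np.array([1.0, 2.0, 3.0])   # shape (3,); broadcasts with low to (2, 3)
u = np.random.rand(6).astype(np.float32)
print(uniform_kernel(low, high, u, (2, 3)))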

[GitHub] [incubator-mxnet] xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior random.uniform()

2019-08-12 Thread GitBox
xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior 
random.uniform()
URL: https://github.com/apache/incubator-mxnet/pull/15858#discussion_r313204568
 
 

 ##
 File path: src/operator/numpy/random/dist_common.h
 ##
 @@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ *  Copyright (c) 2015 by Contributors
+ * \file etwoparams_dist_common.h
+ * \brief Function definition of common functions for distributions
+ * \with two parameters.
+ */
+
+#ifndef MXNET_OPERATOR_NUMPY_RANDOM_DIST_COMMON_H_
+#define MXNET_OPERATOR_NUMPY_RANDOM_DIST_COMMON_H_
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "../../elemwise_op_common.h"
+#include "../../tensor/elemwise_binary_broadcast_op.h"
+#include "../../mshadow_op.h"
+#include "../../mxnet_op.h"
+#include "../../operator_common.h"
+
+namespace mxnet {
+namespace op {
+
+inline int FillShape(const mxnet::TShape &lshape, const mxnet::TShape &rshape,
+                     const mxnet::TShape &oshape, mxnet::TShape *new_lshape,
+                     mxnet::TShape *new_rshape, mxnet::TShape *new_oshape) {
+  const int odim = std::max(oshape.ndim(), broadcast::MAX_DIM);
+  *new_lshape = mxnet::TShape(odim, 1);
+  *new_rshape = mxnet::TShape(odim, 1);
+  *new_oshape = mxnet::TShape(odim, 1);
+  int bl = oshape.ndim() - lshape.ndim();
+  int br = oshape.ndim() - rshape.ndim();
+  int j = 0, lprod = 1, rprod = 1, oprod = 1;
+  for (int i = 0; i < oshape.ndim(); ++i) {
+int l = 1;
+int r = 1;
+int o = oshape[i];
+if (i >= bl)  l = lshape[i - bl];
+if (i >= br)  r = rshape[i - br];
+if ((lprod != rprod || lprod != oprod || l != r || l != o) &&
+(lprod * l > 1 || rprod * r > 1 || oprod * o > 1)) {
+  (*new_lshape)[j] = lprod;
+  (*new_rshape)[j] = rprod;
+  (*new_oshape)[j] = oprod;
+  lprod = rprod = oprod = 1; ++j;
+}
+lprod *= l;
+rprod *= r;
+oprod *= o;
+  }
+  if (lprod > 1 || rprod > 1 || oprod > 1) {
+(*new_lshape)[j] = lprod;
+(*new_rshape)[j] = rprod;
+(*new_oshape)[j] = oprod;
+++j;
+  }
+  if (j <= broadcast::MAX_DIM) {
+BROADCAST_NDIM_SWITCH(j, NDim, {
+  new_lshape->assign(new_lshape->begin(), new_lshape->begin() + NDim);
+  new_rshape->assign(new_rshape->begin(), new_rshape->begin() + NDim);
+  new_oshape->assign(new_oshape->begin(), new_oshape->begin() + NDim);
+});
+  } else {
+    LOG(FATAL) << "Too many broadcast dimensions with operands " << lshape << " " << rshape;
+  }
+  return j;
+}
+
+inline void CheckBroadcastable(const mxnet::TShape &from, const mxnet::TShape &to) {
+  const int bl = to.ndim() - from.ndim();
+  const int br = 0;
+  for (int i = 0; i < to.ndim(); ++i) {
+int l = 1, r = 1;
+if (i >= bl)
+  l = from[i - bl];
+if (i >= br)
+  r = to[i - br];
+if (!mxnet::dim_size_is_known(l) || !mxnet::dim_size_is_known(r))
+  continue;
+if (l != r) {
+  // Make it compatible with NumPy.
+  // For example, (2, 3) cannot broadcast to (2, 0, 3), but (1, 3) can
+  // broadcast to (2, 0, 3).
+  CHECK(l == 1 || r == 1)
+  << "operands could not be broadcast together with shapes " << from
+  << " " << to;
+}
+  }
+}
+
+inline void InferBroadcastShape(const mxnet::TShape &lhs, const mxnet::TShape &rhs,
+                                mxnet::TShape* out_ptr) {
+  mxnet::TShape& out = (*out_ptr);
+  const int bl = out.ndim() - lhs.ndim();
+  const int br = out.ndim() - rhs.ndim();
+  for (int i = 0; i < out.ndim(); ++i) {
+int l = 1, r = 1;
+if (i >= bl)
+  l = lhs[i - bl];
+if (i >= br)
+  r = rhs[i - br];
+if (!mxnet::dim_size_is_known(l) || !mxnet::dim_size_is_known(r))
+  continue;
 
 Review comment:
   fixed!


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
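The three helpers quoted above encode the NumPy broadcasting rule: shapes are right-aligned, each axis pair must be equal or contain a 1, and a 0-sized axis only pairs with 1 (hence the (2, 3) versus (2, 0, 3) comment). A rough Python equivalent of InferBroadcastShape, for reference only:

def infer_broadcast_shape(lhs, rhs):
    """NumPy-style broadcast of two shapes, mirroring CheckBroadcastable/InferBroadcastShape."""
    ndim = max(len(lhs), len(rhs))
    lhs = (1,) * (ndim - len(lhs)) + tuple(lhs)
    rhs = (1,) * (ndim - len(rhs)) + tuple(rhs)
    out = []
    for l, r in zip(lhs, rhs):
        if l != r and 1 not in (l, r):
            raise ValueError(f"operands could not be broadcast together with shapes {lhs} {rhs}")
        out.append(r if l == 1 else l)
    return tuple(out)

print(infer_broadcast_shape((1, 3), (2, 0, 3)))   # (2, 0, 3)
print(infer_broadcast_shape((2, 1), (3,)))        # (2, 3)
# infer_broadcast_shape((2, 3), (2, 0, 3))        # raises: axis 3 vs 0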


[GitHub] [incubator-mxnet] xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior random.uniform()

2019-08-12 Thread GitBox
xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior 
random.uniform()
URL: https://github.com/apache/incubator-mxnet/pull/15858#discussion_r313204665
 
 

 ##
 File path: src/operator/numpy/random/dist_common.h
 ##
 @@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ *  Copyright (c) 2015 by Contributors
+ * \file etwoparams_dist_common.h
+ * \brief Function definition of common functions for distributions
+ * \with two parameters.
+ */
+
+#ifndef MXNET_OPERATOR_NUMPY_RANDOM_DIST_COMMON_H_
+#define MXNET_OPERATOR_NUMPY_RANDOM_DIST_COMMON_H_
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "../../elemwise_op_common.h"
+#include "../../tensor/elemwise_binary_broadcast_op.h"
+#include "../../mshadow_op.h"
+#include "../../mxnet_op.h"
+#include "../../operator_common.h"
+
+namespace mxnet {
+namespace op {
+
+inline int FillShape(const mxnet::TShape &lshape, const mxnet::TShape &rshape,
+                     const mxnet::TShape &oshape, mxnet::TShape *new_lshape,
+                     mxnet::TShape *new_rshape, mxnet::TShape *new_oshape) {
+  const int odim = std::max(oshape.ndim(), broadcast::MAX_DIM);
+  *new_lshape = mxnet::TShape(odim, 1);
+  *new_rshape = mxnet::TShape(odim, 1);
+  *new_oshape = mxnet::TShape(odim, 1);
+  int bl = oshape.ndim() - lshape.ndim();
+  int br = oshape.ndim() - rshape.ndim();
+  int j = 0, lprod = 1, rprod = 1, oprod = 1;
+  for (int i = 0; i < oshape.ndim(); ++i) {
+int l = 1;
+int r = 1;
+int o = oshape[i];
+if (i >= bl)  l = lshape[i - bl];
+if (i >= br)  r = rshape[i - br];
+if ((lprod != rprod || lprod != oprod || l != r || l != o) &&
+(lprod * l > 1 || rprod * r > 1 || oprod * o > 1)) {
+  (*new_lshape)[j] = lprod;
+  (*new_rshape)[j] = rprod;
+  (*new_oshape)[j] = oprod;
+  lprod = rprod = oprod = 1; ++j;
+}
+lprod *= l;
+rprod *= r;
+oprod *= o;
+  }
+  if (lprod > 1 || rprod > 1 || oprod > 1) {
+(*new_lshape)[j] = lprod;
+(*new_rshape)[j] = rprod;
+(*new_oshape)[j] = oprod;
+++j;
+  }
+  if (j <= broadcast::MAX_DIM) {
+BROADCAST_NDIM_SWITCH(j, NDim, {
+  new_lshape->assign(new_lshape->begin(), new_lshape->begin() + NDim);
+  new_rshape->assign(new_rshape->begin(), new_rshape->begin() + NDim);
+  new_oshape->assign(new_oshape->begin(), new_oshape->begin() + NDim);
+});
+  } else {
+    LOG(FATAL) << "Too many broadcast dimensions with operands " << lshape << " " << rshape;
+  }
+  return j;
+}
+
+inline void CheckBroadcastable(const mxnet::TShape &from, const mxnet::TShape &to) {
+  const int bl = to.ndim() - from.ndim();
+  const int br = 0;
+  for (int i = 0; i < to.ndim(); ++i) {
+int l = 1, r = 1;
+if (i >= bl)
+  l = from[i - bl];
+if (i >= br)
+  r = to[i - br];
+if (!mxnet::dim_size_is_known(l) || !mxnet::dim_size_is_known(r))
+  continue;
+if (l != r) {
+  // Make it compatible with NumPy.
+  // For example, (2, 3) cannot broadcast to (2, 0, 3), but (1, 3) can
+  // broadcast to (2, 0, 3).
+  CHECK(l == 1 || r == 1)
+  << "operands could not be broadcast together with shapes " << from
+  << " " << to;
+}
+  }
+}
+
+inline void InferBroadcastShape(const mxnet::TShape &lhs, const mxnet::TShape &rhs,
+                                mxnet::TShape* out_ptr) {
 
 Review comment:
   fixed, thx for pointing out!


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
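FillShape, quoted again above, collapses adjacent axes that broadcast the same way so the kernel can be launched with the smallest possible ndim. A Python mirror of that collapsing, as an illustrative sketch (the broadcast::MAX_DIM check is omitted):

def fill_shape(lshape, rshape, oshape):
    """Collapse axes that broadcast identically, mirroring FillShape in dist_common.h."""
    bl, br = len(oshape) - len(lshape), len(oshape) - len(rshape)
    new_l, new_r, new_o = [], [], []
    lprod = rprod = oprod = 1
    for i, o in enumerate(oshape):
        l = lshape[i - bl] if i >= bl else 1
        r = rshape[i - br] if i >= br else 1
        pattern_changes = lprod != rprod or lprod != oprod or l != r or l != o
        nontrivial = lprod * l > 1 or rprod * r > 1 or oprod * o > 1
        if pattern_changes and nontrivial:
            new_l.append(lprod)
            new_r.append(rprod)
            new_o.append(oprod)
            lprod = rprod = oprod = 1
        lprod *= l
        rprod *= r
        oprod *= o
    if lprod > 1 or rprod > 1 or oprod > 1:
        new_l.append(lprod)
        new_r.append(rprod)
        new_o.append(oprod)
    return tuple(new_l) or (1,), tuple(new_r) or (1,), tuple(new_o) or (1,)

# (2, 3, 1) against (2, 3, 4) collapses to (6, 1) against (6, 4): one broadcast axis instead of three.
print(fill_shape((2, 3, 1), (2, 3, 4), (2, 3, 4)))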


[GitHub] [incubator-mxnet] xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior random.uniform()

2019-08-12 Thread GitBox
xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior 
random.uniform()
URL: https://github.com/apache/incubator-mxnet/pull/15858#discussion_r313204488
 
 

 ##
 File path: src/operator/numpy/random/np_uniform_op.cu
 ##
 @@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2019 by Contributors
+ * \file np_uniform_op.cu
+ * \brief Operator for numpy sampling from uniform distributions
+ */
+
+#include "./np_uniform_op.h"
+
+namespace mxnet {
+namespace op {
+
+NNVM_REGISTER_OP(_npi_uniform)
+.set_attr<FCompute>("FCompute<gpu>", NumpyUniformForward<gpu>);
+
+}
+}
 
 Review comment:
   fixed!
   thx


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior random.uniform()

2019-08-12 Thread GitBox
xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior 
random.uniform()
URL: https://github.com/apache/incubator-mxnet/pull/15858#discussion_r313204512
 
 

 ##
 File path: src/operator/numpy/random/dist_common.h
 ##
 @@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ *  Copyright (c) 2015 by Contributors
+ * \file etwoparams_dist_common.h
+ * \brief Function definition of common functions for distributions
+ * \with two parameters.
+ */
+
+#ifndef MXNET_OPERATOR_NUMPY_RANDOM_DIST_COMMON_H_
+#define MXNET_OPERATOR_NUMPY_RANDOM_DIST_COMMON_H_
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "../../elemwise_op_common.h"
+#include "../../tensor/elemwise_binary_broadcast_op.h"
+#include "../../mshadow_op.h"
+#include "../../mxnet_op.h"
+#include "../../operator_common.h"
+
+namespace mxnet {
+namespace op {
+
+inline int FillShape(const mxnet::TShape &lshape, const mxnet::TShape &rshape,
+                     const mxnet::TShape &oshape, mxnet::TShape *new_lshape,
+                     mxnet::TShape *new_rshape, mxnet::TShape *new_oshape) {
+  const int odim = std::max(oshape.ndim(), broadcast::MAX_DIM);
+  *new_lshape = mxnet::TShape(odim, 1);
+  *new_rshape = mxnet::TShape(odim, 1);
+  *new_oshape = mxnet::TShape(odim, 1);
+  int bl = oshape.ndim() - lshape.ndim();
+  int br = oshape.ndim() - rshape.ndim();
+  int j = 0, lprod = 1, rprod = 1, oprod = 1;
+  for (int i = 0; i < oshape.ndim(); ++i) {
+int l = 1;
+int r = 1;
+int o = oshape[i];
+if (i >= bl)  l = lshape[i - bl];
+if (i >= br)  r = rshape[i - br];
+if ((lprod != rprod || lprod != oprod || l != r || l != o) &&
+(lprod * l > 1 || rprod * r > 1 || oprod * o > 1)) {
+  (*new_lshape)[j] = lprod;
+  (*new_rshape)[j] = rprod;
+  (*new_oshape)[j] = oprod;
+  lprod = rprod = oprod = 1; ++j;
+}
+lprod *= l;
+rprod *= r;
+oprod *= o;
+  }
+  if (lprod > 1 || rprod > 1 || oprod > 1) {
+(*new_lshape)[j] = lprod;
+(*new_rshape)[j] = rprod;
+(*new_oshape)[j] = oprod;
+++j;
+  }
+  if (j <= broadcast::MAX_DIM) {
+BROADCAST_NDIM_SWITCH(j, NDim, {
+  new_lshape->assign(new_lshape->begin(), new_lshape->begin() + NDim);
+  new_rshape->assign(new_rshape->begin(), new_rshape->begin() + NDim);
+  new_oshape->assign(new_oshape->begin(), new_oshape->begin() + NDim);
+});
+  } else {
+    LOG(FATAL) << "Too many broadcast dimensions with operands " << lshape << " " << rshape;
+  }
+  return j;
+}
+
+inline void CheckBroadcastable(const mxnet::TShape &from, const mxnet::TShape &to) {
+  const int bl = to.ndim() - from.ndim();
+  const int br = 0;
+  for (int i = 0; i < to.ndim(); ++i) {
+int l = 1, r = 1;
+if (i >= bl)
+  l = from[i - bl];
+if (i >= br)
+  r = to[i - br];
+if (!mxnet::dim_size_is_known(l) || !mxnet::dim_size_is_known(r))
+  continue;
+if (l != r) {
+  // Make it compatible with NumPy.
+  // For example, (2, 3) cannot broadcast to (2, 0, 3), but (1, 3) can
+  // broadcast to (2, 0, 3).
+  CHECK(l == 1 || r == 1)
+  << "operands could not be broadcast together with shapes " << from
+  << " " << to;
+}
+  }
+}
+
+inline void InferBroadcastShape(const mxnet::TShape &lhs, const mxnet::TShape &rhs,
+                                mxnet::TShape* out_ptr) {
+  mxnet::TShape& out = (*out_ptr);
+  const int bl = out.ndim() - lhs.ndim();
+  const int br = out.ndim() - rhs.ndim();
+  for (int i = 0; i < out.ndim(); ++i) {
+int l = 1, r = 1;
+if (i >= bl)
+  l = lhs[i - bl];
+if (i >= br)
+  r = rhs[i - br];
+if (!mxnet::dim_size_is_known(l) || !mxnet::dim_size_is_known(r))
+  continue;
+if (l != r) {
+  // Make it compatible with NumPy.
+  // For example, (2, 3) cannot broadcast to (2, 0, 3), but (1, 3) can
+  // broadcast to (2, 0, 3).
+  CHECK(l == 1 || r == 1)
+  << "operands could not be broadcast together with shapes " << lhs
+  << " " << rhs;
+  out[i] = (l == 1 ? r : l);
+} else {
+  out[i] = l;
+}
+  }
+}
+
+template
+inline bool TwoparamsDistOpShape(const nnvm::NodeAttrs &attrs,
+
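The body of TwoparamsDistOpShape is cut off above. Based on the `size` parameter description and the broadcast helpers in this file, the output-shape rule it presumably implements is: use `size` when given, otherwise broadcast the shapes of `low` and `high` (two scalars give a single value). A sketch of that rule, under that assumption:

import numpy as np

def resolve_output_shape(low, high, size=None):
    """Assumed output-shape rule of uniform(low, high, size), matching numpy.random.uniform."""
    if size is not None:
        return (size,) if isinstance(size, int) else tuple(size)
    lshape = np.shape(low)   # () for Python scalars
    hshape = np.shape(high)
    return np.broadcast_shapes(lshape, hshape)

print(resolve_output_shape(0.0, 1.0, size=(2, 3)))          # (2, 3)
print(resolve_output_shape(np.zeros((2, 1)), np.ones(3)))   # (2, 3)
print(resolve_output_shape(0.0, 1.0))                       # ()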

[GitHub] [incubator-mxnet] xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior random.uniform()

2019-08-12 Thread GitBox
xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior 
random.uniform()
URL: https://github.com/apache/incubator-mxnet/pull/15858#discussion_r313204436
 
 

 ##
 File path: src/operator/numpy/random/np_uniform_op.h
 ##
 @@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2019 by Contributors
+ * \file np_uniform_op.h
+ * \brief Operator for numpy sampling from uniform distributions
+ */
+#ifndef MXNET_OPERATOR_NUMPY_RANDOM_NP_UNIFORM_OP_H_
+#define MXNET_OPERATOR_NUMPY_RANDOM_NP_UNIFORM_OP_H_
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "./dist_common.h"
+#include "../../elemwise_op_common.h"
+#include "../../tensor/elemwise_binary_broadcast_op.h"
+#include "../../mshadow_op.h"
+#include "../../mxnet_op.h"
+#include "../../operator_common.h"
+
+namespace mxnet {
+namespace op {
+
+struct NumpyUniformParam : public dmlc::Parameter<NumpyUniformParam> {
+  dmlc::optional<float> low;
+  dmlc::optional<float> high;
+  std::string ctx;
+  int dtype;
+  dmlc::optional<mxnet::Tuple<int>> size;
+  DMLC_DECLARE_PARAMETER(NumpyUniformParam) {
+DMLC_DECLARE_FIELD(low);
+DMLC_DECLARE_FIELD(high);
+DMLC_DECLARE_FIELD(size)
+.set_default(dmlc::optional<mxnet::Tuple<int>>())
+.describe("Output shape. If the given shape is, "
+  "e.g., (m, n, k), then m * n * k samples are drawn. "
+  "Default is None, in which case a single value is returned.");
+DMLC_DECLARE_FIELD(ctx)
+.set_default("cpu")
+.describe("Context of output, in format [cpu|gpu|cpu_pinned](n)."
+  " Only used for imperative calls.");
+DMLC_DECLARE_FIELD(dtype)
+.add_enum("float32", mshadow::kFloat32)
+.add_enum("float64", mshadow::kFloat64)
+.add_enum("float16", mshadow::kFloat16)
+.set_default(mshadow::kFloat32)
+.describe("DType of the output in case this can't be inferred. "
+  "Defaults to float32 if not defined (dtype=None).");
+  }
+};
+
+inline bool NumpyUniformOpType(const nnvm::NodeAttrs &attrs,
+   std::vector<int> *in_attrs,
+   std::vector<int> *out_attrs) {
+  const NumpyUniformParam &param = nnvm::get<NumpyUniformParam>(attrs.parsed);
+  int otype = param.dtype;
+  if (otype != -1) {
+(*out_attrs)[0] = otype;
+  } else {
+(*out_attrs)[0] = mshadow::kFloat32;
+  }
+  return true;
+}
+
+namespace mxnet_op {
+template <int ndim, typename IType, typename OType>
+struct uniform_kernel {
+  MSHADOW_XINLINE static void Map(index_t i,
+  const Shape<ndim> &lstride, const Shape<ndim> &hstride,
+  const Shape<ndim> &oshape,
+  IType *low, IType *high,
+  float *uniform, OType *out) {
+  Shape<ndim> coord = unravel(i, oshape);
+  auto lidx = static_cast<index_t>(dot(coord, lstride));
+  auto hidx = static_cast<index_t>(dot(coord, hstride));
+  IType low_value = low[lidx];
+  IType high_value = high[hidx];
+  out[i] = low_value + uniform[i] * (high_value - low_value);
+  }
+};
+}  // namespace mxnet_op
+
+namespace mxnet_op {
+template <int ndim, typename IType, typename OType>
+struct uniform_one_scalar_kernel {
+  MSHADOW_XINLINE static void Map(index_t i, int scalar_pos,
+  const Shape<ndim> &stride,
+  const Shape<ndim> &oshape,
+  IType *array, float scalar,
+  float *uniform, OType *out) {
+  Shape<ndim> coord = unravel(i, oshape);
+  auto idx = static_cast<index_t>(dot(coord, stride));
+  IType low_value;
+  IType high_value;
+  if (scalar_pos == 0) {
+low_value = scalar;
+high_value = array[idx];
+  } else {
+low_value = array[idx];
+high_value = scalar;
+  }
+  out[i] = low_value + uniform[i] * (high_value - low_value);
+  }
+};
+}  // namespace mxnet_op
+
+namespace mxnet_op {
 
 Review comment:
   fixed!
   thx


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
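For context on the three kernels quoted in this file: they all apply the same affine map out = low + u * (high - low) with u ~ U[0, 1); the split into three structs exists because scalar and tensor operands are indexed differently on device. A NumPy sketch of the dispatch (the full NumpyUniformForward body is not shown in this quote, so the branch selection is an assumption):

import numpy as np

def uniform_forward(low, high, size):
    """Sketch of which kernel handles which (low, high) combination."""
    u = np.random.random_sample(size).astype(np.float32)
    if np.isscalar(low) and np.isscalar(high):
        kernel = "uniform_two_scalar_kernel"   # no tensor indexing at all
    elif np.isscalar(low) or np.isscalar(high):
        kernel = "uniform_one_scalar_kernel"   # scalar_pos marks the scalar side
    else:
        kernel = "uniform_kernel"              # both sides indexed via broadcast strides
    # NumPy broadcasting does in one line what the three C++ kernels specialize:
    out = np.asarray(low) + u * (np.asarray(high) - np.asarray(low))
    return kernel, out

print(uniform_forward(0.0, 1.0, (2, 2))[0])                     # uniform_two_scalar_kernel
print(uniform_forward(np.array([0.0, 10.0]), 20.0, (3, 2))[0])  # uniform_one_scalar_kernel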


[GitHub] [incubator-mxnet] xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior random.uniform()

2019-08-12 Thread GitBox
xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior 
random.uniform()
URL: https://github.com/apache/incubator-mxnet/pull/15858#discussion_r313204398
 
 

 ##
 File path: src/operator/numpy/random/np_uniform_op.h
 ##
 @@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2019 by Contributors
+ * \file np_uniform_op.h
+ * \brief Operator for numpy sampling from uniform distributions
+ */
+#ifndef MXNET_OPERATOR_NUMPY_RANDOM_NP_UNIFORM_OP_H_
+#define MXNET_OPERATOR_NUMPY_RANDOM_NP_UNIFORM_OP_H_
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "./dist_common.h"
+#include "../../elemwise_op_common.h"
+#include "../../tensor/elemwise_binary_broadcast_op.h"
+#include "../../mshadow_op.h"
+#include "../../mxnet_op.h"
+#include "../../operator_common.h"
+
+namespace mxnet {
+namespace op {
+
+struct NumpyUniformParam : public dmlc::Parameter<NumpyUniformParam> {
+  dmlc::optional<float> low;
+  dmlc::optional<float> high;
+  std::string ctx;
+  int dtype;
+  dmlc::optional<mxnet::Tuple<int>> size;
+  DMLC_DECLARE_PARAMETER(NumpyUniformParam) {
+DMLC_DECLARE_FIELD(low);
+DMLC_DECLARE_FIELD(high);
+DMLC_DECLARE_FIELD(size)
+.set_default(dmlc::optional<mxnet::Tuple<int>>())
+.describe("Output shape. If the given shape is, "
+  "e.g., (m, n, k), then m * n * k samples are drawn. "
+  "Default is None, in which case a single value is returned.");
+DMLC_DECLARE_FIELD(ctx)
+.set_default("cpu")
+.describe("Context of output, in format [cpu|gpu|cpu_pinned](n)."
+  " Only used for imperative calls.");
+DMLC_DECLARE_FIELD(dtype)
+.add_enum("float32", mshadow::kFloat32)
+.add_enum("float64", mshadow::kFloat64)
+.add_enum("float16", mshadow::kFloat16)
+.set_default(mshadow::kFloat32)
+.describe("DType of the output in case this can't be inferred. "
+  "Defaults to float32 if not defined (dtype=None).");
+  }
+};
+
+inline bool NumpyUniformOpType(const nnvm::NodeAttrs &attrs,
+   std::vector<int> *in_attrs,
+   std::vector<int> *out_attrs) {
+  const NumpyUniformParam &param = nnvm::get<NumpyUniformParam>(attrs.parsed);
+  int otype = param.dtype;
+  if (otype != -1) {
+(*out_attrs)[0] = otype;
+  } else {
+(*out_attrs)[0] = mshadow::kFloat32;
+  }
+  return true;
+}
+
+namespace mxnet_op {
+template 
+struct uniform_kernel {
+  MSHADOW_XINLINE static void Map(index_t i,
+  const Shape  , const Shape 
 ,
+  const Shape  ,
+  IType *low, IType *high,
+  float *uniform, OType *out) {
+  Shape coord = unravel(i, oshape);
+  auto lidx = static_cast(dot(coord, lstride));
+  auto hidx = static_cast(dot(coord, hstride));
+  IType low_value = low[lidx];
+  IType high_value = high[hidx];
+  out[i] = low_value + uniform[i] * (high_value - low_value);
+  }
+};
+}  // namespace mxnet_op
+
+namespace mxnet_op {
+template 
+struct uniform_one_scalar_kernel {
+  MSHADOW_XINLINE static void Map(index_t i, int scalar_pos,
+  const Shape  ,
+  const Shape  ,
+  IType *array, float scalar,
+  float *uniform, OType *out) {
+  Shape coord = unravel(i, oshape);
+  auto idx = static_cast(dot(coord, stride));
+  IType low_value;
+  IType high_value;
+  if (scalar_pos == 0) {
+low_value = scalar;
+high_value = array[idx];
+  } else {
+low_value = array[idx];
+high_value = scalar;
+  }
+  out[i] = low_value + uniform[i] * (high_value - low_value);
+  }
+};
+}  // namespace mxnet_op
+
+namespace mxnet_op {
+template 
+struct uniform_two_scalar_kernel {
+  MSHADOW_XINLINE static void Map(index_t i,
+  float low, float high,
+  float *uniform, OType *out) {
+  out[i] = low + uniform[i] * (high - low);
+  }
+};
+}  // namespace mxnet_op
+
+
+
+
 
 Review comment:
   > remove redundant blank lines.
   
   fixed, thx


This is 

[GitHub] [incubator-mxnet] xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior random.uniform()

2019-08-12 Thread GitBox
xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior 
random.uniform()
URL: https://github.com/apache/incubator-mxnet/pull/15858#discussion_r313204348
 
 

 ##
 File path: src/operator/numpy/random/np_uniform_op.h
 ##
 @@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2019 by Contributors
+ * \file np_uniform_op.h
+ * \brief Operator for numpy sampling from uniform distributions
+ */
+#ifndef MXNET_OPERATOR_NUMPY_RANDOM_NP_UNIFORM_OP_H_
+#define MXNET_OPERATOR_NUMPY_RANDOM_NP_UNIFORM_OP_H_
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "./dist_common.h"
+#include "../../elemwise_op_common.h"
+#include "../../tensor/elemwise_binary_broadcast_op.h"
+#include "../../mshadow_op.h"
+#include "../../mxnet_op.h"
+#include "../../operator_common.h"
+
+namespace mxnet {
+namespace op {
+
+struct NumpyUniformParam : public dmlc::Parameter<NumpyUniformParam> {
+  dmlc::optional<float> low;
+  dmlc::optional<float> high;
+  std::string ctx;
+  int dtype;
+  dmlc::optional<mxnet::Tuple<int>> size;
+  DMLC_DECLARE_PARAMETER(NumpyUniformParam) {
+DMLC_DECLARE_FIELD(low);
+DMLC_DECLARE_FIELD(high);
+DMLC_DECLARE_FIELD(size)
+.set_default(dmlc::optional<mxnet::Tuple<int>>())
+.describe("Output shape. If the given shape is, "
+  "e.g., (m, n, k), then m * n * k samples are drawn. "
+  "Default is None, in which case a single value is returned.");
+DMLC_DECLARE_FIELD(ctx)
+.set_default("cpu")
+.describe("Context of output, in format [cpu|gpu|cpu_pinned](n)."
+  " Only used for imperative calls.");
+DMLC_DECLARE_FIELD(dtype)
+.add_enum("float32", mshadow::kFloat32)
+.add_enum("float64", mshadow::kFloat64)
+.add_enum("float16", mshadow::kFloat16)
+.set_default(mshadow::kFloat32)
+.describe("DType of the output in case this can't be inferred. "
+  "Defaults to float32 if not defined (dtype=None).");
+  }
+};
+
+inline bool NumpyUniformOpType(const nnvm::NodeAttrs &attrs,
+   std::vector<int> *in_attrs,
+   std::vector<int> *out_attrs) {
+  const NumpyUniformParam &param = nnvm::get<NumpyUniformParam>(attrs.parsed);
+  int otype = param.dtype;
+  if (otype != -1) {
+(*out_attrs)[0] = otype;
+  } else {
+(*out_attrs)[0] = mshadow::kFloat32;
+  }
+  return true;
+}
+
+namespace mxnet_op {
+template 
+struct uniform_kernel {
+  MSHADOW_XINLINE static void Map(index_t i,
+  const Shape  , const Shape 
 ,
+  const Shape  ,
+  IType *low, IType *high,
+  float *uniform, OType *out) {
+  Shape coord = unravel(i, oshape);
+  auto lidx = static_cast(dot(coord, lstride));
+  auto hidx = static_cast(dot(coord, hstride));
+  IType low_value = low[lidx];
+  IType high_value = high[hidx];
+  out[i] = low_value + uniform[i] * (high_value - low_value);
+  }
+};
+}  // namespace mxnet_op
+
+namespace mxnet_op {
+template 
+struct uniform_one_scalar_kernel {
+  MSHADOW_XINLINE static void Map(index_t i, int scalar_pos,
+  const Shape  ,
+  const Shape  ,
+  IType *array, float scalar,
+  float *uniform, OType *out) {
+  Shape coord = unravel(i, oshape);
+  auto idx = static_cast(dot(coord, stride));
+  IType low_value;
+  IType high_value;
+  if (scalar_pos == 0) {
+low_value = scalar;
+high_value = array[idx];
+  } else {
+low_value = array[idx];
+high_value = scalar;
+  }
+  out[i] = low_value + uniform[i] * (high_value - low_value);
+  }
+};
+}  // namespace mxnet_op
+
+namespace mxnet_op {
+template 
+struct uniform_two_scalar_kernel {
+  MSHADOW_XINLINE static void Map(index_t i,
+  float low, float high,
+  float *uniform, OType *out) {
+  out[i] = low + uniform[i] * (high - low);
+  }
+};
+}  // namespace mxnet_op
+
+
+
+
+template <typename xpu>
+void NumpyUniformForward(const nnvm::NodeAttrs &attrs, const OpContext &ctx,
+                         const std::vector<TBlob> &inputs,
+  

[GitHub] [incubator-mxnet] xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior random.uniform()

2019-08-12 Thread GitBox
xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior 
random.uniform()
URL: https://github.com/apache/incubator-mxnet/pull/15858#discussion_r313204152
 
 

 ##
 File path: src/operator/numpy/random/np_uniform_op.h
 ##
 @@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2019 by Contributors
+ * \file np_uniform_op.h
+ * \brief Operator for numpy sampling from uniform distributions
+ */
+#ifndef MXNET_OPERATOR_NUMPY_RANDOM_NP_UNIFORM_OP_H_
+#define MXNET_OPERATOR_NUMPY_RANDOM_NP_UNIFORM_OP_H_
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "./dist_common.h"
+#include "../../elemwise_op_common.h"
+#include "../../tensor/elemwise_binary_broadcast_op.h"
+#include "../../mshadow_op.h"
+#include "../../mxnet_op.h"
+#include "../../operator_common.h"
+
+namespace mxnet {
+namespace op {
+
+struct NumpyUniformParam : public dmlc::Parameter<NumpyUniformParam> {
+  dmlc::optional<float> low;
+  dmlc::optional<float> high;
+  std::string ctx;
+  int dtype;
+  dmlc::optional<mxnet::Tuple<int>> size;
+  DMLC_DECLARE_PARAMETER(NumpyUniformParam) {
+DMLC_DECLARE_FIELD(low);
+DMLC_DECLARE_FIELD(high);
+DMLC_DECLARE_FIELD(size)
+.set_default(dmlc::optional<mxnet::Tuple<int>>())
+.describe("Output shape. If the given shape is, "
+  "e.g., (m, n, k), then m * n * k samples are drawn. "
+  "Default is None, in which case a single value is returned.");
+DMLC_DECLARE_FIELD(ctx)
+.set_default("cpu")
+.describe("Context of output, in format [cpu|gpu|cpu_pinned](n)."
+  " Only used for imperative calls.");
+DMLC_DECLARE_FIELD(dtype)
+.add_enum("float32", mshadow::kFloat32)
+.add_enum("float64", mshadow::kFloat64)
+.add_enum("float16", mshadow::kFloat16)
+.set_default(mshadow::kFloat32)
+.describe("DType of the output in case this can't be inferred. "
+  "Defaults to float32 if not defined (dtype=None).");
+  }
+};
+
+inline bool NumpyUniformOpType(const nnvm::NodeAttrs &attrs,
+   std::vector<int> *in_attrs,
+   std::vector<int> *out_attrs) {
+  const NumpyUniformParam &param = nnvm::get<NumpyUniformParam>(attrs.parsed);
+  int otype = param.dtype;
+  if (otype != -1) {
+(*out_attrs)[0] = otype;
+  } else {
+(*out_attrs)[0] = mshadow::kFloat32;
+  }
+  return true;
+}
+
+namespace mxnet_op {
+template 
+struct uniform_kernel {
+  MSHADOW_XINLINE static void Map(index_t i,
+  const Shape  , const Shape 
 ,
+  const Shape  ,
+  IType *low, IType *high,
+  float *uniform, OType *out) {
+  Shape coord = unravel(i, oshape);
+  auto lidx = static_cast(dot(coord, lstride));
+  auto hidx = static_cast(dot(coord, hstride));
+  IType low_value = low[lidx];
+  IType high_value = high[hidx];
+  out[i] = low_value + uniform[i] * (high_value - low_value);
+  }
+};
+}  // namespace mxnet_op
+
+namespace mxnet_op {
+template 
+struct uniform_one_scalar_kernel {
+  MSHADOW_XINLINE static void Map(index_t i, int scalar_pos,
+  const Shape  ,
+  const Shape  ,
+  IType *array, float scalar,
+  float *uniform, OType *out) {
+  Shape coord = unravel(i, oshape);
+  auto idx = static_cast(dot(coord, stride));
+  IType low_value;
+  IType high_value;
+  if (scalar_pos == 0) {
+low_value = scalar;
+high_value = array[idx];
+  } else {
+low_value = array[idx];
+high_value = scalar;
+  }
+  out[i] = low_value + uniform[i] * (high_value - low_value);
+  }
+};
+}  // namespace mxnet_op
+
+namespace mxnet_op {
+template 
+struct uniform_two_scalar_kernel {
+  MSHADOW_XINLINE static void Map(index_t i,
+  float low, float high,
+  float *uniform, OType *out) {
+  out[i] = low + uniform[i] * (high - low);
+  }
+};
+}  // namespace mxnet_op
+
+
+
+
+template <typename xpu>
+void NumpyUniformForward(const nnvm::NodeAttrs &attrs, const OpContext &ctx,
+                         const std::vector<TBlob> &inputs,
+  

[GitHub] [incubator-mxnet] xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior random.uniform()

2019-08-12 Thread GitBox
xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior 
random.uniform()
URL: https://github.com/apache/incubator-mxnet/pull/15858#discussion_r313203593
 
 

 ##
 File path: src/operator/numpy/random/dist_common.h
 ##
 @@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ *  Copyright (c) 2015 by Contributors
+ * \file etwoparams_dist_common.h
 
 Review comment:
   fixed, thx for pointing out


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior random.uniform()

2019-08-12 Thread GitBox
xidulu commented on a change in pull request #15858: [Numpy] Numpy behavior 
random.uniform()
URL: https://github.com/apache/incubator-mxnet/pull/15858#discussion_r313203370
 
 

 ##
 File path: python/mxnet/ndarray/numpy/random.py
 ##
 @@ -17,5 +17,65 @@
 
 """Namespace for operators used in Gluon dispatched by F=ndarray."""
 from __future__ import absolute_import
+from ...context import current_context
+from . import _internal as _npi
 
-__all__ = []
+__all__ = ['uniform']
+
+
+def uniform(low=0.0, high=1.0, size=None, ctx=None, dtype=None, out=None):
 
 Review comment:
   > Argument order here should be the same as your documentation.
   
   fixed! thx for suggestion.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
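For completeness, a usage sketch of the front-end discussed in this thread. The exact user-facing import path (assumed here to be mxnet.numpy.random.uniform behind npx.set_np()) is an assumption, not confirmed by these comments:

# Hypothetical usage; import path and npx.set_np() requirement are assumptions.
import mxnet as mx
from mxnet import npx
npx.set_np()

# Scalar low/high with an explicit output shape (the two-scalar kernel).
a = mx.np.random.uniform(low=0.0, high=10.0, size=(2, 3))

# Tensor low/high broadcast against each other (the broadcast kernel).
low = mx.np.array([[0.0], [100.0]])   # shape (2, 1)
high = mx.np.array([1.0, 2.0, 3.0])   # shape (3,)
b = mx.np.random.uniform(low, high)   # output shape (2, 3)

print(a.shape, b.shape)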