[GitHub] [incubator-mxnet] haojin2 commented on a change in pull request #15905: [Numpy] Basic indexing in symbolic interface

2019-08-15 Thread GitBox
haojin2 commented on a change in pull request #15905: [Numpy] Basic indexing in 
symbolic interface
URL: https://github.com/apache/incubator-mxnet/pull/15905#discussion_r314582942
 
 

 ##
 File path: src/operator/numpy/np_matrix_op.cc
 ##
 @@ -135,17 +136,165 @@ bool NumpyReshapeInferShape(const mxnet::TShape& src, 
mxnet::TShape* dst) {
   }
 }
 
+bool NumpyXReshapeInferShape(const mxnet::TShape& src,
+ const mxnet::Tuple<dim_t>& target,
+ mxnet::TShape* output) {
+  bool target_shape_is_known = true;
+  dim_t target_size = 1;
+  for (int i = 0; i < target.ndim(); ++i) {
+if (target[i] < 0) {
+  target_shape_is_known = false;
+  target_size  = -1;
+  break;
+} else {
+  target_size *= target[i];
+}
+  }
+  if (shape_is_known(src) && target_shape_is_known) {
+CHECK_EQ(src.Size(), target_size) << "Cannot reshape array of size "
+  << src.Size() << " into shape " << 
target;
+*output = TShape(target.begin(), target.end());
+return true;
+  } else if (!shape_is_known(src) || target.ndim() == -1) {
+return false;
+  } else {
+int unknown_axis = -1;
+dim_t known_dim_size_prod = 1;
+std::vector<dim_t> output_shape_vector;
+int src_inx = 0;
+for (int i = 0; i < target.ndim(); ++i) {
+  dim_t proposed_dim = target[i];
+  CHECK(proposed_dim >= -6)
+<< "Dimension size must be greater than -6, received " << proposed_dim;
+  if (proposed_dim == -1) {
+// infer the known dimension
+CHECK_LT(unknown_axis, 0)
+  << "One and only one dim can be inferred";
+unknown_axis = output_shape_vector.size();
+output_shape_vector.push_back(1);
+src_inx++;
+  } else if (proposed_dim == -2) {
+// copy the dimension from src to output
+CHECK_LT(src_inx, src.ndim())
+  << "Unmatching dimension of proposed new shape";
+known_dim_size_prod *= src[src_inx];
+output_shape_vector.push_back(src[src_inx++]);
+  } else if (proposed_dim == -3) {
+// skip the source dimension if and only if it is one
+CHECK_EQ(src[src_inx], 1)
+  <<"-3 index should only be used to skip dimision size 1";
+src_inx++;
+  } else if (proposed_dim == -4) {
+// copy all remaining dims from source
+while (src_inx < src.ndim()) {
+  known_dim_size_prod *= src[src_inx];
+  const int dn = src[src_inx++];
+  output_shape_vector.push_back(dn);
+}
+  } else if (proposed_dim == -5) {
+// merge two dims from source
+CHECK_LT(src_inx, src.ndim()-1)
+  <<"Not enough dimensions left for the product";
+const int d1 = src[src_inx++];
+const int d2 = src[src_inx++];
+if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) {
+  CHECK_LT(unknown_axis, 0)
+  << "One and only one dim can be inferred";
+  unknown_axis = output_shape_vector.size();
+  output_shape_vector.push_back(-1);
+} else {
+  known_dim_size_prod *= d1*d2;
+  output_shape_vector.push_back(d1 * d2);
+}
+  } else if (proposed_dim == -6) {
+// split the source dim s into two dims
+// read the left dim and then the right dim (either can be -1)
+CHECK_LT(i + 2, target.ndim());
+CHECK_LT(src_inx, src.ndim());
+const int d0 = src[src_inx++];
+dim_t d1 = target[++i];
+dim_t d2 = target[++i];
+CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1.";
+if (d1 == -1 && d0 >= 0) d1 = d0 / d2;  // d0 must be known to do this
+if (d2 == -1 && d0 >= 0) d2 = d0 / d1;  // d0 must be known to do this
+CHECK(d1 * d2 == static_cast<dim_t>(d0) || static_cast<dim_t>(d0) == dim_t(-1))
+  <<"Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0;
+if (d1 == -1) {
+  CHECK_LT(unknown_axis, 0)
+  << "One and only one dim can be inferred";
+  unknown_axis = output_shape_vector.size();
+} else if (d2 == -1) {
+  CHECK_LT(unknown_axis, 0)
+  << "One and only one dim can be inferred";
+  unknown_axis = output_shape_vector.size() + 1;
+}
+known_dim_size_prod *= d0 == -1 ? 1 : d0;
+output_shape_vector.push_back(d1);
+output_shape_vector.push_back(d2);
+  } else {
+// greater than 0, new shape
+known_dim_size_prod *= proposed_dim;
+output_shape_vector.push_back(proposed_dim);
+src_inx++;
+  }
+}
+
+if (unknown_axis > -1) {
+  // if the input in zero size tensor, the output must be of known shape 
of zero size
+  CHECK_NE(known_dim_size_prod, 0) << "Cannot reshape array of size "
+  << src.Size() << " into shape " << 
target;
+  CHECK(src.Size() % known_dim_size_prod == 0)
+  

[GitHub] [incubator-mxnet] haojin2 commented on a change in pull request #15905: [Numpy] Basic indexing in symbolic interface

2019-08-15 Thread GitBox
haojin2 commented on a change in pull request #15905: [Numpy] Basic indexing in 
symbolic interface
URL: https://github.com/apache/incubator-mxnet/pull/15905#discussion_r314582869
 
 

 ##
 File path: src/operator/numpy/np_matrix_op.cc
 ##
 @@ -135,17 +136,165 @@ bool NumpyReshapeInferShape(const mxnet::TShape& src, 
mxnet::TShape* dst) {
   }
 }
 
+bool NumpyXReshapeInferShape(const mxnet::TShape& src,
+ const mxnet::Tuple<dim_t>& target,
+ mxnet::TShape* output) {
+  bool target_shape_is_known = true;
+  dim_t target_size = 1;
+  for (int i = 0; i < target.ndim(); ++i) {
+if (target[i] < 0) {
+  target_shape_is_known = false;
+  target_size  = -1;
+  break;
+} else {
+  target_size *= target[i];
+}
+  }
+  if (shape_is_known(src) && target_shape_is_known) {
+CHECK_EQ(src.Size(), target_size) << "Cannot reshape array of size "
+  << src.Size() << " into shape " << 
target;
+*output = TShape(target.begin(), target.end());
+return true;
+  } else if (!shape_is_known(src) || target.ndim() == -1) {
+return false;
+  } else {
+int unknown_axis = -1;
+dim_t known_dim_size_prod = 1;
+std::vector<dim_t> output_shape_vector;
+int src_inx = 0;
+for (int i = 0; i < target.ndim(); ++i) {
+  dim_t proposed_dim = target[i];
+  CHECK(proposed_dim >= -6)
+<< "Dimension size must be greater than -6, received " << proposed_dim;
+  if (proposed_dim == -1) {
+// infer the known dimension
+CHECK_LT(unknown_axis, 0)
+  << "One and only one dim can be inferred";
+unknown_axis = output_shape_vector.size();
+output_shape_vector.push_back(1);
+src_inx++;
+  } else if (proposed_dim == -2) {
+// copy the dimension from src to output
+CHECK_LT(src_inx, src.ndim())
+  << "Unmatching dimension of proposed new shape";
+known_dim_size_prod *= src[src_inx];
+output_shape_vector.push_back(src[src_inx++]);
+  } else if (proposed_dim == -3) {
+// skip the source dimension if and only if it is one
+CHECK_EQ(src[src_inx], 1)
+  <<"-3 index should only be used to skip dimision size 1";
+src_inx++;
+  } else if (proposed_dim == -4) {
+// copy all remaining dims from source
+while (src_inx < src.ndim()) {
+  known_dim_size_prod *= src[src_inx];
+  const int dn = src[src_inx++];
+  output_shape_vector.push_back(dn);
+}
+  } else if (proposed_dim == -5) {
+// merge two dims from source
+CHECK_LT(src_inx, src.ndim()-1)
+  <<"Not enough dimensions left for the product";
+const int d1 = src[src_inx++];
+const int d2 = src[src_inx++];
+if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) {
+  CHECK_LT(unknown_axis, 0)
+  << "One and only one dim can be inferred";
+  unknown_axis = output_shape_vector.size();
+  output_shape_vector.push_back(-1);
+} else {
+  known_dim_size_prod *= d1*d2;
+  output_shape_vector.push_back(d1 * d2);
+}
+  } else if (proposed_dim == -6) {
+// split the source dim s into two dims
+// read the left dim and then the right dim (either can be -1)
+CHECK_LT(i + 2, target.ndim());
+CHECK_LT(src_inx, src.ndim());
+const int d0 = src[src_inx++];
+dim_t d1 = target[++i];
+dim_t d2 = target[++i];
+CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1.";
+if (d1 == -1 && d0 >= 0) d1 = d0 / d2;  // d0 must be known to do this
+if (d2 == -1 && d0 >= 0) d2 = d0 / d1;  // d0 must be known to do this
+CHECK(d1 * d2 == static_cast<dim_t>(d0) || static_cast<dim_t>(d0) == dim_t(-1))
+  <<"Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0;
+if (d1 == -1) {
+  CHECK_LT(unknown_axis, 0)
+  << "One and only one dim can be inferred";
+  unknown_axis = output_shape_vector.size();
+} else if (d2 == -1) {
+  CHECK_LT(unknown_axis, 0)
+  << "One and only one dim can be inferred";
+  unknown_axis = output_shape_vector.size() + 1;
+}
+known_dim_size_prod *= d0 == -1 ? 1 : d0;
+output_shape_vector.push_back(d1);
+output_shape_vector.push_back(d2);
+  } else {
+// greater than 0, new shape
+known_dim_size_prod *= proposed_dim;
+output_shape_vector.push_back(proposed_dim);
+src_inx++;
+  }
+}
+
+if (unknown_axis > -1) {
+  // if the input in zero size tensor, the output must be of known shape 
of zero size
+  CHECK_NE(known_dim_size_prod, 0) << "Cannot reshape array of size "
+  << src.Size() << " into shape " << 
target;
 
 Review comment:
   Alignment:
   ```c++
 

[GitHub] [incubator-mxnet] haojin2 commented on a change in pull request #15905: [Numpy] Basic indexing in symbolic interface

2019-08-15 Thread GitBox
haojin2 commented on a change in pull request #15905: [Numpy] Basic indexing in 
symbolic interface
URL: https://github.com/apache/incubator-mxnet/pull/15905#discussion_r314582829
 
 

 ##
 File path: src/operator/numpy/np_matrix_op.cc
 ##
 @@ -135,17 +136,165 @@ bool NumpyReshapeInferShape(const mxnet::TShape& src, 
mxnet::TShape* dst) {
   }
 }
 
+bool NumpyXReshapeInferShape(const mxnet::TShape& src,
+ const mxnet::Tuple<dim_t>& target,
+ mxnet::TShape* output) {
+  bool target_shape_is_known = true;
+  dim_t target_size = 1;
+  for (int i = 0; i < target.ndim(); ++i) {
+if (target[i] < 0) {
+  target_shape_is_known = false;
+  target_size  = -1;
+  break;
+} else {
+  target_size *= target[i];
+}
+  }
+  if (shape_is_known(src) && target_shape_is_known) {
+CHECK_EQ(src.Size(), target_size) << "Cannot reshape array of size "
+  << src.Size() << " into shape " << 
target;
+*output = TShape(target.begin(), target.end());
+return true;
+  } else if (!shape_is_known(src) || target.ndim() == -1) {
+return false;
+  } else {
+int unknown_axis = -1;
+dim_t known_dim_size_prod = 1;
+std::vector<dim_t> output_shape_vector;
+int src_inx = 0;
+for (int i = 0; i < target.ndim(); ++i) {
+  dim_t proposed_dim = target[i];
+  CHECK(proposed_dim >= -6)
+<< "Dimension size must be greater than -6, received " << proposed_dim;
+  if (proposed_dim == -1) {
+// infer the known dimension
+CHECK_LT(unknown_axis, 0)
+  << "One and only one dim can be inferred";
+unknown_axis = output_shape_vector.size();
+output_shape_vector.push_back(1);
+src_inx++;
+  } else if (proposed_dim == -2) {
+// copy the dimension from src to output
+CHECK_LT(src_inx, src.ndim())
+  << "Unmatching dimension of proposed new shape";
+known_dim_size_prod *= src[src_inx];
+output_shape_vector.push_back(src[src_inx++]);
+  } else if (proposed_dim == -3) {
+// skip the source dimension if and only if it is one
+CHECK_EQ(src[src_inx], 1)
+  <<"-3 index should only be used to skip dimision size 1";
+src_inx++;
+  } else if (proposed_dim == -4) {
+// copy all remaining dims from source
+while (src_inx < src.ndim()) {
+  known_dim_size_prod *= src[src_inx];
+  const int dn = src[src_inx++];
+  output_shape_vector.push_back(dn);
+}
+  } else if (proposed_dim == -5) {
+// merge two dims from source
+CHECK_LT(src_inx, src.ndim()-1)
+  <<"Not enough dimensions left for the product";
+const int d1 = src[src_inx++];
+const int d2 = src[src_inx++];
+if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) {
+  CHECK_LT(unknown_axis, 0)
+  << "One and only one dim can be inferred";
+  unknown_axis = output_shape_vector.size();
+  output_shape_vector.push_back(-1);
+} else {
+  known_dim_size_prod *= d1*d2;
+  output_shape_vector.push_back(d1 * d2);
+}
+  } else if (proposed_dim == -6) {
+// split the source dim s into two dims
+// read the left dim and then the right dim (either can be -1)
+CHECK_LT(i + 2, target.ndim());
+CHECK_LT(src_inx, src.ndim());
+const int d0 = src[src_inx++];
+dim_t d1 = target[++i];
+dim_t d2 = target[++i];
+CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1.";
+if (d1 == -1 && d0 >= 0) d1 = d0 / d2;  // d0 must be known to do this
+if (d2 == -1 && d0 >= 0) d2 = d0 / d1;  // d0 must be known to do this
+CHECK(d1 * d2 == static_cast<dim_t>(d0) || static_cast<dim_t>(d0) == dim_t(-1))
+  <<"Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0;
+if (d1 == -1) {
+  CHECK_LT(unknown_axis, 0)
+  << "One and only one dim can be inferred";
 
 Review comment:
   Same for all applicable places.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] haojin2 commented on a change in pull request #15905: [Numpy] Basic indexing in symbolic interface

2019-08-15 Thread GitBox
haojin2 commented on a change in pull request #15905: [Numpy] Basic indexing in 
symbolic interface
URL: https://github.com/apache/incubator-mxnet/pull/15905#discussion_r314582898
 
 

 ##
 File path: src/operator/numpy/np_matrix_op.cc
 ##
 @@ -135,17 +136,165 @@ bool NumpyReshapeInferShape(const mxnet::TShape& src, 
mxnet::TShape* dst) {
   }
 }
 
+bool NumpyXReshapeInferShape(const mxnet::TShape& src,
+ const mxnet::Tuple<dim_t>& target,
+ mxnet::TShape* output) {
+  bool target_shape_is_known = true;
+  dim_t target_size = 1;
+  for (int i = 0; i < target.ndim(); ++i) {
+if (target[i] < 0) {
+  target_shape_is_known = false;
+  target_size  = -1;
+  break;
+} else {
+  target_size *= target[i];
+}
+  }
+  if (shape_is_known(src) && target_shape_is_known) {
+CHECK_EQ(src.Size(), target_size) << "Cannot reshape array of size "
+  << src.Size() << " into shape " << 
target;
+*output = TShape(target.begin(), target.end());
+return true;
+  } else if (!shape_is_known(src) || target.ndim() == -1) {
+return false;
+  } else {
+int unknown_axis = -1;
+dim_t known_dim_size_prod = 1;
+std::vector<dim_t> output_shape_vector;
+int src_inx = 0;
+for (int i = 0; i < target.ndim(); ++i) {
+  dim_t proposed_dim = target[i];
+  CHECK(proposed_dim >= -6)
+<< "Dimension size must be greater than -6, received " << proposed_dim;
+  if (proposed_dim == -1) {
+// infer the known dimension
+CHECK_LT(unknown_axis, 0)
+  << "One and only one dim can be inferred";
+unknown_axis = output_shape_vector.size();
+output_shape_vector.push_back(1);
+src_inx++;
+  } else if (proposed_dim == -2) {
+// copy the dimension from src to output
+CHECK_LT(src_inx, src.ndim())
+  << "Unmatching dimension of proposed new shape";
+known_dim_size_prod *= src[src_inx];
+output_shape_vector.push_back(src[src_inx++]);
+  } else if (proposed_dim == -3) {
+// skip the source dimension if and only if it is one
+CHECK_EQ(src[src_inx], 1)
+  <<"-3 index should only be used to skip dimision size 1";
+src_inx++;
+  } else if (proposed_dim == -4) {
+// copy all remaining dims from source
+while (src_inx < src.ndim()) {
+  known_dim_size_prod *= src[src_inx];
+  const int dn = src[src_inx++];
+  output_shape_vector.push_back(dn);
+}
+  } else if (proposed_dim == -5) {
+// merge two dims from source
+CHECK_LT(src_inx, src.ndim()-1)
+  <<"Not enough dimensions left for the product";
+const int d1 = src[src_inx++];
+const int d2 = src[src_inx++];
+if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) {
+  CHECK_LT(unknown_axis, 0)
+  << "One and only one dim can be inferred";
+  unknown_axis = output_shape_vector.size();
+  output_shape_vector.push_back(-1);
+} else {
+  known_dim_size_prod *= d1*d2;
+  output_shape_vector.push_back(d1 * d2);
+}
+  } else if (proposed_dim == -6) {
+// split the source dim s into two dims
+// read the left dim and then the right dim (either can be -1)
+CHECK_LT(i + 2, target.ndim());
+CHECK_LT(src_inx, src.ndim());
+const int d0 = src[src_inx++];
+dim_t d1 = target[++i];
+dim_t d2 = target[++i];
+CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1.";
+if (d1 == -1 && d0 >= 0) d1 = d0 / d2;  // d0 must be known to do this
+if (d2 == -1 && d0 >= 0) d2 = d0 / d1;  // d0 must be known to do this
+CHECK(d1 * d2 == static_cast<dim_t>(d0) || static_cast<dim_t>(d0) == dim_t(-1))
+  <<"Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0;
+if (d1 == -1) {
+  CHECK_LT(unknown_axis, 0)
+  << "One and only one dim can be inferred";
+  unknown_axis = output_shape_vector.size();
+} else if (d2 == -1) {
+  CHECK_LT(unknown_axis, 0)
+  << "One and only one dim can be inferred";
+  unknown_axis = output_shape_vector.size() + 1;
+}
+known_dim_size_prod *= d0 == -1 ? 1 : d0;
+output_shape_vector.push_back(d1);
+output_shape_vector.push_back(d2);
+  } else {
+// greater than 0, new shape
+known_dim_size_prod *= proposed_dim;
+output_shape_vector.push_back(proposed_dim);
+src_inx++;
+  }
+}
+
+if (unknown_axis > -1) {
+  // if the input in zero size tensor, the output must be of known shape 
of zero size
+  CHECK_NE(known_dim_size_prod, 0) << "Cannot reshape array of size "
+  << src.Size() << " into shape " << 
target;
+  CHECK(src.Size() % known_dim_size_prod == 0)
+  

[GitHub] [incubator-mxnet] haojin2 commented on a change in pull request #15905: [Numpy] Basic indexing in symbolic interface

2019-08-15 Thread GitBox
haojin2 commented on a change in pull request #15905: [Numpy] Basic indexing in 
symbolic interface
URL: https://github.com/apache/incubator-mxnet/pull/15905#discussion_r314582779
 
 

 ##
 File path: src/operator/numpy/np_matrix_op.cc
 ##
 @@ -135,17 +136,165 @@ bool NumpyReshapeInferShape(const mxnet::TShape& src, 
mxnet::TShape* dst) {
   }
 }
 
+bool NumpyXReshapeInferShape(const mxnet::TShape& src,
+ const mxnet::Tuple<dim_t>& target,
+ mxnet::TShape* output) {
+  bool target_shape_is_known = true;
+  dim_t target_size = 1;
+  for (int i = 0; i < target.ndim(); ++i) {
+if (target[i] < 0) {
+  target_shape_is_known = false;
+  target_size  = -1;
+  break;
+} else {
+  target_size *= target[i];
+}
+  }
+  if (shape_is_known(src) && target_shape_is_known) {
+CHECK_EQ(src.Size(), target_size) << "Cannot reshape array of size "
+  << src.Size() << " into shape " << 
target;
+*output = TShape(target.begin(), target.end());
+return true;
+  } else if (!shape_is_known(src) || target.ndim() == -1) {
+return false;
+  } else {
+int unknown_axis = -1;
+dim_t known_dim_size_prod = 1;
+std::vector<dim_t> output_shape_vector;
+int src_inx = 0;
+for (int i = 0; i < target.ndim(); ++i) {
+  dim_t proposed_dim = target[i];
+  CHECK(proposed_dim >= -6)
+<< "Dimension size must be greater than -6, received " << proposed_dim;
+  if (proposed_dim == -1) {
+// infer the known dimension
+CHECK_LT(unknown_axis, 0)
+  << "One and only one dim can be inferred";
+unknown_axis = output_shape_vector.size();
+output_shape_vector.push_back(1);
+src_inx++;
+  } else if (proposed_dim == -2) {
+// copy the dimension from src to output
+CHECK_LT(src_inx, src.ndim())
+  << "Unmatching dimension of proposed new shape";
+known_dim_size_prod *= src[src_inx];
+output_shape_vector.push_back(src[src_inx++]);
+  } else if (proposed_dim == -3) {
+// skip the source dimension if and only if it is one
+CHECK_EQ(src[src_inx], 1)
+  <<"-3 index should only be used to skip dimision size 1";
+src_inx++;
+  } else if (proposed_dim == -4) {
+// copy all remaining dims from source
+while (src_inx < src.ndim()) {
+  known_dim_size_prod *= src[src_inx];
+  const int dn = src[src_inx++];
+  output_shape_vector.push_back(dn);
+}
+  } else if (proposed_dim == -5) {
+// merge two dims from source
+CHECK_LT(src_inx, src.ndim()-1)
+  <<"Not enough dimensions left for the product";
+const int d1 = src[src_inx++];
+const int d2 = src[src_inx++];
+if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) {
+  CHECK_LT(unknown_axis, 0)
+  << "One and only one dim can be inferred";
+  unknown_axis = output_shape_vector.size();
+  output_shape_vector.push_back(-1);
+} else {
+  known_dim_size_prod *= d1*d2;
+  output_shape_vector.push_back(d1 * d2);
+}
+  } else if (proposed_dim == -6) {
+// split the source dim s into two dims
+// read the left dim and then the right dim (either can be -1)
+CHECK_LT(i + 2, target.ndim());
+CHECK_LT(src_inx, src.ndim());
+const int d0 = src[src_inx++];
+dim_t d1 = target[++i];
+dim_t d2 = target[++i];
+CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1.";
+if (d1 == -1 && d0 >= 0) d1 = d0 / d2;  // d0 must be known to do this
+if (d2 == -1 && d0 >= 0) d2 = d0 / d1;  // d0 must be known to do this
+CHECK(d1 * d2 == static_cast<dim_t>(d0) || static_cast<dim_t>(d0) == dim_t(-1))
+  <<"Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0;
+if (d1 == -1) {
+  CHECK_LT(unknown_axis, 0)
+  << "One and only one dim can be inferred";
 
 Review comment:
   indent here:
   ```c++
 CHECK_LT(unknown_axis, 0)
   << "One and only one dim can be inferred";
   ```


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] haojin2 commented on a change in pull request #15905: [Numpy] Basic indexing in symbolic interface

2019-08-15 Thread GitBox
haojin2 commented on a change in pull request #15905: [Numpy] Basic indexing in 
symbolic interface
URL: https://github.com/apache/incubator-mxnet/pull/15905#discussion_r314582546
 
 

 ##
 File path: python/mxnet/symbol/numpy/_symbol.py
 ##
 @@ -39,25 +46,90 @@ def _num_outputs(sym):
 
 @set_module('mxnet.symbol.numpy')
 class _Symbol(Symbol):
-def __getitem__(self, key):
-num_outputs = _num_outputs(self)
-if num_outputs == 1:
-raise NotImplementedError
-if not isinstance(key, int):
+def __init__(self, handle):
+super(_Symbol, self).__init__(handle)
+self._output_is_list = False
+
+def __getitem__(self, key): # pylint: disable = 
too-many-return-statements, inconsistent-return-statements
+num_outputs = len(self)
+# print("Num of outputs is ", num_outputs)
+if num_outputs == 1: # pylint: disable = too-many-nested-blocks
+# If number of output is one and is not a list, perform ndarray 
basic slicing
+if not self._output_is_list:
+if isinstance(key, integer_types):
+sliced = _npi.slice(self, key, key+1)
+return _npi.reshape(sliced, (-3, -4))
+elif isinstance(key, py_slice):
+if key.step is None or key.step != 0:
+start = [None] if key.start is None else key.start
+stop = [None] if key.stop is None else key.stop
+return _npi.slice(self, start, stop, key.step)
+else:
+raise ValueError("slice step cannot be zero")
+elif isinstance(key, list):
+raise NotImplementedError
+elif isinstance(key, tuple):
+begin = []
+end = []
+step = []
+new_shape = ()
+for index in key:
+if isinstance(index, py_slice):
+if index.step is not None and index.step == 0:
+raise ValueError("slice step cannot be zero")
+begin.append(index.start)
+end.append(index.stop)
+step.append(index.step)
+new_shape += (-2,)
+elif isinstance(index, integer_types):
+begin.append(index)
+end.append(index+1)
+step.append(1)
+new_shape += (-3,)
+new_shape += (-4,)
+sliced = _npi.slice(self, begin, end, step)
+return _npi.reshape(sliced, new_shape)
+# perform trivial list slicing on length one list represented by 
flag
+else:
+if isinstance(key, integer_types):
+if key in [-1, 0]:
+self._output_is_list = False
+return self
+else:
+raise IndexError
+elif isinstance(key, py_slice):
+if (key.start is None or key.start <= 0) and (key.stop is 
None or key.stop > 0):
+return self
+else:
+raise ValueError
+else:
+raise IndexError
+# list slicing on several nodes of outputs
+elif num_outputs > 1:
+if isinstance(key, py_slice):
+start = 0 if key.start is None else key.start
+stop = num_outputs if key.stop is None else key.stop
+step = 1 if key.step is None else key.step
+return Group([self[i] for i in range(start, stop, step)], 
_Symbol)
+elif isinstance(key, integer_types):
+if key >= num_outputs:
+# Important, python determines the end by this exception
+raise IndexError
+handle = SymbolHandle()
+check_call(_LIB.MXSymbolGetOutput(
+self.handle, mx_uint(key), ctypes.byref(handle)))
+return _Symbol(handle=handle)
+else:
+raise NotImplementedError
+else:
 raise NotImplementedError
-if key >= num_outputs:
-# Important, python determines the end by this exception
-raise IndexError
-handle = SymbolHandle()
-check_call(_LIB.MXSymbolGetOutput(
-self.handle, mx_uint(key), ctypes.byref(handle)))
-return _Symbol(handle=handle)
+
 
 Review comment:
   get rid of the extra blank line.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the 

[GitHub] [incubator-mxnet] haojin2 commented on a change in pull request #15905: [Numpy] Basic indexing in symbolic interface

2019-08-15 Thread GitBox
haojin2 commented on a change in pull request #15905: [Numpy] Basic indexing in 
symbolic interface
URL: https://github.com/apache/incubator-mxnet/pull/15905#discussion_r314582455
 
 

 ##
 File path: python/mxnet/symbol/numpy/_symbol.py
 ##
 @@ -39,25 +46,90 @@ def _num_outputs(sym):
 
 @set_module('mxnet.symbol.numpy')
 class _Symbol(Symbol):
-def __getitem__(self, key):
-num_outputs = _num_outputs(self)
-if num_outputs == 1:
-raise NotImplementedError
-if not isinstance(key, int):
+def __init__(self, handle):
+super(_Symbol, self).__init__(handle)
+self._output_is_list = False
+
+def __getitem__(self, key): # pylint: disable = 
too-many-return-statements, inconsistent-return-statements
+num_outputs = len(self)
+# print("Num of outputs is ", num_outputs)
 
 Review comment:
   Get rid of dead code.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services