[GitHub] sxjscience commented on a change in pull request #10000: [MXNET-80] Fix average pooling kernel size assignment error

2018-03-21 Thread GitBox
sxjscience commented on a change in pull request #1: [MXNET-80] Fix average 
pooling kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/1#discussion_r176184133
 
 

 ##
 File path: src/operator/pooling_v1-inl.h
 ##
 @@ -215,18 +217,21 @@ class PoolingV1Prop : public OperatorProperty {
  void Init(const std::vector<std::pair<std::string, std::string> >& kwargs) 
override {
 using namespace mshadow;
 param_.Init(kwargs);
-if (param_.kernel.ndim() == 2) {
-  if (param_.stride.ndim() == 0) param_.stride = Shape2(1, 1);
-  if (param_.pad.ndim() == 0) param_.pad = Shape2(0, 0);
+if (param_.global_pool) {
 
 Review comment:
   We can use `if (! param_.global_pool) {...` instead.


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: [MXNET-80] Fix average pooling kernel size assignment error

2018-03-21 Thread GitBox
sxjscience commented on a change in pull request #1: [MXNET-80] Fix average 
pooling kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/1#discussion_r176184133
 
 

 ##
 File path: src/operator/pooling_v1-inl.h
 ##
 @@ -215,18 +217,21 @@ class PoolingV1Prop : public OperatorProperty {
  void Init(const std::vector<std::pair<std::string, std::string> >& kwargs) 
override {
 using namespace mshadow;
 param_.Init(kwargs);
-if (param_.kernel.ndim() == 2) {
-  if (param_.stride.ndim() == 0) param_.stride = Shape2(1, 1);
-  if (param_.pad.ndim() == 0) param_.pad = Shape2(0, 0);
+if (param_.global_pool) {
 
 Review comment:
   if (! param_.global_pool) {...


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: [MXNET-80] Fix average pooling kernel size assignment error

2018-03-21 Thread GitBox
sxjscience commented on a change in pull request #1: [MXNET-80] Fix average 
pooling kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/1#discussion_r176183569
 
 

 ##
 File path: src/operator/nn/pooling.cc
 ##
 @@ -98,28 +97,45 @@ static bool PoolingShape(const nnvm::NodeAttrs& attrs,
   << "Pooling: Input data should be  3D in (batch, channel, x)"
   << " Or 4D in (batch, channel, y, x) "
   << " Or 5D in (batch, channel, d, y, x)";
+  CHECK_LE(dshape.ndim(), 5U)
+  << "Pooling: Input data should be  3D in (batch, channel, x)"
+  << " Or 4D in (batch, channel, y, x) "
+  << " Or 5D in (batch, channel, d, y, x)";
   TShape oshape = dshape;
   if (dshape.ndim() == 0) return false;
-  if (param.kernel.ndim() == 1) {
+  if (param.global_pool) {
+  if (dshape.ndim() == 3) {
 
 Review comment:
   We can replace the if-clauses with a for-loop.


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: [MXNET-80] Fix average pooling kernel size assignment error

2018-03-20 Thread GitBox
sxjscience commented on a change in pull request #1: [MXNET-80] Fix average 
pooling kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/1#discussion_r175856317
 
 

 ##
 File path: src/operator/nn/pool.h
 ##
 @@ -687,7 +687,7 @@ inline void pool(mshadow::Stream<cpu>* s, const DType* 
in_data, const TShape& ishape,
   LOG(FATAL) << "Unknown pooling type " << pool_type;
 }
   } else {
-LOG(FATAL) << "Unsupported " << kernel.ndim() << "-D pooling";
+LOG(FATAL) << "Unsupported " << kernel.ndim() << "-D non-avg pooling";
 
 Review comment:
   Why non-avg here?


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: [MXNET-80] Fix average pooling kernel size assignment error

2018-03-16 Thread GitBox
sxjscience commented on a change in pull request #1: [MXNET-80] Fix average 
pooling kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/1#discussion_r174999488
 
 

 ##
 File path: src/operator/nn/pooling-inl.h
 ##
 @@ -132,19 +132,23 @@ class PoolingOp {
 using namespace mshadow;
 Stream<xpu> *s = ctx.get_stream<xpu>();
 const TShape& ishape = in_data.shape_;
+TShape kernel = param_.kernel;
 TShape padding = param_.pad;
+TShape stride = param_.stride;
 if (param_.global_pool) {
-  for (index_t i = 0; i < padding.ndim(); i++) {
+  kernel = TShape(ishape.data() + ishape.ndim()-param_.kernel.ndim(),
+   ishape.data() + ishape.ndim());
 
 Review comment:
   I think I’ve found the error. Here it should be `TShape(ishape.data() + 2, 
ishape.data() + ishape.ndim())`


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: [MXNET-80] Fix average pooling kernel size assignment error

2018-03-16 Thread GitBox
sxjscience commented on a change in pull request #1: [MXNET-80] Fix average 
pooling kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/1#discussion_r174999516
 
 

 ##
 File path: src/operator/nn/pooling-inl.h
 ##
 @@ -154,20 +158,24 @@ class PoolingOp {
 using namespace mshadow;
 Stream<xpu> *s = ctx.get_stream<xpu>();
 const TShape& ishape = in_data.shape_;
+TShape kernel = param_.kernel;
 TShape padding = param_.pad;
+TShape stride = param_.stride;
 if (param_.global_pool) {
-  for (index_t i = 0; i < padding.ndim(); i++) {
+  kernel = TShape(ishape.data() + ishape.ndim() - param_.kernel.ndim(),
+   ishape.data() + ishape.ndim());
 
 Review comment:
   Same here


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: [MXNET-80] Fix average pooling kernel size assignment error

2018-03-14 Thread GitBox
sxjscience commented on a change in pull request #1: [MXNET-80] Fix average 
pooling kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/1#discussion_r174552493
 
 

 ##
 File path: src/operator/nn/pooling.cc
 ##
 @@ -130,35 +153,30 @@ static bool PoolingShape(const nnvm::NodeAttrs& attrs,
   } else if (param.kernel.ndim() == 2) {
 CHECK_EQ(dshape.ndim(), 4U)
 << "Pooling: Input data should be 4D in (batch, channel, y, x)";
-if (param.global_pool) {
-  oshape[2] = 1;
-  oshape[3] = 1;
+CHECK(param.kernel[0] <= dshape[2] + 2 * param.pad[0])
+<< "kernel size (" << param.kernel[0] << ") exceeds input ("
+<< dshape[2] << " padded to " << (dshape[2] + 2 * param.pad[0])
+<< ")";
+CHECK(param.kernel[1] <= dshape[3] + 2 * param.pad[1])
+<< "kernel size (" << param.kernel[1] << ") exceeds input ("
+<< dshape[3] << " padded to " << (dshape[3] + 2 * param.pad[1])
+<< ")";
+if (param.pooling_convention == pool_enum::kValid) {
+  oshape[2] = 1 +
+  (dshape[2] + 2 * param.pad[0] - param.kernel[0]) /
+  param.stride[0];
+  oshape[3] = 1 +
+  (dshape[3] + 2 * param.pad[1] - param.kernel[1]) /
+  param.stride[1];
 } else {
 
 Review comment:
   Ignore this comment. I've misinterpreted the code.


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: [MXNET-80] Fix average pooling kernel size assignment error

2018-03-14 Thread GitBox
sxjscience commented on a change in pull request #1: [MXNET-80] Fix average 
pooling kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/1#discussion_r174552138
 
 

 ##
 File path: src/operator/nn/pooling.cc
 ##
 @@ -130,35 +153,30 @@ static bool PoolingShape(const nnvm::NodeAttrs& attrs,
   } else if (param.kernel.ndim() == 2) {
 CHECK_EQ(dshape.ndim(), 4U)
 << "Pooling: Input data should be 4D in (batch, channel, y, x)";
-if (param.global_pool) {
-  oshape[2] = 1;
-  oshape[3] = 1;
+CHECK(param.kernel[0] <= dshape[2] + 2 * param.pad[0])
+<< "kernel size (" << param.kernel[0] << ") exceeds input ("
+<< dshape[2] << " padded to " << (dshape[2] + 2 * param.pad[0])
+<< ")";
+CHECK(param.kernel[1] <= dshape[3] + 2 * param.pad[1])
+<< "kernel size (" << param.kernel[1] << ") exceeds input ("
+<< dshape[3] << " padded to " << (dshape[3] + 2 * param.pad[1])
+<< ")";
+if (param.pooling_convention == pool_enum::kValid) {
+  oshape[2] = 1 +
+  (dshape[2] + 2 * param.pad[0] - param.kernel[0]) /
+  param.stride[0];
+  oshape[3] = 1 +
+  (dshape[3] + 2 * param.pad[1] - param.kernel[1]) /
+  param.stride[1];
 } else {
 
 Review comment:
   The `if (param.global_pool)` is removed and the `else` should also be 
removed.


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: [MXNET-80] Fix average pooling kernel size assignment error

2018-03-14 Thread GitBox
sxjscience commented on a change in pull request #1: [MXNET-80] Fix average 
pooling kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/1#discussion_r174551074
 
 

 ##
 File path: src/operator/nn/pooling.cc
 ##
 @@ -46,15 +46,20 @@ static void PoolingParamParser(nnvm::NodeAttrs *attrs) {
 if (param.stride.ndim() == 0) param.stride = Shape2(1, 1);
 if (param.pad.ndim() == 0) param.pad = Shape2(0, 0);
   } else {
-CHECK_EQ(param.kernel.ndim(), 3U) << param.kernel.ndim()
-   << "D pooling not supported";
+  // ignore kernel size only if global_pool not assigned false
+  if (param.global_pool == false) {
+CHECK_EQ(param.kernel.ndim(), 3U) << param.kernel.ndim()
+<< "D pooling not supported";
+  }
 if (param.stride.ndim() == 0) param.stride = Shape3(1, 1, 1);
 if (param.pad.ndim() == 0) param.pad = Shape3(0, 0, 0);
   }
-  CHECK_EQ(param.stride.ndim(), param.kernel.ndim())
-  << "stride and kernel should have the same length";
-  CHECK_EQ(param.pad.ndim(), param.kernel.ndim())
-  << "pad and kernel should have the same length";
+  if (param.global_pool == false) {
+CHECK_EQ(param.stride.ndim(), param.kernel.ndim())
+<< "stride and kernel should have the same length";
+CHECK_EQ(param.pad.ndim(), param.kernel.ndim())
+<< "pad and kernel should have the same length";
+  }
 
 Review comment:
   Simplify the logic here. If you have used global_pool, there is no need to 
check the stride, kernel or pad.
   ```c++
   if(! param.global_pool) {
   // CHECK kernel, pad, stride
   }
   ```


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: [MXNET-80] Fix average pooling kernel size assignment error

2018-03-14 Thread GitBox
sxjscience commented on a change in pull request #1: [MXNET-80] Fix average 
pooling kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/1#discussion_r174549092
 
 

 ##
 File path: src/operator/nn/pooling.cc
 ##
 @@ -98,9 +103,24 @@ static bool PoolingShape(const nnvm::NodeAttrs& attrs,
   << "Pooling: Input data should be  3D in (batch, channel, x)"
   << " Or 4D in (batch, channel, y, x) "
   << " Or 5D in (batch, channel, d, y, x)";
+  CHECK_LE(dshape.ndim(), 5U)
+  << "Pooling: Input data should be  3D in (batch, channel, x)"
+  << " Or 4D in (batch, channel, y, x) "
+  << " Or 5D in (batch, channel, d, y, x)";
   TShape oshape = dshape;
   if (dshape.ndim() == 0) return false;
-  if (param.kernel.ndim() == 1) {
+  if (param.global_pool) {
+  if (dshape.ndim() == 3) {
+  oshape[2] = 1;
+  } else if (dshape.ndim() == 4) {
+  oshape[2] = 1;
+  oshape[3] = 1;
+  } else if (dshape.ndim() == 5) {
+  oshape[2] = 1;
+  oshape[3] = 1;
+  oshape[4] = 1;
+  }
 
 Review comment:
   There is no need to revise this function. All you need to do is to prepare 
the correct initial values. You can check the logic here 
https://github.com/apache/incubator-mxnet/blob/master/src/operator/nn/pooling-inl.h#L135-L149
 . If global_pool is used, kernel is set to be 
`TShape(ishape.data()+ishape.ndim()-param_.kernel.ndim(), 
ishape.data()+ishape.ndim())`, pad is set to be all 0 and stride is set to be 
all 1. You can write a function to do this. For example:
   ```c++
   TShape kernel = param_.kernel;
   TShape padding = param_.pad;
   TShape stride = param_.stride;
   if(param_.global_pool) {
  kernel = TShape(ishape.data()+ishape.ndim()-param_.kernel.ndim(), 
ishape.data()+ishape.ndim());
  padding = TShape(ishape.ndim() - 2);
  for(int i = 0; i < ishape.ndim() - 2; i++) {
 padding[i] = 0;
  }
  stride = TShape(ishape.ndim() - 2);
   }
   pool(s, in_data.dptr<DType>(), in_data.shape_, out_data.shape_,
kernel,
padding,
stride,
param_.pool_type, req, out_data.dptr<DType>());
   ```


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: [MXNET-80] Fix average pooling kernel size assignment error

2018-03-13 Thread GitBox
sxjscience commented on a change in pull request #1: [MXNET-80] Fix average 
pooling kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/1#discussion_r174234577
 
 

 ##
 File path: src/operator/nn/pooling.cc
 ##
 @@ -98,9 +103,24 @@ static bool PoolingShape(const nnvm::NodeAttrs& attrs,
   << "Pooling: Input data should be  3D in (batch, channel, x)"
   << " Or 4D in (batch, channel, y, x) "
   << " Or 5D in (batch, channel, d, y, x)";
+  CHECK_LE(dshape.ndim(), 5U)
+  << "Pooling: Input data should be  3D in (batch, channel, x)"
+  << " Or 4D in (batch, channel, y, x) "
+  << " Or 5D in (batch, channel, d, y, x)";
   TShape oshape = dshape;
   if (dshape.ndim() == 0) return false;
-  if (param.kernel.ndim() == 1) {
+  if (param.global_pool) {
+  if (dshape.ndim() == 3) {
+  oshape[2] = 1;
+  } else if (dshape.ndim() == 4) {
+  oshape[2] = 1;
+  oshape[3] = 1;
+  } else if (dshape.ndim() == 5) {
+  oshape[2] = 1;
+  oshape[3] = 1;
+  oshape[4] = 1;
+  }
+  } else if (param.kernel.ndim() == 1) {
 CHECK_EQ(dshape.ndim(), 3U)
 << "Pooling: Input data should be 3D in (batch, channel, x)";
 if (param.global_pool) {
 
 Review comment:
   These `if` can be removed because the global_pool case is handled before.


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: [MXNET-80] Fix average pooling kernel size assignment error

2018-03-13 Thread GitBox
sxjscience commented on a change in pull request #1: [MXNET-80] Fix average 
pooling kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/1#discussion_r174234503
 
 

 ##
 File path: src/operator/nn/pooling.cc
 ##
 @@ -98,9 +103,24 @@ static bool PoolingShape(const nnvm::NodeAttrs& attrs,
   << "Pooling: Input data should be  3D in (batch, channel, x)"
   << " Or 4D in (batch, channel, y, x) "
   << " Or 5D in (batch, channel, d, y, x)";
+  CHECK_LE(dshape.ndim(), 5U)
+  << "Pooling: Input data should be  3D in (batch, channel, x)"
+  << " Or 4D in (batch, channel, y, x) "
+  << " Or 5D in (batch, channel, d, y, x)";
   TShape oshape = dshape;
   if (dshape.ndim() == 0) return false;
-  if (param.kernel.ndim() == 1) {
+  if (param.global_pool) {
+  if (dshape.ndim() == 3) {
+  oshape[2] = 1;
+  } else if (dshape.ndim() == 4) {
+  oshape[2] = 1;
+  oshape[3] = 1;
+  } else if (dshape.ndim() == 5) {
+  oshape[2] = 1;
+  oshape[3] = 1;
+  oshape[4] = 1;
+  }
 
 Review comment:
   Also, you can use a for-loop instead of if.


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: [MXNET-80] Fix average pooling kernel size assignment error

2018-03-13 Thread GitBox
sxjscience commented on a change in pull request #1: [MXNET-80] Fix average 
pooling kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/1#discussion_r174233854
 
 

 ##
 File path: src/operator/nn/pooling.cc
 ##
 @@ -98,9 +103,24 @@ static bool PoolingShape(const nnvm::NodeAttrs& attrs,
   << "Pooling: Input data should be  3D in (batch, channel, x)"
   << " Or 4D in (batch, channel, y, x) "
   << " Or 5D in (batch, channel, d, y, x)";
+  CHECK_LE(dshape.ndim(), 5U)
+  << "Pooling: Input data should be  3D in (batch, channel, x)"
+  << " Or 4D in (batch, channel, y, x) "
+  << " Or 5D in (batch, channel, d, y, x)";
   TShape oshape = dshape;
   if (dshape.ndim() == 0) return false;
-  if (param.kernel.ndim() == 1) {
+  if (param.global_pool) {
+  if (dshape.ndim() == 3) {
+  oshape[2] = 1;
+  } else if (dshape.ndim() == 4) {
+  oshape[2] = 1;
+  oshape[3] = 1;
+  } else if (dshape.ndim() == 5) {
+  oshape[2] = 1;
+  oshape[3] = 1;
+  oshape[4] = 1;
+  }
 
 Review comment:
   Need to push the oshape to `out_shape`. See 
https://github.com/apache/incubator-mxnet/blob/master/src/operator/nn/pooling.cc#L163-L168.


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: [MXNET-80] Fix average pooling kernel size assignment error

2018-03-12 Thread GitBox
sxjscience commented on a change in pull request #1: [MXNET-80] Fix average 
pooling kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/1#discussion_r174021284
 
 

 ##
 File path: src/operator/nn/pooling.cc
 ##
 @@ -54,11 +54,13 @@ static void PoolingParamParser(nnvm::NodeAttrs *attrs) {
 if (param.stride.ndim() == 0) param.stride = Shape3(1, 1, 1);
 if (param.pad.ndim() == 0) param.pad = Shape3(0, 0, 0);
   }
-  CHECK_EQ(param.stride.ndim(), param.kernel.ndim())
-  << "stride and kernel should have the same length";
-  CHECK_EQ(param.pad.ndim(), param.kernel.ndim())
-  << "pad and kernel should have the same length";
-  attrs->parsed = std::move(param);
+  if (param.global_pool == false) {
+CHECK_EQ(param.stride.ndim(), param.kernel.ndim())
+<< "stride and kernel should have the same length";
+CHECK_EQ(param.pad.ndim(), param.kernel.ndim())
+<< "pad and kernel should have the same length";
+attrs->parsed = std::move(param);
 
 Review comment:
   We still need to parse the param


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: [MXNET-80] Fix average pooling kernel size assignment error

2018-03-12 Thread GitBox
sxjscience commented on a change in pull request #1: [MXNET-80] Fix average 
pooling kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/1#discussion_r174019453
 
 

 ##
 File path: src/operator/nn/pooling-inl.h
 ##
 @@ -56,11 +56,11 @@ struct PoolingParam : public dmlc::Parameter<PoolingParam> 
{
 DMLC_DECLARE_FIELD(cudnn_off).set_default(false)
 .describe("Turn off cudnn pooling and use MXNet pooling operator. ");
 
-DMLC_DECLARE_FIELD(kernel)
+DMLC_DECLARE_FIELD(kernel).set_default(TShape())  // add default value here
 .enforce_nonzero()
 .describe("Pooling kernel size: (y, x) or (d, y, x)");
 
-DMLC_DECLARE_FIELD(pool_type)
+DMLC_DECLARE_FIELD(pool_type).set_default(pool_enum::kMaxPooling)  // add 
default pooling method
 .add_enum("max", pool_enum::kMaxPooling)
 .add_enum("avg", pool_enum::kAvgPooling)
 .add_enum("sum", pool_enum::kSumPooling)
 
 Review comment:
   There's no need to check this if global_pool is turned on.


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] sxjscience commented on a change in pull request #10000: [MXNET-80] Fix average pooling kernel size assignment error

2018-03-12 Thread GitBox
sxjscience commented on a change in pull request #1: [MXNET-80] Fix average 
pooling kernel size assignment error
URL: https://github.com/apache/incubator-mxnet/pull/1#discussion_r173895781
 
 

 ##
 File path: tests/python/gpu/test_operator_gpu.py
 ##
 @@ -904,86 +904,87 @@ def test_1d_pooling(pool_type):
 kernel = (4,)
 pad = (2,)
 stride = (2,)
-
+
 ctx_list = []
 sym_list = []
-
+
 pooling_convention = 'valid'
-
+
 ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, 
pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pad=pad, stride=stride, 
pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, name='pool'))
-
+
 ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, 
pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pad=pad, stride=stride, 
pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=False, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=False, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, 
pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pad=pad, stride=stride, 
pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=True, name='pool'))
-
+
 ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
-sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
+sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=True, name='pool'))
-
+
 check_consistency(sym_list, ctx_list)
-
+
 def test_2d_pooling(pool_type):
 data = (2, 3, 20, 20)
 kernel = (4, 4)
 pad = (2, 2)
 stride = (2, 2)
-
+
 ctx_list = []
 sym_list = []
-
+
 pooling_convention = 'valid'
-
+
 ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': 
{'pool_data': np.float32}})
 sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, 
stride=stride, pool_type=pool_type,
 
 Review comment:
   @CoinCheung, we can then change to append new tests in the list instead of 
replacing the current tests. We could change the code following the comment by 
@TaoLv 


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services