SINGA-51 Improve the convolution and pooling operations
minor changes
- format test_common.cc
- format include patterns in driver, layers
all .cc files include their own .h file on the first line
Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/d5d817e1
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/d5d817e1
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/d5d817e1
Branch: refs/heads/master
Commit: d5d817e145b1b8d60d8ae3781c24aa0916f4ddf7
Parents: e769142
Author: wang sheng <[email protected]>
Authored: Mon Sep 14 16:57:10 2015 +0800
Committer: wang sheng <[email protected]>
Committed: Mon Sep 14 16:57:10 2015 +0800
----------------------------------------------------------------------
 include/driver.h                     |   9 +-
 include/neuralnet/connection_layer.h |   7 +-
 include/neuralnet/input_layer.h      |   2 +
 include/neuralnet/layer.h            |   6 +-
 include/neuralnet/loss_layer.h       |   5 +-
 include/neuralnet/neuron_layer.h     |   4 +-
 include/neuralnet/output_layer.h     |   2 +
 src/driver.cc                        |   4 +-
 src/neuralnet/connection_layer.cc    |   6 +-
 src/neuralnet/input_layer.cc         |   7 +-
 src/neuralnet/layer.cc               |   2 +
 src/neuralnet/loss_layer.cc          |   7 +-
 src/neuralnet/neuron_layer.cc        |  21 ++--
 src/neuralnet/output_layer.cc        |   3 +-
 src/test/test_common.cc              | 169 +++++++++++++++---------------
 src/utils/common.cc                  |  17 ++-
 16 files changed, 142 insertions(+), 129 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d5d817e1/include/driver.h
----------------------------------------------------------------------
diff --git a/include/driver.h b/include/driver.h
index 5a9ddfc..b16cef3 100644
--- a/include/driver.h
+++ b/include/driver.h
@@ -1,5 +1,6 @@
#ifndef SINGA_DRIVER_H_
#define SINGA_DRIVER_H_
+
#include "singa.h"
namespace singa {
@@ -110,24 +111,28 @@ int Driver::RegisterParam(const Type& type) {
factory->Register(type, CreateInstance(Subclass, Param));
return 1;
}
+
template<typename Subclass, typename Type>
int Driver::RegisterParamGenerator(const Type& type) {
auto factory = Singleton<Factory<singa::ParamGenerator>>::Instance();
factory->Register(type, CreateInstance(Subclass, ParamGenerator));
return 1;
}
+
template<typename Subclass, typename Type>
int Driver::RegisterUpdater(const Type& type) {
auto factory = Singleton<Factory<singa::Updater>>::Instance();
factory->Register(type, CreateInstance(Subclass, Updater));
return 1;
}
+
template<typename Subclass, typename Type>
int Driver::RegisterLRGenerator(const Type& type) {
auto factory = Singleton<Factory<singa::LRGenerator>>::Instance();
factory->Register(type, CreateInstance(Subclass, LRGenerator));
return 1;
}
+
template<typename Subclass, typename Type>
int Driver::RegisterWorker(const Type& type) {
auto factory = Singleton<Factory<singa::Worker>>::Instance();
@@ -135,8 +140,6 @@ int Driver::RegisterWorker(const Type& type) {
return 1;
}
-
} // namespace singa
-#endif // SINGA_DRIVER_H_
-
+#endif // SINGA_DRIVER_H_
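The five Register* templates above all share one idiom: fetch the per-base-type factory singleton, then map a type id to a creator function for the subclass. Below is a minimal, self-contained C++ sketch of that idiom; it is an illustrative reimplementation (the Singleton, Factory, and CreateInstance names mirror the diff, but SINGA's actual versions live in its utils headers and may differ):

#include <functional>
#include <iostream>
#include <map>
#include <string>

template <typename T>
class Singleton {
 public:
  static T* Instance() {
    static T instance;  // one shared instance per type
    return &instance;
  }
};

template <typename Base>
class Factory {
 public:
  void Register(const std::string& type, std::function<Base*()> creator) {
    creators_[type] = creator;
  }
  Base* Create(const std::string& type) { return creators_.at(type)(); }
 private:
  std::map<std::string, std::function<Base*()>> creators_;
};

// Stand-ins for SINGA's base classes, for illustration only.
struct Updater { virtual ~Updater() = default; };
struct MyUpdater : Updater {};

// Analogous to the CreateInstance(Subclass, Base) macro used in the diff.
#define CreateInstance(Sub, Base) []() -> Base* { return new Sub(); }

int main() {
  auto factory = Singleton<Factory<Updater>>::Instance();
  factory->Register("MyUpdater", CreateInstance(MyUpdater, Updater));
  Updater* u = factory->Create("MyUpdater");  // instantiated by type name
  std::cout << (u != nullptr) << std::endl;
  delete u;
  return 0;
}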
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d5d817e1/include/neuralnet/connection_layer.h
----------------------------------------------------------------------
diff --git a/include/neuralnet/connection_layer.h b/include/neuralnet/connection_layer.h
index e44f4f3..233714d 100644
--- a/include/neuralnet/connection_layer.h
+++ b/include/neuralnet/connection_layer.h
@@ -1,5 +1,7 @@
#ifndef SINGA_NEURALNET_CONNECTION_LAYER_H_
#define SINGA_NEURALNET_CONNECTION_LAYER_H_
+
+#include <vector>
#include "neuralnet/layer.h"
/**
@@ -120,6 +122,7 @@ class SplitLayer : public ConnectionLayer {
protected:
Blob<float> grads_;
};
-}
-// namespace singa
+
+} // namespace singa
+
#endif // SINGA_NEURALNET_CONNECTION_LAYER_H_
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d5d817e1/include/neuralnet/input_layer.h
----------------------------------------------------------------------
diff --git a/include/neuralnet/input_layer.h b/include/neuralnet/input_layer.h
index 62595c6..67af2eb 100644
--- a/include/neuralnet/input_layer.h
+++ b/include/neuralnet/input_layer.h
@@ -1,6 +1,7 @@
#ifndef SINGA_NEURALNET_INPUT_LAYER_H_
#define SINGA_NEURALNET_INPUT_LAYER_H_
+#include <string>
#include <vector>
#include "neuralnet/layer.h"
#include "utils/data_shard.h"
@@ -165,6 +166,7 @@ class PrefetchLayer : public Layer {
protected:
std::thread thread_;
};
+
} // namespace singa
#endif // SINGA_NEURALNET_INPUT_LAYER_H_
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d5d817e1/include/neuralnet/layer.h
----------------------------------------------------------------------
diff --git a/include/neuralnet/layer.h b/include/neuralnet/layer.h
index 56201f5..4f153d3 100644
--- a/include/neuralnet/layer.h
+++ b/include/neuralnet/layer.h
@@ -5,7 +5,6 @@
#include <string>
#include <thread>
#include <vector>
-
#include "proto/common.pb.h"
#include "proto/job.pb.h"
#include "utils/common.h"
@@ -13,8 +12,6 @@
#include "utils/param.h"
namespace singa {
-using std::vector;
-using std::string;
/**
* Base layer class.
@@ -207,10 +204,11 @@ class LossLayer : public Layer {
};
} // namespace singa
+
#include "neuralnet/connection_layer.h"
#include "neuralnet/input_layer.h"
#include "neuralnet/loss_layer.h"
#include "neuralnet/neuron_layer.h"
#include "neuralnet/output_layer.h"
-#endif // SINGA_NEURALNET_BASE_LAYER_H_
+#endif // SINGA_NEURALNET_LAYER_H_
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d5d817e1/include/neuralnet/loss_layer.h
----------------------------------------------------------------------
diff --git a/include/neuralnet/loss_layer.h b/include/neuralnet/loss_layer.h
index 8358bd6..c9f6681 100644
--- a/include/neuralnet/loss_layer.h
+++ b/include/neuralnet/loss_layer.h
@@ -41,6 +41,7 @@ class SoftmaxLossLayer : public LossLayer {
float scale_;
int topk_;
};
-}
-// namespace singa
+
+} // namespace singa
+
#endif // SINGA_NEURALNET_LOSS_LAYER_H_
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d5d817e1/include/neuralnet/neuron_layer.h
----------------------------------------------------------------------
diff --git a/include/neuralnet/neuron_layer.h b/include/neuralnet/neuron_layer.h
index dd45eec..86b55a3 100644
--- a/include/neuralnet/neuron_layer.h
+++ b/include/neuralnet/neuron_layer.h
@@ -1,9 +1,10 @@
#ifndef SINGA_NEURALNET_NEURON_LAYER_H_
#define SINGA_NEURALNET_NEURON_LAYER_H_
-#include <vector>
+#include <vector>
#include "neuralnet/layer.h"
#include "proto/job.pb.h"
+
/**
* \file this file includes the declarations neuron layer classes that conduct
* the transformation of features.
@@ -221,7 +222,6 @@ class RBMHidLayer: public RBMLayer {
RBMLayer *vis_layer_;
};
-
} // namespace singa
#endif // SINGA_NEURALNET_NEURON_LAYER_H_
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d5d817e1/include/neuralnet/output_layer.h
----------------------------------------------------------------------
diff --git a/include/neuralnet/output_layer.h b/include/neuralnet/output_layer.h
index c507e1c..ac83d00 100644
--- a/include/neuralnet/output_layer.h
+++ b/include/neuralnet/output_layer.h
@@ -1,4 +1,6 @@
#ifndef SINGA_NEURALNET_OUTPUT_LAYER_H_
#define SINGA_NEURALNET_OUTPUT_LAYER_H_
+
// currently no output sub-classes are defined
+
#endif // SINGA_NEURALNET_OUTPUT_LAYER_H_
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d5d817e1/src/driver.cc
----------------------------------------------------------------------
diff --git a/src/driver.cc b/src/driver.cc
index a891a08..88bf4aa 100644
--- a/src/driver.cc
+++ b/src/driver.cc
@@ -1,10 +1,8 @@
+#include "driver.h"
#include <cblas.h>
#include <glog/logging.h>
#include <string>
-
-#include "singa.h"
-
#include "utils/tinydir.h"
namespace singa {
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d5d817e1/src/neuralnet/connection_layer.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/connection_layer.cc b/src/neuralnet/connection_layer.cc
index a3d0a75..e247161 100644
--- a/src/neuralnet/connection_layer.cc
+++ b/src/neuralnet/connection_layer.cc
@@ -1,7 +1,9 @@
-#include <vector>
-#include "neuralnet/layer.h"
+#include "neuralnet/connection_layer.h"
namespace singa {
+
+using std::vector;
+
/************* Implementation for ConcateLayer ***********/
void ConcateLayer::Setup(const LayerProto& proto, int npartitions) {
// CHECK_EQ(npartitions, 1);
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d5d817e1/src/neuralnet/input_layer.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/input_layer.cc b/src/neuralnet/input_layer.cc
index b1c6986..f7167da 100644
--- a/src/neuralnet/input_layer.cc
+++ b/src/neuralnet/input_layer.cc
@@ -1,8 +1,7 @@
-#include <vector>
-#include <string>
+#include "neuralnet/input_layer.h"
-#include "neuralnet/layer.h"
#include "mshadow/tensor.h"
+
namespace singa {
using namespace mshadow;
@@ -10,6 +9,8 @@ using mshadow::cpu;
using mshadow::Shape4;
using mshadow::Tensor;
+using std::string;
+using std::vector;
/************* Implementation for ParserLayer ***********/
void ParserLayer::ComputeFeature(int flag, Metric *perf) {
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d5d817e1/src/neuralnet/layer.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/layer.cc b/src/neuralnet/layer.cc
index 7e2e107..f38d592 100644
--- a/src/neuralnet/layer.cc
+++ b/src/neuralnet/layer.cc
@@ -9,6 +9,8 @@
namespace singa {
+using std::string;
+
Layer* Layer::Create(const LayerProto& proto) {
auto* factory = Singleton<Factory<Layer>>::Instance();
Layer* layer = nullptr;
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d5d817e1/src/neuralnet/loss_layer.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/loss_layer.cc b/src/neuralnet/loss_layer.cc
index 118456a..f9b80a9 100644
--- a/src/neuralnet/loss_layer.cc
+++ b/src/neuralnet/loss_layer.cc
@@ -1,9 +1,10 @@
+#include "neuralnet/loss_layer.h"
+
#include <glog/logging.h>
-#include "neuralnet/layer.h"
#include "mshadow/tensor.h"
-
namespace singa {
+
using namespace mshadow;
using mshadow::cpu;
@@ -14,6 +15,8 @@ using mshadow::Shape3;
using mshadow::Shape4;
using mshadow::Tensor;
+using std::string;
+using std::vector;
/********** * Implementation for EuclideanLossLayer*************************/
void EuclideanLossLayer::ComputeFeature(int flag, Metric* perf) {
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d5d817e1/src/neuralnet/neuron_layer.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/neuron_layer.cc b/src/neuralnet/neuron_layer.cc
index edfa022..a98b40d 100644
--- a/src/neuralnet/neuron_layer.cc
+++ b/src/neuralnet/neuron_layer.cc
@@ -1,10 +1,11 @@
+#include "neuralnet/neuron_layer.h"
+
#include <glog/logging.h>
#include <algorithm>
-
-#include "neuralnet/layer.h"
#include "utils/singleton.h"
#include "mshadow/tensor.h"
#include "mshadow/cxxnet_op.h"
+
namespace singa {
using namespace mshadow;
@@ -165,7 +166,7 @@ void CConvolutionLayer::ComputeGradient(int flag, Metric* perf) {
if (gsrcblob != nullptr)
gsrc.dptr = gsrcblob->mutable_cpu_data();
gbias = expr::sumall_except_dim<1>(grad);
- for(int n = 0; n < batchsize_; n++) {
+ for (int n = 0; n < batchsize_; n++) {
Im2col(src[n].dptr, channels_, height_, width_,
kernel_, kernel_, pad_, pad_, stride_, stride_, col.dptr);
gweight += dot(grad[n], col.T());
@@ -230,7 +231,7 @@ Blob<float>* RBMLayer::Sample(int flag) {
&sample_ : &neg_sample_;
}
void RBMLayer::Setup(const LayerProto& proto, int npartitions) {
- CHECK_EQ(npartitions, 1); // TODO test for npartitions > 1
+ CHECK_EQ(npartitions, 1); // TODO(wangwei) test for npartitions > 1
Layer::Setup(proto, npartitions);
hdim_ = proto.rbm_conf().hdim();
gaussian_ = proto.rbm_conf().gaussian();
@@ -523,15 +524,15 @@ void PoolingLayer::ComputeGradient(int flag, Metric* perf) {
void CPoolingLayer::Setup(const LayerProto& proto, int npartitions) {
PoolingLayer::Setup(proto, npartitions);
- if(pool_ == PoolingProto_PoolMethod_MAX)
- mask_.ReshapeLike(data_);
+ if (pool_ == PoolingProto_PoolMethod_MAX)
+ mask_.ReshapeLike(data_);
}
void CPoolingLayer::ComputeFeature(int flag, Metric* perf) {
- if(pool_ == PoolingProto_PoolMethod_MAX)
+ if (pool_ == PoolingProto_PoolMethod_MAX)
ForwardMaxPooling(srclayers_[0]->mutable_data(this)->mutable_cpu_data(),
batchsize_, channels_, height_, width_, kernel_, kernel_, pad_, pad_,
stride_, stride_, data_.mutable_cpu_data(), mask_.mutable_cpu_data());
- else if(pool_ == PoolingProto_PoolMethod_AVG)
+ else if (pool_ == PoolingProto_PoolMethod_AVG)
ForwardAvgPooling(srclayers_[0]->mutable_data(this)->mutable_cpu_data(),
batchsize_, channels_, height_, width_, kernel_, kernel_, pad_, pad_,
stride_, stride_, data_.mutable_cpu_data());
@@ -540,11 +541,11 @@ void CPoolingLayer::ComputeFeature(int flag, Metric* perf) {
}
void CPoolingLayer::ComputeGradient(int flag, Metric* perf) {
- if(pool_ == PoolingProto_PoolMethod_MAX)
+ if (pool_ == PoolingProto_PoolMethod_MAX)
BackwardMaxPooling(grad_.cpu_data(), mask_.cpu_data(), batchsize_,
channels_, height_, width_, kernel_, kernel_, pad_, pad_,
stride_,
stride_,srclayers_[0]->mutable_grad(this)->mutable_cpu_data());
- else if(pool_ == PoolingProto_PoolMethod_AVG)
+ else if (pool_ == PoolingProto_PoolMethod_AVG)
BackwardAvgPooling(grad_.cpu_data(), batchsize_,
channels_, height_, width_, kernel_, kernel_, pad_, pad_,
stride_,
stride_,srclayers_[0]->mutable_grad(this)->mutable_cpu_data());
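The Forward/Backward pooling helpers that CPoolingLayer dispatches to derive the output size with the integer formula that appears in the src/utils/common.cc hunks below: top_height = (height + pad_h * 2 - kernel_h) / stride_h + 1, and likewise for the width. A quick standalone check of that formula with hypothetical values (height 32, kernel 3, stride 2, pad 0):

#include <cstdio>

int main() {
  int height = 32, pad_h = 0, kernel_h = 3, stride_h = 2;
  // Same integer arithmetic as in src/utils/common.cc:
  // (32 + 0 - 3) / 2 + 1 = 14 + 1 = 15 output rows.
  int top_height = (height + pad_h * 2 - kernel_h) / stride_h + 1;
  std::printf("top_height = %d\n", top_height);  // prints 15
  return 0;
}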
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d5d817e1/src/neuralnet/output_layer.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/output_layer.cc b/src/neuralnet/output_layer.cc
index dfc547b..535480e 100644
--- a/src/neuralnet/output_layer.cc
+++ b/src/neuralnet/output_layer.cc
@@ -2,5 +2,4 @@
namespace singa {
-
-}
+} // namespace singa
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d5d817e1/src/test/test_common.cc
----------------------------------------------------------------------
diff --git a/src/test/test_common.cc b/src/test/test_common.cc
index b84e860..e30c9cb 100644
--- a/src/test/test_common.cc
+++ b/src/test/test_common.cc
@@ -1,113 +1,112 @@
-#include "gtest/gtest.h"
-#include "utils/common.h"
-#include <unordered_map>
#include <string>
+#include <unordered_map>
#include <vector>
+#include "gtest/gtest.h"
+#include "utils/common.h"
using std::string;
using std::vector;
using namespace singa;
TEST(CommonTest, TestIntVecToString) {
-
- vector<int> num_vec {2, 3, 5, 7, 11};
- string str = "(2, 3, 5, 7, 11, )";
- ASSERT_EQ(str, IntVecToString(num_vec));
+ vector<int> num_vec {2, 3, 5, 7, 11};
+ string str = "(2, 3, 5, 7, 11, )";
+ ASSERT_EQ(str, IntVecToString(num_vec));
}
TEST(CommonTest, TestStringPrintf) {
- const char* str_a = "abc";
- const char* str_b = "edfgh";
- const char* str_c = " !@#";
- const char* str_d = "1";
- const char* str_e = "2";
- const char* str_f = "3";
+ const char* str_a = "abc";
+ const char* str_b = "edfgh";
+ const char* str_c = " !@#";
+ const char* str_d = "1";
+ const char* str_e = "2";
+ const char* str_f = "3";
- string fmt_a = "%s%s%s";
- string fmt_b = "[%s] [%s] [%s] ";
+ string fmt_a = "%s%s%s";
+ string fmt_b = "[%s] [%s] [%s] ";
- string str_d_a = "abcedfgh !@#";
- string str_d_b = "[1] [2] [3] ";
+ string str_d_a = "abcedfgh !@#";
+ string str_d_b = "[1] [2] [3] ";
- ASSERT_EQ(str_d_a, StringPrintf(fmt_a, str_a, str_b, str_c));
- ASSERT_EQ(str_d_b, StringPrintf(fmt_b, str_d, str_e, str_f));
+ ASSERT_EQ(str_d_a, StringPrintf(fmt_a, str_a, str_b, str_c));
+ ASSERT_EQ(str_d_b, StringPrintf(fmt_b, str_d, str_e, str_f));
}
TEST(CommonTest, TestGCDLCM) {
- int a = 2, b = 5, c = 10, d = 15;
+ int a = 2, b = 5, c = 10, d = 15;
- ASSERT_EQ(1, gcd(a, b));
- ASSERT_EQ(5, gcd(c, d));
- ASSERT_EQ(10, LeastCommonMultiple(b, c));
- ASSERT_EQ(30, LeastCommonMultiple(c, d));
+ ASSERT_EQ(1, gcd(a, b));
+ ASSERT_EQ(5, gcd(c, d));
+ ASSERT_EQ(10, LeastCommonMultiple(b, c));
+ ASSERT_EQ(30, LeastCommonMultiple(c, d));
}
TEST(CommonTest, TestMetric) {
- string str, msg;
- Metric metric;
- metric.Add("a", 0.5);
- metric.Add("b", 0.5);
- metric.Add("a", 1.5);
- str = metric.ToLogString();
- msg = metric.ToString();
- metric.Reset();
- metric.ParseFrom(msg);
- ASSERT_EQ(str, metric.ToLogString());
+ string str, msg;
+ Metric metric;
+ metric.Add("a", 0.5);
+ metric.Add("b", 0.5);
+ metric.Add("a", 1.5);
+ str = metric.ToLogString();
+ msg = metric.ToString();
+ metric.Reset();
+ metric.ParseFrom(msg);
+ ASSERT_EQ(str, metric.ToLogString());
}
TEST(CommonTest, TestSlice) {
- vector<vector<int>> slices_0;
- vector<int> sizes {14112, 96, 256, 884736, 384};
- ASSERT_EQ(slices_0, Slice(0, sizes));
-
- vector<vector<int>> slices_1 {
- { 14112 },
- { 96 },
- { 256 },
- { 884736 },
- { 384 },
- };
-
- vector<vector<int>> slices_2 {
- { 14112 },
- { 96 },
- { 256 },
- { 435328, 449408 },
- { 384 },
- };
-
- vector<vector<int>> slices_4 {
- { 14112 },
- { 96 },
- { 256 },
- { 210432,224896,224896,224512 },
- { 384 },
- };
-
- vector<vector<int>> slices_8 {
- { 14112 },
- { 96 },
- { 256 },
- { 97984,112448,112448,112448,112448,112448,112448,112064 },
- { 384 },
- };
-
- ASSERT_EQ(slices_1, Slice(1, sizes));
- ASSERT_EQ(slices_2, Slice(2, sizes));
- ASSERT_EQ(slices_4, Slice(4, sizes));
- ASSERT_EQ(slices_8, Slice(8, sizes));
+ vector<vector<int>> slices_0;
+ vector<int> sizes {14112, 96, 256, 884736, 384};
+ ASSERT_EQ(slices_0, Slice(0, sizes));
+
+ vector<vector<int>> slices_1 {
+ {14112},
+ {96},
+ {256},
+ {884736},
+ {384},
+ };
+
+ vector<vector<int>> slices_2 {
+ {14112},
+ {96},
+ {256},
+ {435328, 449408},
+ {384},
+ };
+
+ vector<vector<int>> slices_4 {
+ {14112},
+ {96},
+ {256},
+ {210432, 224896, 224896, 224512},
+ {384},
+ };
+
+ vector<vector<int>> slices_8 {
+ {14112},
+ {96},
+ {256},
+ {97984, 112448, 112448, 112448, 112448, 112448, 112448, 112064},
+ {384},
+ };
+
+ ASSERT_EQ(slices_1, Slice(1, sizes));
+ ASSERT_EQ(slices_2, Slice(2, sizes));
+ ASSERT_EQ(slices_4, Slice(4, sizes));
+ ASSERT_EQ(slices_8, Slice(8, sizes));
}
TEST(CommonTest, TestPartitionSlices) {
- vector<int> slices {
- 97984,112448,112448,112448,112448,112448,112448,112064
- };
- vector<int> box_1 { 0, 0, 0, 0, 0, 0, 0, 0 };
- vector<int> box_2 { 0, 0, 0, 0, 1, 1, 1, 1 };
- vector<int> box_4 { 0, 0, 1, 1, 2, 2, 3, 3 };
- vector<int> box_8 { 0, 1, 2, 3, 4, 5, 6, 7 };
- ASSERT_EQ(box_1, PartitionSlices(1, slices));
- ASSERT_EQ(box_2, PartitionSlices(2, slices));
- ASSERT_EQ(box_4, PartitionSlices(4, slices));
- ASSERT_EQ(box_8, PartitionSlices(8, slices));
+ vector<int> slices {
+ 97984, 112448, 112448, 112448, 112448, 112448, 112448, 112064
+ };
+ vector<int> box_1 {0, 0, 0, 0, 0, 0, 0, 0};
+ vector<int> box_2 {0, 0, 0, 0, 1, 1, 1, 1};
+ vector<int> box_4 {0, 0, 1, 1, 2, 2, 3, 3};
+ vector<int> box_8 {0, 1, 2, 3, 4, 5, 6, 7};
+ ASSERT_EQ(box_1, PartitionSlices(1, slices));
+ ASSERT_EQ(box_2, PartitionSlices(2, slices));
+ ASSERT_EQ(box_4, PartitionSlices(4, slices));
+ ASSERT_EQ(box_8, PartitionSlices(8, slices));
}
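The expected values in TestSlice and TestPartitionSlices are mutually consistent, which is worth checking when editing them: the 8-way slices of the largest blob (884736) regroup pairwise into the 4-way slices and four-at-a-time into the 2-way slices. A standalone sketch verifying just that arithmetic (it checks only the test data and does not exercise SINGA's Slice itself):

#include <cassert>

int main() {
  int s8[] = {97984, 112448, 112448, 112448, 112448, 112448, 112448, 112064};
  assert(s8[0] + s8[1] == 210432);                  // first 4-way slice
  assert(s8[0] + s8[1] + s8[2] + s8[3] == 435328);  // first 2-way slice
  int total = 0;
  for (int v : s8) total += v;
  assert(total == 884736);                          // the original blob size
  return 0;
}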
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d5d817e1/src/utils/common.cc
----------------------------------------------------------------------
diff --git a/src/utils/common.cc b/src/utils/common.cc
index 3c3dc39..4cf9a89 100644
--- a/src/utils/common.cc
+++ b/src/utils/common.cc
@@ -357,8 +357,8 @@ void ForwardMaxPooling(const float* bottom, const int num, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
float* top, float* mask) {
- int top_height = (height + pad_h * 2 -kernel_h ) / stride_h + 1;
- int top_width = (width + pad_w * 2 -kernel_w ) / stride_w + 1;
+ int top_height = (height + pad_h * 2 -kernel_h) / stride_h + 1;
+ int top_width = (width + pad_w * 2 -kernel_w) / stride_w + 1;
int top_count = num * top_height * top_width * channels;
for (int i = 0; i < top_count; i++) {
mask[i] = -1;
@@ -402,8 +402,8 @@ void BackwardMaxPooling(const float* top, const float* mask, const int num,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
float* bottom) {
- int top_height = (height + pad_h * 2 -kernel_h ) / stride_h + 1;
- int top_width = (width + pad_w * 2 -kernel_w ) / stride_w + 1;
+ int top_height = (height + pad_h * 2 -kernel_h) / stride_h + 1;
+ int top_width = (width + pad_w * 2 -kernel_w) / stride_w + 1;
const int top_offset = top_height * top_width;
const int bottom_offset = height * width;
memset(bottom, 0, sizeof(float) * num * channels * bottom_offset);
@@ -427,8 +427,8 @@ void ForwardAvgPooling(const float* bottom, const int num, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
float* top) {
- int top_height = (height + pad_h * 2 -kernel_h ) / stride_h + 1;
- int top_width = (width + pad_w * 2 -kernel_w ) / stride_w + 1;
+ int top_height = (height + pad_h * 2 -kernel_h) / stride_h + 1;
+ int top_width = (width + pad_w * 2 -kernel_w) / stride_w + 1;
int top_count = num * top_height * top_width * channels;
for (int i = 0; i < top_count; i++) {
top[i] = 0;
@@ -470,8 +470,8 @@ void BackwardAvgPooling(const float* top, const int num, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
float* bottom) {
- int top_height = (height + pad_h * 2 -kernel_h ) / stride_h + 1;
- int top_width = (width + pad_w * 2 -kernel_w ) / stride_w + 1;
+ int top_height = (height + pad_h * 2 -kernel_h) / stride_h + 1;
+ int top_width = (width + pad_w * 2 -kernel_w) / stride_w + 1;
const int top_offset = top_height * top_width;
const int bottom_offset = height * width;
memset(bottom, 0, sizeof(float) * num * channels * bottom_offset);
@@ -495,7 +495,6 @@ void BackwardAvgPooling(const float* top, const int num, const int channels,
bottom[index] += top[top_index] / pool_size;
}
}
-
}
}
top += top_offset;