Repository: incubator-singa
Updated Branches:
  refs/heads/dev f07e3545c -> db5478efa


SINGA-174 Add Batch Normalization layer and Local Response Normalization
layer.

Add Local Response Normalization layer implementation in C++.
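
For reference, the forward pass added here normalizes each channel by a sum of
squares over its neighboring channels. A sketch of the formula as implemented
(note that alpha scales the raw window sum rather than the per-channel
average):

  y_c = \frac{x_c}{\bigl(k + \alpha \sum_{c' \in W(c)} x_{c'}^{2}\bigr)^{\beta}}

where W(c) spans local_size channels centered at channel c.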


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/db5478ef
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/db5478ef
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/db5478ef

Branch: refs/heads/dev
Commit: db5478efa051738ff3377908a8655201f9f9f18f
Parents: 96ed638
Author: WANG Ji <[email protected]>
Authored: Sat Jul 30 13:10:45 2016 +0800
Committer: WANG Ji <[email protected]>
Committed: Wed Aug 3 16:49:13 2016 +0800

----------------------------------------------------------------------
 src/model/layer/lrn.cc | 123 +++++++++++++++++++++++++++++++++++++-------
 test/singa/test_lrn.cc | 116 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 221 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/db5478ef/src/model/layer/lrn.cc
----------------------------------------------------------------------
diff --git a/src/model/layer/lrn.cc b/src/model/layer/lrn.cc
index f17b743..a624147 100644
--- a/src/model/layer/lrn.cc
+++ b/src/model/layer/lrn.cc
@@ -19,8 +19,9 @@
 *
 ************************************************************/
 #include "lrn.h"
+#include <vector>
 
-namespace singa{
+namespace singa {
 RegisterLayerClass(LRN);
 void LRN::Setup(const Shape& in_sample, const LayerConf& conf) {
   Layer::Setup(in_sample, conf);
@@ -33,27 +34,113 @@ void LRN::Setup(const Shape& in_sample, const LayerConf& conf) {
 }
 
 const Tensor LRN::Forward(int flag, const Tensor& input) {
-  //Tensor output;
-  //const float salpha = alpha_ / local_size_;
-  LOG(FATAL) << "Not implemented";
-  /* Tensor API may be need
-   * 1. set
-   * template <typename Dtype>
-   * void Set(Dtype val);
-   *
-   * 2. axpy
-   * 3. padding
-   *
-   *
-   */
-  Tensor output;
+  Tensor x = input.Clone();
+  x.Reshape(Shape{input.shape(0), input.Size() / input.shape(0)});
+  vector<Tensor> channels, images;
+  // for each image
+  for (size_t i = 0; i < input.shape(0); ++i) {
+    Tensor image = CopyRows(x, i, i + 1);
+    image.Reshape(Shape{input.shape(1), input.shape(2) * input.shape(3)});
+    // for each channel of the image
+    channels.clear();
+    for (size_t c = 0; c < input.shape(1); ++c) {
+      Tensor window =
+          CopyRows(image, std::max(0, static_cast<int>(c) - local_size_ / 2),
+                   std::min(input.shape(1), c + local_size_ / 2 + 1));
+      window = Square(window);
+
+      Tensor tmp, ch;
+      tmp.Reshape(Shape{input.shape(2) * input.shape(3)});
+      SumRows(window, &tmp);
+
+      tmp *= alpha_;
+      tmp += k_;
+      tmp = Pow(tmp, beta_);
+
+      ch = CopyRows(image, c, c + 1);
+      ch = ch / tmp;
+      ch.Reshape(Shape{input.shape(2), input.shape(3)});
+      channels.push_back(ch);
+    }
+    Tensor normalized_image = ConcatenateRows(channels);
+    normalized_image.Reshape(
+        Shape{input.shape(1), input.shape(2) * input.shape(3)});
+    images.push_back(normalized_image);
+  }
+  Tensor output = ConcatenateRows(images);
+  output.Reshape(input.shape());
+  buf_.push(input);
+
   return output;
 }
 
-const std::pair<Tensor, vector<Tensor>> LRN::Backward(
-    int flag, const Tensor& grad) {
-  LOG(FATAL) << "Not implemented";
+const std::pair<Tensor, vector<Tensor>> LRN::Backward(int flag,
+                                                      const Tensor& grad) {
   Tensor dx;
+  if ((flag & kTrain) == kTrain) {
+    Tensor dy = grad.Clone();
+    dy.Reshape(Shape{grad.shape(0), grad.Size() / grad.shape(0)});
+    Tensor x = buf_.top();
+    buf_.pop();
+    x.Reshape(dy.shape());
+    vector<Tensor> channels, images;
+    // for each image
+    for (size_t i = 0; i < grad.shape(0); ++i) {
+      Tensor image = CopyRows(x, i, i + 1);
+      image.Reshape(Shape{grad.shape(1), grad.shape(2) * grad.shape(3)});
+      // for each channel of the image
+      channels.clear();
+      for (size_t c = 0; c < grad.shape(1); ++c) {
+        Tensor window =
+            CopyRows(image, std::max(0, static_cast<int>(c) - local_size_ / 2),
+                     std::min(grad.shape(1), c + local_size_ / 2 + 1));
+        Tensor tmp;
+        tmp.Reshape(Shape{grad.shape(2) * grad.shape(3)});
+        window = Square(window);
+        SumRows(window, &tmp);
+        tmp *= alpha_;
+        tmp += k_;
+        tmp.Reshape(Shape{grad.shape(2), grad.shape(3)});
+        channels.push_back(tmp);
+      }
+      Tensor norm_image = ConcatenateRows(channels);
+      norm_image.Reshape(Shape{grad.shape(1), grad.shape(2) * grad.shape(3)});
+      images.push_back(norm_image);
+    }
+    Tensor norm = ConcatenateRows(images);
+    norm.Reshape(dy.shape());
+    dx = Pow(norm, -beta_);
+    dx = dx * dy;
+    Tensor tmp = dx * x;
+    tmp = tmp / norm;
+    images.clear();
+    for (size_t i = 0; i < grad.shape(0); ++i) {
+      Tensor image = CopyRows(tmp, i, i + 1);
+      image.Reshape(Shape{grad.shape(1), grad.shape(2) * grad.shape(3)});
+      // for each channel of the image
+      channels.clear();
+      for (size_t c = 0; c < grad.shape(1); ++c) {
+        Tensor window =
+            CopyRows(image, std::max(0, static_cast<int>(c) - local_size_ / 2),
+                     std::min(grad.shape(1), c + local_size_ / 2 + 1));
+        Tensor tmpr;
+        tmpr.Reshape(Shape{grad.shape(2) * grad.shape(3)});
+        SumRows(window, &tmpr);
+        tmpr.Reshape(Shape{grad.shape(2), grad.shape(3)});
+        channels.push_back(tmpr);
+      }
+      Tensor pooled_image = ConcatenateRows(channels);
+      pooled_image.Reshape(Shape{grad.shape(1), grad.shape(2) * grad.shape(3)});
+      images.push_back(pooled_image);
+    }
+    Tensor tmp2 = ConcatenateRows(images);
+    tmp2 *= (-2.0f * beta_ * alpha_);
+    tmp2 = tmp2 * x;
+    dx = dx + tmp2;
+    dx.Reshape(grad.shape());
+  } else {
+    LOG(ERROR) << "Do not call backward for evaluation phase";
+  }
   vector<Tensor> param_grad;
   return std::make_pair(dx, param_grad);
 }
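
For readers checking the Backward() hunk above, the gradient it appears to
compute is the standard LRN derivative, sketched here using the same window
W(c) as Forward() and writing n_c = k + \alpha \sum_{j \in W(c)} x_j^2:

  \frac{\partial L}{\partial x_i} =
      \frac{\partial L}{\partial y_i}\, n_i^{-\beta}
      - 2\alpha\beta\, x_i \sum_{c:\, i \in W(c)}
        \frac{\partial L}{\partial y_c}\, x_c\, n_c^{-(\beta + 1)}

The first term is the Pow(norm, -beta_) * dy product; the second is the
window-summed dx * x / norm term scaled by -2.0f * beta_ * alpha_.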

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/db5478ef/test/singa/test_lrn.cc
----------------------------------------------------------------------
diff --git a/test/singa/test_lrn.cc b/test/singa/test_lrn.cc
new file mode 100644
index 0000000..5de4535
--- /dev/null
+++ b/test/singa/test_lrn.cc
@@ -0,0 +1,116 @@
+/*********************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+************************************************************/
+
+#include "../src/model/layer/lrn.h"
+#include "gtest/gtest.h"
+
+using namespace singa;
+
+TEST(LRN, Setup) {
+  LRN lrn;
+  EXPECT_EQ("LRN", lrn.layer_type());
+
+  LayerConf conf;
+  LRNConf *lrn_conf = conf.mutable_lrn_conf();
+  lrn_conf->set_k(1.0);
+  lrn_conf->set_local_size(3);
+  lrn_conf->set_alpha(0.1);
+  lrn_conf->set_beta(0.75);
+  lrn.Setup(Shape{1}, conf);
+
+  EXPECT_FLOAT_EQ(1.0, lrn.k());
+  EXPECT_EQ(3, lrn.local_size());
+  EXPECT_FLOAT_EQ(0.1, lrn.alpha());
+  EXPECT_FLOAT_EQ(0.75, lrn.beta());
+}
+
+TEST(LRN, Forward) {
+  LRN lrn;
+  const float x[] = {1, 2, 3, 4, 5, 6, 7, 8};
+  Tensor in(Shape{2, 4, 1, 1});
+  in.CopyDataFromHostPtr(x, 8);
+
+  singa::LayerConf conf;
+  singa::LRNConf *lrn_conf = conf.mutable_lrn_conf();
+  lrn_conf->set_k(1.0);
+  lrn_conf->set_local_size(3);
+  lrn_conf->set_alpha(0.1);
+  lrn_conf->set_beta(0.75);
+  lrn.Setup(Shape{4, 1, 1}, conf);
+
+  Tensor out = lrn.Forward(kTrain, in);
+  const float *outptr = out.data<float>();
+  const auto &shape = out.shape();
+  EXPECT_EQ(4u, shape.size());
+  EXPECT_EQ(2u, shape[0]);
+  EXPECT_EQ(4u, shape[1]);
+  EXPECT_EQ(1u, shape[2]);
+  EXPECT_EQ(1u, shape[3]);
+
+  EXPECT_NEAR(0.737787, outptr[0], 1e-6f);
+  EXPECT_NEAR(1.037221, outptr[1], 1e-6f);
+  EXPECT_NEAR(1.080992, outptr[2], 1e-6f);
+  EXPECT_NEAR(1.563179, outptr[3], 1e-6f);
+  EXPECT_NEAR(1.149545, outptr[4], 1e-6f);
+  EXPECT_NEAR(0.930604, outptr[5], 1e-6f);
+  EXPECT_NEAR(0.879124, outptr[6], 1e-6f);
+  EXPECT_NEAR(1.218038, outptr[7], 1e-6f);
+}
+
+TEST(LRN, Backward) {
+  LRN lrn;
+  const float x[] = {1, 2, 3, 4, 5, 6, 7, 8};
+  Tensor in(Shape{2, 4, 1, 1});
+  in.CopyDataFromHostPtr(x, 8);
+
+  singa::LayerConf conf;
+  singa::LRNConf *lrn_conf = conf.mutable_lrn_conf();
+  lrn_conf->set_k(1.0);
+  lrn_conf->set_local_size(3);
+  lrn_conf->set_alpha(0.1);
+  lrn_conf->set_beta(0.75);
+  lrn.Setup(Shape{4, 1, 1}, conf);
+
+  Tensor out = lrn.Forward(kTrain, in);
+
+  const float dy_arr[] = {8, 7, 6, 5, 4, 3, 2, 1};
+  Tensor dy(Shape{2, 4, 1, 1});
+  dy.CopyDataFromHostPtr(dy_arr, 8);
+
+  const auto ret = lrn.Backward(singa::kTrain, dy);
+  singa::Tensor dx = ret.first;
+  const float *dxptr = dx.data<float>();
+  const auto &shape = dx.shape();
+  EXPECT_EQ(4u, shape.size());
+  EXPECT_EQ(2u, shape[0]);
+  EXPECT_EQ(4u, shape[1]);
+  EXPECT_EQ(1u, shape[2]);
+  EXPECT_EQ(1u, shape[3]);
+
+  EXPECT_NEAR(4.858288752f, dxptr[0], 1e-6f);
+  EXPECT_NEAR(1.04332631f, dxptr[1], 1e-6f);
+  EXPECT_NEAR(-0.952648779f, dxptr[2], 1e-6f);
+  EXPECT_NEAR(-0.38373312f, dxptr[3], 1e-6f);
+  EXPECT_NEAR(0.259424615f, dxptr[4], 1e-6f);
+  EXPECT_NEAR(-0.426475393f, dxptr[5], 1e-6f);
+  EXPECT_NEAR(-0.213195118f, dxptr[6], 1e-6f);
+  EXPECT_NEAR(-0.099276183f, dxptr[7], 1e-6f);
+}
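
For illustration, a minimal usage sketch that mirrors the test above; the
include path, configuration calls, and the kTrain flag are taken from
test_lrn.cc, while the helper function itself is hypothetical:

#include "../src/model/layer/lrn.h"

// Hypothetical helper: configure the new LRN layer and run a forward and
// backward pass over a small batch, following the pattern of test_lrn.cc.
void RunLRNExample() {
  singa::LRN lrn;
  singa::LayerConf conf;
  singa::LRNConf *lrn_conf = conf.mutable_lrn_conf();
  lrn_conf->set_k(1.0);          // additive constant k
  lrn_conf->set_local_size(3);   // window of 3 neighboring channels
  lrn_conf->set_alpha(0.1);
  lrn_conf->set_beta(0.75);
  lrn.Setup(singa::Shape{4, 1, 1}, conf);  // per-sample shape: C x H x W

  const float x[] = {1, 2, 3, 4, 5, 6, 7, 8};
  singa::Tensor in(singa::Shape{2, 4, 1, 1});  // batch of two 4-channel images
  in.CopyDataFromHostPtr(x, 8);

  singa::Tensor out = lrn.Forward(singa::kTrain, in);  // normalized output
  const auto ret = lrn.Backward(singa::kTrain, out);   // {dx, param gradients}
}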
