http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5f010caa/src/neuralnet/input_layer.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/input_layer.cc b/src/neuralnet/input_layer.cc
index f89369c..b4743f4 100644
--- a/src/neuralnet/input_layer.cc
+++ b/src/neuralnet/input_layer.cc
@@ -20,9 +20,9 @@
 *************************************************************/
 
 #include "neuralnet/input_layer.h"
-
 #include "mshadow/tensor.h"
-
+#include "utils/image_transform.h"
+#include "utils/tokenizer.h"
 namespace singa {
 
 using namespace mshadow;
@@ -33,6 +33,219 @@ using mshadow::Tensor;
 using std::string;
 using std::vector;
 
+/*****************ImagePreprocess**************************************/
+void ImagePreprocessLayer::Setup(const LayerProto& conf,
+    const vector<Layer*>& srclayers) {
+  CHECK_EQ(srclayers.size(), 1);
+  InputLayer::Setup(conf, srclayers);
+  scale_ = conf.rgbimage_conf().scale();
+  cropsize_ = conf.rgbimage_conf().cropsize();
+  mirror_ = conf.rgbimage_conf().mirror();
+  const auto& src = srclayers.at(0)->data(this);
+  const auto& shape = src.shape();
+  CHECK_EQ(shape.size(), 4);
+  CHECK_EQ(shape.at(2), shape.at(3));
+  if (cropsize_ != 0 && cropsize_ != shape.at(2)) {
+    data_.Reshape(vector<int>{shape.at(0), shape.at(1), cropsize_, cropsize_});
+  } else {
+    data_ = src;
+  }
+}
+
+void ImagePreprocessLayer::ComputeFeature(int flag,
+    const vector<Layer*>& srclayers) {
+  const auto& srcdata = srclayers.at(0)->data(this);
+  int batchsize = srcdata.shape()[0], channel = srcdata.shape()[1];
+  int height = srcdata.shape()[2], width = srcdata.shape()[3];
+  const float* srcdptr = srcdata.cpu_data();
+  float* dptr = data_.mutable_cpu_data();
+  int srcimage_size = channel * height * width;
+  int image_size = channel * data_.shape()[2] * data_.shape()[3];
+  for (int k = 0; k < batchsize; k++) {
+    int h_offset = 0, w_offset = 0;
+    if (cropsize_ > 0 && ((flag & kTrain) == kTrain)) {
+      h_offset = rand() % (height - cropsize_);
+      w_offset = rand() % (width - cropsize_);
+    }
+    bool do_mirror = mirror_ && rand() % 2 && ((flag & kTrain) == kTrain);
+    ImageTransform(srcdptr + k * srcimage_size, nullptr, do_mirror, cropsize_,
+        cropsize_, h_offset, w_offset, channel, height, width,
+        scale_, dptr + k * image_size);
+  }
+}
+
+/*************StoreInputLayer******************/
+StoreInputLayer::~StoreInputLayer() {
+  if (store_ != nullptr) {
+    delete store_;
+  }
+}
+
+void StoreInputLayer::Setup(const LayerProto& conf,
+    const vector<Layer*>& srclayers) {
+  InputLayer::Setup(conf, srclayers);
+  batchsize_ = conf.store_conf().batchsize();
+}
+
+void StoreInputLayer::ComputeFeature(int flag,
+    const vector<Layer*>& srclayers) {
+  string key, val;
+  if (store_ == nullptr) {
+    store_ = io::OpenStore(layer_conf_.store_conf().backend(),
+                             layer_conf_.store_conf().path(),
+                             io::kRead);
+  }
+  for (int k = 0; k < batchsize_; k++) {
+    if (!store_->Read(&key, &val)) {
+      store_->SeekToFirst();
+      CHECK(store_->Read(&key, &val));
+    }
+    // TODO(wangwei) random skip and shuffle among this mini-batch
+    Parse(k, flag, key, val);
+  }
+}
+/*********SingleLabelRecordLayer******************/
+void SingleLabelRecordLayer::Setup(const LayerProto& conf,
+    const vector<Layer*>& srclayers) {
+  StoreInputLayer::Setup(conf, srclayers);
+
+  vector<int> shape {batchsize_};
+  for (int s : conf.store_conf().shape())
+    shape.push_back(s);
+  data_.Reshape(shape);
+  aux_data_.resize(batchsize_);
+}
+void SingleLabelRecordLayer::ComputeFeature(int flag,
+    const vector<Layer*>& srclayers) {
+  StoreInputLayer::ComputeFeature(flag, srclayers);
+
+  auto& store_conf = layer_conf_.store_conf();
+  if (store_conf.has_mean_file() && mean_.count() == 0) {
+    mean_.Reshape(vector<int>{data_.count() / batchsize_});
+    LoadRecord(store_conf.backend(), store_conf.mean_file(), &mean_);
+  } else if (store_conf.has_mean_value() && mean_.count() == 0) {
+    mean_.Reshape(vector<int>{data_.count() / batchsize_});
+    for (int i = 0; i < data_.count() / batchsize_; i++)
+      mean_.mutable_cpu_data()[i] = store_conf.mean_value();
+  }
+  if (store_conf.has_std_file() && std_.count() == 0) {
+    std_.Reshape(vector<int>{data_.count() / batchsize_});
+    LoadRecord(store_conf.backend(), store_conf.std_file(), &std_);
+    // TODO(wangwei) check std[i] != 0
+  } else if (store_conf.has_std_value() && std_.count() == 0) {
+    std_.Reshape(vector<int>{data_.count() / batchsize_});
+    CHECK_NE(store_conf.std_value(), 0);
+    for (int i = 0; i < data_.count() / batchsize_; i++)
+      std_.mutable_cpu_data()[i] = store_conf.std_value();
+  }
+
+  if (mean_.count()) {
+    const float* mean = mean_.cpu_data();
+    for (int k = 0; k < batchsize_; k++) {
+      float* dptr = data_.mutable_cpu_data() + k * mean_.count();
+      for (int i = 0; i < mean_.count(); i++) {
+        dptr[i] -= mean[i];
+      }
+    }
+  }
+  if (std_.count()) {
+    const float* std = std_.cpu_data();
+    for (int k = 0; k < batchsize_; k++) {
+      float* dptr = data_.mutable_cpu_data() + k * std_.count();
+      for (int i = 0; i < std_.count(); i++) {
+        dptr[i] /= std[i];
+      }
+    }
+  }
+}
+/*****************CSVRecordLayer*******************/
+void CSVRecordLayer::Setup(const LayerProto& conf,
+    const vector<Layer*>& srclayers) {
+  SingleLabelRecordLayer::Setup(conf, srclayers);
+  sep_ = conf.store_conf().separator();
+}
+
+void CSVRecordLayer::LoadRecord(const string& backend,
+    const string& path, Blob<float>* to) {
+  io::Store* store = io::OpenStore(backend, path, io::kRead);
+  string key, val;
+  CHECK(store->Read(&key, &val));
+  float* ptr = to->mutable_cpu_data();
+  Tokenizer t(val, sep_);
+  string x;
+  for (int i = 0; i < to->count(); i++) {
+    t >> x;
+    ptr[i] = stof(x);
+  }
+  CHECK(!t.Valid());
+  delete store;
+}
+
+bool CSVRecordLayer::Parse(int k, int flag, const string& key,
+    const string& value) {
+  float* ptr = data_.mutable_cpu_data() + k * data_.count() / batchsize_;
+  Tokenizer t(value, sep_);
+  string x;
+  // parse label if not deploy phase and has_label is set.
+  if ((flag & kDeploy) == 0 && layer_conf_.store_conf().has_label()) {
+    t >> x;
+    aux_data_[k] = stoi(x);
+  }
+  for (int i = 0; i < data_.count() / batchsize_; i++) {
+    t >> x;
+    ptr[i] = stof(x);
+  }
+  CHECK(!t.Valid());
+  return true;
+}
+
+
+/*********ProtoRecordLayer******************/
+void ProtoRecordLayer::Setup(const LayerProto& conf,
+    const vector<Layer*>& srclayers) {
+  SingleLabelRecordLayer::Setup(conf, srclayers);
+  encoded_ = conf.store_conf().encoded();
+}
+
+void ProtoRecordLayer::LoadRecord(const string& backend,
+    const string& path, Blob<float>* to) {
+  io::Store* store = io::OpenStore(backend, path, io::kRead);
+  string key, val;
+  CHECK(store->Read(&key, &val));
+  SingleLabelImageRecord image;
+  image.ParseFromString(val);
+  CHECK_EQ(to->count(), image.data_size());
+  float* ptr = to->mutable_cpu_data();
+  for (int i = 0; i < to->count(); i++)
+    ptr[i] = image.data(i);
+  delete store;
+}
+
+bool ProtoRecordLayer::Parse(int k, int flag, const string& key,
+    const string& value) {
+  SingleLabelImageRecord image;
+  image.ParseFromString(value);
+  int size = data_.count() / batchsize_;
+  if (image.data_size()) {
+    CHECK_EQ(size, image.data_size());
+    float* ptr = data_.mutable_cpu_data() + k * size;
+    for (int i = 0; i < size; i++)
+      ptr[i] = image.data(i);
+  } else if (image.pixel().size()) {
+    CHECK_EQ(size, image.pixel().size());
+    float* ptr = data_.mutable_cpu_data() + k * size;
+    string pixel = image.pixel();
+    for (int i = 0; i < size; i++)
+      ptr[i] = static_cast<float>(static_cast<uint8_t>(pixel[i]));
+  } else {
+    LOG(ERROR) << "record has neither data nor pixel field";
+  }
+  if ((flag & kDeploy) == 0) {  // deploy mode does not have label
+    aux_data_.at(k) = image.label();
+  }
+  return true;
+}
+
 /************* Implementation for ParserLayer ***********/
 void ParserLayer::ComputeFeature(int flag, const vector<Layer*>& srclayers) {
   CHECK_EQ(srclayers.size(), 1);
@@ -213,7 +426,6 @@ void LabelLayer::ParseRecords(int flag, const vector<Record>& records,
   }
   CHECK_EQ(rid, blob->shape()[0]);
 }
-
 /**************** Implementation for MnistLayer ******************/
 void MnistLayer::ParseRecords(int flag, const vector<Record>& records,
     Blob<float>* blob) {
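
The store-backed input layers above are exercised end to end by the unit
tests added further below. As a quick orientation, a minimal usage sketch
(hedged; it simply mirrors the new CSV unit test, with the same file
contents):

    // Two CSV records per mini-batch: labels land in aux_data(),
    // features in data().
    singa::LayerProto conf;
    auto* store = conf.mutable_store_conf();
    store->set_backend("textfile");        // one record per text line
    store->set_path("src/test/test.csv");
    store->set_batchsize(2);
    store->add_shape(3);                   // 3 feature values per record

    singa::CSVRecordLayer layer;
    layer.Setup(conf, std::vector<singa::Layer*>{});
    layer.ComputeFeature(singa::kTrain, std::vector<singa::Layer*>{});
    // For rows "12,3.2,1,14.1" and "2,0.2,0,1.1":
    //   layer.aux_data() == {12, 2}
    //   layer.data(nullptr) holds {3.2, 1, 14.1, 0.2, 0, 1.1}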

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5f010caa/src/neuralnet/loss_layer.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/loss_layer.cc b/src/neuralnet/loss_layer.cc
index b5447f6..3a5f5fa 100644
--- a/src/neuralnet/loss_layer.cc
+++ b/src/neuralnet/loss_layer.cc
@@ -92,7 +92,7 @@ void SoftmaxLossLayer::ComputeFeature(int flag,
   Tensor<cpu, 2> prob(data_.mutable_cpu_data(), s);
   Tensor<cpu, 2> src(srclayers[0]->mutable_data(this)->mutable_cpu_data(), s);
   Softmax(prob, src);
-  const float* label = srclayers[1]->data(this).cpu_data();
+  const auto& label = srclayers[1]->aux_data(this);
   const float* probptr = prob.dptr;
   float loss = 0, precision = 0;
   for (int n = 0; n < batchsize_; n++) {
@@ -123,7 +123,7 @@ void SoftmaxLossLayer::ComputeFeature(int flag,
 
 void SoftmaxLossLayer::ComputeGradient(int flag,
     const vector<Layer*>& srclayers) {
-  const float* label = srclayers[1]->data(this).cpu_data();
+  const auto& label = srclayers[1]->aux_data(this);
   Blob<float>* gsrcblob = srclayers[0]->mutable_grad(this);
   gsrcblob->CopyFrom(data_);
   float* gsrcptr = gsrcblob->mutable_cpu_data();
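
Context for the aux_data change above: the gradient loop itself sits outside
this hunk, but it follows the standard softmax cross-entropy form. A hedged
sketch with placeholder names (batchsize, num_classes): gsrcblob starts as a
copy of the probabilities via the CopyFrom(data_) above, then 1 is
subtracted at each sample's true class.

    for (int n = 0; n < batchsize; n++)
      gsrcptr[n * num_classes + label[n]] -= 1.0f;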

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5f010caa/src/proto/job.proto
----------------------------------------------------------------------
diff --git a/src/proto/job.proto b/src/proto/job.proto
index 950f785..2d6dfbe 100644
--- a/src/proto/job.proto
+++ b/src/proto/job.proto
@@ -190,8 +190,6 @@ message LayerProto {
   optional ConcateProto concate_conf = 31;
   // configuration for dropout layer
   optional DropoutProto dropout_conf = 33;
-  // configuration for euclideanloss layer
-  optional EuclideanLossProto euclideanloss_conf = 50;
   // configuration for inner product layer
   optional InnerProductProto innerproduct_conf = 34;
   // configuration for local response normalization layer
@@ -218,6 +216,9 @@ message LayerProto {
   optional SoftmaxLossProto softmaxloss_conf = 40;
   // configuration for split layer
   optional SplitProto split_conf = 42;
+  // configuration for store input layers
+  optional StoreProto store_conf = 51;
+
 
 
   // overrides the partition dimension for neural net
@@ -316,9 +317,20 @@ message SplitProto {
   optional int32 num_splits = 1 [default = 1];
 }
 
-message EuclideanLossProto {
+message StoreProto {
+  required string backend = 1;
+  optional string path = 2;
+  optional string separator = 3 [default = ","];
+  optional string mean_file = 4;
+  optional string std_file = 5;
+  optional float mean_value = 6;
+  optional float std_value = 7;
+  optional int32 batchsize = 8 [default = 1];
+  repeated int32 shape = 9;
+  optional bool encoded = 10 [default = false];
+  optional int32 random_skip = 11 [default = 0];
+  optional bool has_label = 12 [default = true];
 }
-
 message SoftmaxLossProto {
   // computing accuracy against topk results
   optional int32 topk = 1 [default = 1];
@@ -525,11 +537,12 @@ enum InitMethod {
 enum LayerType {
   // Data layers
   //  - Load records from file, database
+  kProtoRecord = 29;
+  kCSVRecord = 30;
+  kImagePreprocess = 31;
   kLMDBData = 17;
   kPrefetch = 19;
   kShardData = 3;
-  // Parser layers
-  //  - Parse features from records, e.g., pixels
   kLabel = 18;
   kMnist = 7;
   kRGBImage = 10;
@@ -582,6 +595,7 @@ enum Phase {
   kForward = 32;
   kBackward = 64;
   kLoss = 128;
+  kDeploy = 256;
 }
 
 enum ParamType {
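
Since the Phase values are declared as powers of two, several phases can be
OR-ed into the single int flag a layer receives, which is why input_layer.cc
tests (flag & kTrain) == kTrain rather than flag == kTrain. A small hedged
sketch (kTrain's numeric value is not shown in this hunk):

    int flag = singa::kTrain | singa::kForward;  // forward pass during training
    bool training = (flag & singa::kTrain) == singa::kTrain;     // true
    bool deploying = (flag & singa::kDeploy) == singa::kDeploy;  // false, labels expected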

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5f010caa/src/test/test_csv_record_layer.cc
----------------------------------------------------------------------
diff --git a/src/test/test_csv_record_layer.cc b/src/test/test_csv_record_layer.cc
new file mode 100644
index 0000000..4517698
--- /dev/null
+++ b/src/test/test_csv_record_layer.cc
@@ -0,0 +1,92 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+#include <string>
+#include <vector>
+#include <fstream>
+
+#include "gtest/gtest.h"
+#include "neuralnet/input_layer.h"
+#include "proto/job.pb.h"
+
+class CSVRecordLayerTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    std::string path = "src/test/test.csv";
+    std::ofstream ofs(path, std::ofstream::out);
+    ASSERT_TRUE(ofs.is_open());
+    ofs << "12,3.2,1,14.1\n";
+    ofs << "2,0.2,0,1.1\n";
+    ofs << "1,2.2,1,4.1\n";
+    ofs.close();
+    auto conf = csv_conf.mutable_store_conf();
+    conf->set_path(path);
+    conf->set_batchsize(2);
+    conf->add_shape(3);
+    conf->set_backend("textfile");
+  }
+  singa::LayerProto csv_conf;
+};
+
+TEST_F(CSVRecordLayerTest, Setup) {
+  singa::CSVRecordLayer layer;
+  layer.Setup(csv_conf, std::vector<singa::Layer*>{});
+  EXPECT_EQ(2, layer.aux_data().size());
+  EXPECT_EQ(6, layer.data(nullptr).count());
+}
+
+TEST_F(CSVRecordLayerTest, ComputeFeature) {
+  singa::CSVRecordLayer csv;
+  csv.Setup(csv_conf, std::vector<singa::Layer*>{});
+  csv.ComputeFeature(singa::kTrain, std::vector<singa::Layer*>{});
+
+  EXPECT_EQ(12, csv.aux_data()[0]);
+  EXPECT_EQ(2, csv.aux_data()[1]);
+  auto data = csv.data(nullptr);
+  EXPECT_EQ(3.2f, data.cpu_data()[0]);
+  EXPECT_EQ(14.1f, data.cpu_data()[2]);
+  EXPECT_EQ(0.2f, data.cpu_data()[3]);
+  EXPECT_EQ(1.1f, data.cpu_data()[5]);
+}
+TEST_F(CSVRecordLayerTest, ComputeFeatureDeploy) {
+  singa::CSVRecordLayer csv;
+  csv_conf.mutable_store_conf()->set_shape(0, 4);
+  csv.Setup(csv_conf, std::vector<singa::Layer*>{});
+  csv.ComputeFeature(singa::kDeploy, std::vector<singa::Layer*>{});
+
+  auto data = csv.data(nullptr);
+  EXPECT_EQ(12.f, data.cpu_data()[0]);
+  EXPECT_EQ(1.f, data.cpu_data()[2]);
+  EXPECT_EQ(14.1f, data.cpu_data()[3]);
+  EXPECT_EQ(0.2f, data.cpu_data()[5]);
+}
+
+TEST_F(CSVRecordLayerTest, SeekToFirst) {
+  singa::CSVRecordLayer csv;
+  csv.Setup(csv_conf, std::vector<singa::Layer*>{});
+  csv.ComputeFeature(singa::kTrain, std::vector<singa::Layer*>{});
+  csv.ComputeFeature(singa::kTrain, std::vector<singa::Layer*>{});
+
+  auto data = csv.data(nullptr);
+  EXPECT_EQ(2.2f, data.cpu_data()[0]);
+  EXPECT_EQ(4.1f, data.cpu_data()[2]);
+  EXPECT_EQ(3.2f, data.cpu_data()[3]);
+  EXPECT_EQ(14.1f, data.cpu_data()[5]);
+}

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5f010caa/src/test/test_proto_record_layer.cc
----------------------------------------------------------------------
diff --git a/src/test/test_proto_record_layer.cc b/src/test/test_proto_record_layer.cc
new file mode 100644
index 0000000..0a39c13
--- /dev/null
+++ b/src/test/test_proto_record_layer.cc
@@ -0,0 +1,122 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+#include <string>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "neuralnet/input_layer.h"
+#include "proto/job.pb.h"
+#include "proto/common.pb.h"
+
+class ProtoRecordLayerTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    std::string path = "src/test/test.bin";
+    auto* store = singa::io::CreateStore("kvfile");
+    store->Open(path, singa::io::kCreate);
+    {
+      singa::SingleLabelImageRecord image;
+      image.add_data(3.2);
+      image.add_data(1);
+      image.add_data(14.1);
+      image.set_label(12);
+      std::string val;
+      image.SerializeToString(&val);
+      store->Write("0", val);
+    }
+
+    {
+      singa::SingleLabelImageRecord image;
+      image.add_data(0.2);
+      image.add_data(0);
+      image.add_data(1.1);
+      image.set_label(2);
+      std::string val;
+      image.SerializeToString(&val);
+      store->Write("1", val);
+    }
+
+    {
+      singa::SingleLabelImageRecord image;
+      image.add_data(2.2);
+      image.add_data(1);
+      image.add_data(4.1);
+      image.set_label(1);
+      std::string val;
+      image.SerializeToString(&val);
+      store->Write("2", val);
+    }
+    store->Flush();
+    store->Close();
+
+    auto conf = image_conf.mutable_store_conf();
+    conf->set_path(path);
+    conf->set_batchsize(2);
+    conf->add_shape(3);
+    conf->set_backend("kvfile");
+  }
+  singa::LayerProto image_conf;
+};
+
+TEST_F(ProtoRecordLayerTest, Setup) {
+  singa::ProtoRecordLayer layer;
+  layer.Setup(image_conf, std::vector<singa::Layer*>{});
+  EXPECT_EQ(2, layer.aux_data().size());
+  EXPECT_EQ(6, layer.data(nullptr).count());
+}
+
+TEST_F(ProtoRecordLayerTest, ComputeFeature) {
+  singa::ProtoRecordLayer image;
+  image.Setup(image_conf, std::vector<singa::Layer*>{});
+  image.ComputeFeature(singa::kTrain, std::vector<singa::Layer*>{});
+
+  EXPECT_EQ(12, image.aux_data()[0]);
+  EXPECT_EQ(2, image.aux_data()[1]);
+  auto data = image.data(nullptr);
+  EXPECT_EQ(3.2f, data.cpu_data()[0]);
+  EXPECT_EQ(14.1f, data.cpu_data()[2]);
+  EXPECT_EQ(0.2f, data.cpu_data()[3]);
+  EXPECT_EQ(1.1f, data.cpu_data()[5]);
+}
+TEST_F(ProtoRecordLayerTest, ComputeFeatureDeploy) {
+  singa::ProtoRecordLayer image;
+  image.Setup(image_conf, std::vector<singa::Layer*>{});
+  image.ComputeFeature(singa::kDeploy, std::vector<singa::Layer*>{});
+
+  auto data = image.data(nullptr);
+  EXPECT_EQ(3.2f, data.cpu_data()[0]);
+  EXPECT_EQ(14.1f, data.cpu_data()[2]);
+  EXPECT_EQ(0.2f, data.cpu_data()[3]);
+  EXPECT_EQ(1.1f, data.cpu_data()[5]);
+}
+
+TEST_F(ProtoRecordLayerTest, SeekToFirst) {
+  singa::ProtoRecordLayer image;
+  image.Setup(image_conf, std::vector<singa::Layer*>{});
+  image.ComputeFeature(singa::kTrain, std::vector<singa::Layer*>{});
+  image.ComputeFeature(singa::kTrain, std::vector<singa::Layer*>{});
+
+  auto data = image.data(nullptr);
+  EXPECT_EQ(2.2f, data.cpu_data()[0]);
+  EXPECT_EQ(4.1f, data.cpu_data()[2]);
+  EXPECT_EQ(3.2f, data.cpu_data()[3]);
+  EXPECT_EQ(14.1f, data.cpu_data()[5]);
+}

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/5f010caa/src/utils/image_transform.cc
----------------------------------------------------------------------
diff --git a/src/utils/image_transform.cc b/src/utils/image_transform.cc
new file mode 100644
index 0000000..4851334
--- /dev/null
+++ b/src/utils/image_transform.cc
@@ -0,0 +1,56 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+#include "utils/image_transform.h"
+
+namespace singa {
+
+void ImageTransform(const float* in, const float* mean, bool mirror, int h_crop,
+    int w_crop, int h_offset, int w_offset, int channel, int height, int width,
+    float scale, float* out) {
+  if (h_crop == 0) {
+    CHECK_EQ(h_offset, 0);
+    h_crop = height;
+  }
+  if (w_crop == 0) {
+    CHECK_EQ(w_offset, 0);
+    w_crop = width;
+  }
+  CHECK_NE(scale, 0);
+
+  int out_idx = 0, in_idx = 0;
+  for (int c = 0; c < channel; c++) {
+    for (int h = 0; h < h_crop; h++) {
+      for (int w = 0; w < w_crop; w++) {
+        in_idx = (c * height + h_offset + h) * width + w_offset + w;
+        if (mirror) {
+          out_idx = (c * h_crop + h) * w_crop + (w_crop - 1 - w);
+        } else {
+          out_idx = (c * h_crop + h) * w_crop + w;
+        }
+        out[out_idx] = in[in_idx];
+        if (mean != nullptr)
+          out[out_idx] -= mean[in_idx];
+        out[out_idx] *= scale;
+      }
+    }
+  }
+}
+}  // namespace singa
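
To make the index arithmetic concrete, a worked sketch (not part of the
commit) of ImageTransform on a one-channel 2x3 image:

    // Input (channel=1, height=2, width=3), row-major:
    //   1 2 3
    //   4 5 6
    // Cropping 2x2 at (h_offset=0, w_offset=1) selects {2 3; 5 6};
    // mirroring then flips each row horizontally, giving {3 2; 6 5}.
    float in[6] = {1, 2, 3, 4, 5, 6};
    float out[4];
    singa::ImageTransform(in, nullptr, true /*mirror*/, 2 /*h_crop*/,
                          2 /*w_crop*/, 0 /*h_offset*/, 1 /*w_offset*/,
                          1 /*channel*/, 2 /*height*/, 3 /*width*/,
                          1.0f /*scale*/, out);
    // out now holds {3, 2, 6, 5}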
