http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/neuralnet/neuron_layer.h
----------------------------------------------------------------------
diff --git a/include/neuralnet/neuron_layer.h b/include/neuralnet/neuron_layer.h
deleted file mode 100644
index 51ba304..0000000
--- a/include/neuralnet/neuron_layer.h
+++ /dev/null
@@ -1,248 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_NEURON_LAYER_H_
-#define SINGA_NEURALNET_NEURON_LAYER_H_
-
-#include <vector>
-#include "neuralnet/layer.h"
-#include "proto/job.pb.h"
-
-/**
- * \file this file includes the declarations neuron layer classes that conduct
- * the transformation of features.
- */
-namespace singa {
-/**
- * Convolution layer.
- */
-class ConvolutionLayer : public NeuronLayer {
- public:
-  ~ConvolutionLayer();
-
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-  const std::vector<Param*> GetParams() const override {
-    std::vector<Param*> params{weight_, bias_};
-    return params;
-  }
-  ConnectionType src_neuron_connection(int k) const  override {
-    // CHECK_LT(k, srclayers_.size());
-    return kOneToAll;
-  }
-
- protected:
-  int kernel_, pad_,  stride_;
-  int batchsize_,  channels_, height_, width_;
-  int col_height_, col_width_, conv_height_, conv_width_, num_filters_;
-  Param* weight_, *bias_;
-  Blob<float> col_data_, col_grad_;
-};
-
-/**
- * Use im2col from Caffe
- */
-class CConvolutionLayer : public ConvolutionLayer {
- public:
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-};
-
-class DropoutLayer : public NeuronLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
- protected:
-  // drop probability
-  float pdrop_;
-  /* record which neuron is dropped, required for back propagating gradients,
-   * if mask[i]=0, then the i-th neuron is dropped.
-   */
-  Blob<float> mask_;
-};
-/**
- * Local Response Normalization edge
- *
- * b_i=a_i/x_i^beta
- * x_i=knorm+alpha*\sum_{j=max(0,i-n/2}^{min(N,i+n/2}(a_j)^2
- * n is size of local response area.
- * a_i, the activation (after ReLU) of a neuron convolved with the i-th kernel.
- * b_i, the neuron after normalization, N is the total num of kernels
- */
-class LRNLayer : public NeuronLayer {
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-
- protected:
-  //! shape of the bottom layer feature
-  int batchsize_, channels_, height_, width_;
-  //! size local response (neighbor) area
-  int lsize_;
-  //! hyper-parameter
-  float alpha_, beta_, knorm_;
-  Blob<float> norm_;
-};
-
-class PoolingLayer : public NeuronLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-
- protected:
-  int kernel_, pad_, stride_;
-  int batchsize_, channels_, height_, width_, pooled_height_, pooled_width_;
-  PoolingProto_PoolMethod pool_;
-};
-
-/**
- * Use book-keeping for BP following Caffe's pooling implementation
- */
-class CPoolingLayer : public PoolingLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers);
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
- private:
-  Blob<float> mask_;
-};
-
-class ReLULayer : public NeuronLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-};
-
-class InnerProductLayer : public NeuronLayer {
- public:
-  ~InnerProductLayer();
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-  const std::vector<Param*> GetParams() const override {
-    std::vector<Param*> params{weight_, bias_};
-    return params;
-  }
-
- private:
-  int batchsize_;
-  int vdim_, hdim_;
-  bool transpose_;
-  Param *weight_, *bias_;
-};
-
-/**
- * This layer apply scaled Tan function to neuron activations.
- * f(x)=1.7159047  tanh(0.66666667 x)
- */
-class STanhLayer : public NeuronLayer {
- public:
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-};
-
-/**
- * This layer apply Sigmoid function to neuron activations.
- * f(x)=1/(1+exp(-x))
- * f'(x)=f(x)*(1-f(x))
- */
-class SigmoidLayer: public Layer {
- public:
-  using Layer::ComputeFeature;
-  using Layer::ComputeGradient;
-
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-};
-
-
-/**
- * Base layer for RBM models.
- */
-class RBMLayer: virtual public Layer {
- public:
-  virtual ~RBMLayer() {}
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
-  const Blob<float>& neg_data(const Layer* layer) {
-    return neg_data_;
-  }
-  Blob<float>* mutable_neg_data(const Layer* layer) {
-    return &neg_data_;
-  }
-  const std::vector<Param*> GetParams() const override {
-    std::vector<Param*> params{weight_, bias_};
-    return params;
-  }
-  virtual Blob<float>* Sample(int flat);
-
- protected:
-  //! if ture, sampling according to guassian distribution
-  bool gaussian_;
-  //! dimension of the hidden layer
-  int hdim_;
-  //! dimension of the visible layer
-  int vdim_;
-  int batchsize_;
-  bool first_gibbs_;
-  Param* weight_, *bias_;
-
-  Blob<float> neg_data_;
-  Blob<float> neg_sample_;
-  Blob<float> sample_;
-};
-
-/**
- * RBM visible layer
- */
-class RBMVisLayer: public RBMLayer, public LossLayer {
- public:
-  ~RBMVisLayer();
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-
- private:
-  RBMLayer* hid_layer_;
-  Layer* input_layer_;
-};
-/**
- * RBM hidden layer
- */
-class RBMHidLayer: public RBMLayer {
- public:
-  ~RBMHidLayer();
-  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
-  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
-  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
-
- private:
-  RBMLayer *vis_layer_;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_NEURALNET_NEURON_LAYER_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/neuralnet/output_layer.h
----------------------------------------------------------------------
diff --git a/include/neuralnet/output_layer.h b/include/neuralnet/output_layer.h
deleted file mode 100644
index d48d805..0000000
--- a/include/neuralnet/output_layer.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-* 
-*   http://www.apache.org/licenses/LICENSE-2.0
-* 
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_NEURALNET_OUTPUT_LAYER_H_
-#define SINGA_NEURALNET_OUTPUT_LAYER_H_
-
-// currently no output sub-classes are defined
-
-#endif  // SINGA_NEURALNET_OUTPUT_LAYER_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/server.h
----------------------------------------------------------------------
diff --git a/include/server.h b/include/server.h
deleted file mode 100644
index 4b75430..0000000
--- a/include/server.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_SERVER_H_
-#define SINGA_SERVER_H_
-
-#include <unordered_map>
-#include <vector>
-#include "comm/socket.h"
-#include "proto/job.pb.h"
-#include "utils/param.h"
-#include "utils/updater.h"
-
-namespace singa {
-
- /* Repsond to worker's get/put/udpate request, and periodically syncing with
-  * other servers.
-  *
-  * Normally, the Server creates a response message for each request which
-  * will be sent back to the one who issued the request. However, if the 
request
-  * are not processed successfully, the original message will be returned. The
-  * sever does not know the returned message is a response or the original
-  * message. It just sends it to the router. The router will decided to
-  * re-send the request to the server or send it to the worker.
-  */
-class Server {
- public:
-  ~Server();
-  Server(int group_id, int server_id,
-      const JobProto& job_conf,
-      const std::vector<int>& slice2group,
-      const std::vector<int>& slice2server);
-  void Run();
-  inline int grp_id() const { return grp_id_; }
-  inline int id() const { return id_; }
-
- protected:
-  /**
-   * Process GET request.
-   *
-   * @return the orignal message or a response message which contains the 
values
-   * of the Param with the request version.
-   */
-  Msg* HandleGet(Msg** msg);
-  /**
-   * Process Update request.
-   *
-   * It waits until received the gradients from all workers from the same 
worker
-   * group. After updating, it responses to each sender with the new Param
-   * values. It may generate a sync message to the server group that maintains
-   * the global version of the updated Param (slice).
-   *
-   * Note: there is no counter for each worker group on the number of received
-   * update requests. Hence it is possible that the server would conduct the
-   * update when it receives x requests from group a and y requests from group
-   * b where x + y = group size. To avoid this problem, we can
-   * -# maintain request list for each group for each Param at the server side
-   * -# do not span a worker group among multiple nodes. then the updates from
-   * the same group would be locally aggregated on the worker node. And the
-   * server would conduct the update immediately after receiving the aggregated
-   * request.
-   * -# launch only one worker group.
-   *
-   * @return the orignal message or response message
-   */
-  const std::vector<Msg*> HandleUpdate(Msg **msg);
-  /**
-   * Process PUT request.
-   *
-   * @return the original message or response message. If we don't want to
-   * acknowledge the put request, then return nullptr.
-   */
-  Msg* HandlePut(Msg **msg);
-  /**
-   * Handle sync request from other server groups.
-   *
-   * It adds updates of Param (slice) from other server groups directly to
-   * local Param (slice). Currently, each Param (slice) has a master group,
-   * i.e., slice2group_[sliceid], which would receive such requests from all
-   * other server groups for the Param object.
-   *
-   * @param msg request msg containing the parameter updates
-   * @return response msg that contains the fresh parameter values.
-   */
-  Msg* HandleSyncRequest(Msg** msg);
-  /**
-   * Handle sync response.
-   *
-   * The response msg includes the latest values of a Param object from the
-   * server group that maintainers this Param object.
-   * The local Param values are replaced with the addition result of local
-   * udpates since the sync request was sent and the received Param values.
-   *
-   * @param response message
-   */
-  void HandleSyncResponse(Msg** msg);
-
- protected:
-  int grp_id_ = -1;
-  int id_ = -1;
-  Updater* updater_ = nullptr;
-  //!< map from slice ID to slice and deleted in the destructor
-  std::unordered_map<int, ParamEntry*> shard_;
-  std::vector<int> slice2group_, slice2server_;
-  //!< num of updates from last sync with master server group for a param/slice
-  std::vector<int> n_updates_;
-  //!< num of sync requests that have not been responded
-  std::vector<int> n_pending_sync_;
-  std::vector<Blob<float>> last_sync_;
-  std::unordered_map<int, std::vector<Msg*>> buffer_requests_;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_SERVER_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa.h
----------------------------------------------------------------------
diff --git a/include/singa.h b/include/singa.h
deleted file mode 100644
index ddfec0e..0000000
--- a/include/singa.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_SINGA_H_
-#define SINGA_SINGA_H_
-
-#include "comm/socket.h"
-#include "io/store.h"
-#include "neuralnet/neuralnet.h"
-#include "neuralnet/layer.h"
-#include "proto/job.pb.h"
-#include "proto/singa.pb.h"
-#include "utils/common.h"
-#include "utils/param.h"
-#include "utils/singleton.h"
-#include "utils/factory.h"
-#include "./driver.h"
-
-#endif  // SINGA_SINGA_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/comm/msg.h
----------------------------------------------------------------------
diff --git a/include/singa/comm/msg.h b/include/singa/comm/msg.h
new file mode 100644
index 0000000..50a9b81
--- /dev/null
+++ b/include/singa/comm/msg.h
@@ -0,0 +1,238 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_COMM_MSG_H_
+#define SINGA_COMM_MSG_H_
+
+// TODO(wangwei): make it a compiler argument
+#define USE_ZMQ
+
+#include <utility>
+#ifdef USE_ZMQ
+#include <czmq.h>
+#endif
+
+namespace singa {
+/**
+ * Wrapper to generate message address
+ * @param grp worker/server group id
+ * @param id_or_proc worker/server id or procs id
+ * @param type msg type
+ */
+inline int Addr(int grp, int id_or_proc, int type) {
+  return (grp << 16) | (id_or_proc << 8) | type;
+}
+
+/**
+ * Parse group id from addr.
+ *
+ * @return group id
+ */
+inline int AddrGrp(int addr) {
+  return addr >> 16;
+}
+
+/**
+ * Parse worker/server id from addr.
+ *
+ * @return id
+ */
+inline int AddrID(int addr) {
+  static const int mask = (1 << 8) - 1;
+  return (addr >> 8) & mask;
+}
+
+/**
+ * Parse worker/server procs from addr.
+ *
+ * @return procs id
+ */
+inline int AddrProc(int addr) {
+  return AddrID(addr);
+}
+
+/**
+ * Parse msg type from addr
+ * @return msg type
+ */
+inline int AddrType(int addr) {
+  static const int mask = (1 << 8) -1;
+  return addr & mask;
+}
+
+/**
+ * Msg used to transfer Param info (gradient or value), feature blob, etc
+ * between workers, stubs and servers.
+ *
+ * Each msg has a source addr and dest addr identified by a unique integer.
+ * It is also associated with a target field (value and version) for ease of
+ * getting some meta info (e.g., parameter id) from the msg.
+ *
+ * Other data is added into the message as frames.
+ */
+class Msg {
+ public:
+  ~Msg();
+  Msg();
+  /**
+   * Construct the msg providing source and destination addr.
+   */
+  Msg(int src, int dst);
+  /**
+   * Copy constructor.
+   */
+  Msg(const Msg& msg);
+  /**
+   * Swap the src/dst addr
+   */
+  void SwapAddr();
+  /**
+   * Add a frame (a chunck of bytes) into the message
+   */
+  void AddFrame(const void* addr, int nBytes);
+  /**
+   * @return num of bytes of the current frame.
+   */
+  int FrameSize();
+  /**
+   * @return the pointer to the current frame data.
+   */
+  void* FrameData();
+  /**
+   * @return the data of the current frame as c string
+   */
+  char* FrameStr();
+  /**
+   * Move the cursor to the first frame.
+   */
+  void FirstFrame();
+  /**
+   * Move the cursor to the last frame.
+   */
+  void LastFrame();
+  /**
+   * Move the cursor to the next frame
+   * @return true if the next frame is not NULL; otherwise false
+   */
+  bool NextFrame();
+  /**
+   *  Add a 'format' frame to the msg (like CZMQ's zsock_send).
+   *
+   *  The format is a string that defines the type of each field.
+   *  The format can contain any of these characters, each corresponding to
+   *  one or two arguments:
+   *  i = int (signed)
+   *  1 = uint8_t
+   *  2 = uint16_t
+   *  4 = uint32_t
+   *  8 = uint64_t
+   *  p = void * (sends the pointer value, only meaningful over inproc)
+   *  s = char**
+   *
+   *  Returns size of the added content.
+   */
+  int AddFormatFrame(const char *format, ...);
+  /**
+   *  Parse the current frame added using AddFormatFrame(const char*, ...).
+   *
+   *  The format is a string that defines the type of each field.
+   *  The format can contain any of these characters, each corresponding to
+   *  one or two arguments:
+   *  i = int (signed)
+   *  1 = uint8_t
+   *  2 = uint16_t
+   *  4 = uint32_t
+   *  8 = uint64_t
+   *  p = void * (sends the pointer value, only meaningful over inproc)
+   *  s = char**
+   *
+   *  Returns size of the parsed content.
+   */
+  int ParseFormatFrame(const char* format, ...);
+
+#ifdef USE_ZMQ
+  void ParseFromZmsg(zmsg_t* msg);
+  zmsg_t* DumpToZmsg();
+#endif
+
+  /**
+   * @return msg size in terms of bytes, ignore meta info.
+   */
+  int size() const;
+  /**
+   * Set source addr.
+   * @param addr unique identify one worker/server/stub in the current job
+   */
+  inline void set_src(int addr) { src_ = addr; }
+  /**
+   * @return source addr.
+   */
+  inline int src() const { return src_; }
+  /**
+   * Set destination addr.
+   * @param addr unique identify one worker/server/stub in the current job
+   */
+  inline void set_dst(int addr) { dst_ = addr; }
+  /**
+   * @return dst addr.
+   */
+  inline int dst() const { return dst_; }
+  /**
+   * Set msg type, e.g., kPut, kGet, kUpdate, kRequest
+   */
+  inline void set_type(int type) { type_ = type; }
+  /**
+   * @return msg type.
+   */
+  inline int type() const { return type_; }
+  /**
+   * Set msg target.
+   *
+   * One msg has a target to identify some entity in worker/server/stub.
+   * The target is associated with a version, e.g., Param version.
+   */
+  inline void set_trgt(int val, int version) {
+    trgt_val_ = val;
+    trgt_version_ = version;
+  }
+  inline int trgt_val() const { return trgt_val_; }
+  inline int trgt_version() const { return trgt_version_; }
+
+ protected:
+  int src_ = 0;
+  int dst_ = 0;
+  int type_ = 0;
+  int trgt_val_ = 0;
+  int trgt_version_ = 0;
+#ifdef USE_ZMQ
+  zmsg_t* msg_ = nullptr;
+  zframe_t *frame_ = nullptr;
+#endif
+};
+
+inline void DeleteMsg(Msg** msg) {
+  delete *msg;
+  *msg = nullptr;
+}
+
+}  // namespace singa
+
+#endif  // SINGA_COMM_MSG_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/comm/socket.h
----------------------------------------------------------------------
diff --git a/include/singa/comm/socket.h b/include/singa/comm/socket.h
new file mode 100644
index 0000000..fae9ccb
--- /dev/null
+++ b/include/singa/comm/socket.h
@@ -0,0 +1,174 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_COMM_SOCKET_H_
+#define SINGA_COMM_SOCKET_H_
+
+#ifdef USE_ZMQ
+#include <czmq.h>
+#endif
+#include <map>
+#include <string>
+#include <vector>
+#include "singa/comm/msg.h"
+
+namespace singa {
+
+const std::string kInprocRouterEndpoint = "inproc://router";
+
+class SocketInterface {
+ public:
+  virtual ~SocketInterface() {}
+  /**
+    * Send a message to connected socket(s), non-blocking. The message
+    * will be deallocated after sending, thus should not be used after
+    * calling Send();
+    *
+    * @param msg The message to be sent
+    * @return 1 for success queuing the message for sending, 0 for failure
+    */
+  virtual int Send(Msg** msg) = 0;
+  /**
+    * Receive a message from any connected socket.
+    *
+    * @return a message pointer if success; nullptr if failure
+    */
+  virtual Msg* Receive() = 0;
+  /**
+   * @return Identifier of the implementation dependent socket. E.g., zsock_t*
+   * for ZeroMQ implementation and rank for MPI implementation.
+   */
+  virtual void* InternalID() const = 0;
+};
+
+class Poller {
+ public:
+  Poller();
+  explicit Poller(SocketInterface* socket);
+  /**
+    * Add a socket for polling; Multiple sockets can be polled together by
+    * adding them into the same poller.
+    */
+  void Add(SocketInterface* socket);
+  /**
+    * Poll for all sockets added into this poller.
+    * @param timeout Stop after this number of mseconds
+    * @return pointer To the socket if it has one message in the receiving
+    * queue; nullptr if no message in any sockets,
+    */
+  SocketInterface* Wait(int duration);
+
+  /**
+   * @return true if the poller is terminated due to process interupt
+   */
+  virtual bool Terminated();
+
+ protected:
+#ifdef USE_ZMQ
+  zpoller_t *poller_;
+  std::map<zsock_t*, SocketInterface*> zsock2Socket_;
+#endif
+};
+
+class Dealer : public SocketInterface {
+ public:
+  /*
+   * @param id Local dealer ID within a procs if the dealer is from worker or
+   * server thread, starts from 1 (0 is used by the router); or the connected
+   * remote procs ID for inter-process dealers from the stub thread.
+   */
+  Dealer();
+  explicit Dealer(int id);
+  ~Dealer() override;
+  /**
+    * Setup the connection with the router.
+    *
+    * @param endpoint Identifier of the router. For intra-process
+    * connection, the endpoint follows the format of ZeroMQ, i.e.,
+    * starting with "inproc://"; in Singa, since each process has one
+    * router, hence we can fix the endpoint to be "inproc://router" for
+    * intra-process. For inter-process, the endpoint follows ZeroMQ's
+    * format, i.e., IP:port, where IP is the connected process.
+    * @return 1 connection sets up successfully; 0 otherwise
+    */
+  int Connect(const std::string& endpoint);
+  int Send(Msg** msg) override;
+  Msg* Receive() override;
+  void* InternalID() const override;
+
+ protected:
+  int id_ = -1;
+#ifdef USE_ZMQ
+  zsock_t* dealer_ = nullptr;
+  zpoller_t* poller_ = nullptr;
+#endif
+};
+
+class Router : public SocketInterface {
+ public:
+  Router();
+  /**
+   * There is only one router per procs, hence its local id is 0 and is not set
+   * explicitly.
+   *
+   * @param bufsize Buffer at most this number of messages
+   */
+  explicit Router(int bufsize);
+  ~Router() override;
+  /**
+   * Setup the connection with dealers.
+   *
+   * It automatically binds to the endpoint for intra-process communication,
+   * i.e., "inproc://router".
+   *
+   * @param endpoint The identifier for the Dealer socket in other process
+   * to connect. It has the format IP:Port, where IP is the host machine.
+   * If endpoint is empty, it means that all connections are
+   * intra-process connection.
+   * @return number of connected dealers.
+   */
+  int Bind(const std::string& endpoint);
+  /**
+   * If the destination socket has not connected yet, buffer this the message.
+   */
+  int Send(Msg** msg) override;
+  Msg* Receive() override;
+  void* InternalID() const override;
+
+ protected:
+  int nBufmsg_ = 0;
+  int bufsize_ = 100;
+#ifdef USE_ZMQ
+  zsock_t* router_ = nullptr;
+  zpoller_t* poller_ = nullptr;
+  std::map<int, zframe_t*> id2addr_;
+  std::map<int, std::vector<zmsg_t*>> bufmsg_;
+#endif
+};
+
+#ifdef USE_MPI
+// TODO(wangsheng): add intra-process communication using shared queue
+std::vector<SafeQueue*> MPIQueues;
+#endif
+
+}  // namespace singa
+
+#endif  // SINGA_COMM_SOCKET_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/driver.h
----------------------------------------------------------------------
diff --git a/include/singa/driver.h b/include/singa/driver.h
new file mode 100644
index 0000000..7796884
--- /dev/null
+++ b/include/singa/driver.h
@@ -0,0 +1,226 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_DRIVER_H_ 
+#define SINGA_DRIVER_H_
+
+#include <vector>
+#include "singa/proto/job.pb.h"
+#include "singa/proto/singa.pb.h"
+#include "singa/utils/factory.h"
+#include "singa/utils/param.h"
+#include "singa/utils/singleton.h"
+#include "singa/utils/updater.h"
+#include "singa/neuralnet/layer.h"
+#include "singa/worker.h"
+#include "singa/server.h"
+
+namespace singa {
+using std::vector;
+class Driver {
+ public:
+  /**
+   * Init SINGA
+   * - init glog
+   * - parse job id and job conf from cmd line
+   * - register built-in layer, worker, updater, param subclasses.
+   *
+   * May be used for MPI init if it is used for message passing.
+   */
+  void Init(int argc, char** argv);
+  /**
+   * Update job configuration and call Train(const JobProto&) to start the
+   * training.
+   *
+   * It sets up the logging path and checkpoint files (if resume), and checks
+   * the existence of the workspace folder.
+   *
+   * @param[in] resume if true resume the training from the latest checkpoint
+   * files.
+   * @param[in] job_conf job configuration.
+   */
+  void Train(bool resume, const JobProto& job_conf);
+  /**
+   * Create workers and servers to conduct the training.
+   *
+   * @param[in] job_conf job configuration with all necessary fields set (e.g.,
+   * by Train(bool, const JobProto&).
+   */
+  void Train(const JobProto& job_conf);
+  /**
+   * Setting the checkpoint field of the job configuration to resume training.
+   *
+   * The checkpoint folder will be searched to get the files for the latest
+   * checkpoint, which will be added into the checkpoint field. The workers
+   * would then load the values of params from the checkpoint files.
+   *
+   * @param job_conf job configuration
+   */
+  void SetupForResume(JobProto* job_conf);
+  /**
+   * Create server instances.
+   *
+   * @param[in] job_conf job configuration.
+   * @param[in] net training neural network.
+   * @return server instances
+   */
+  const vector<Server*> CreateServers(const JobProto& job_conf, NeuralNet* 
net);
+  /**
+   * Create workers instances.
+   * @param[in] job_conf job configuration.
+   * @param[in] net training neural network.
+   * @return worker instances
+   */
+  const vector<Worker*> CreateWorkers(const JobProto& job_conf, NeuralNet* 
net);
+
+
+  /*********** Subclasses registers *************************/
+  /**
+   * Register a Layer subclass.
+   *
+   * @param type layer type ID. If called to register built-in subclasses,
+   * it is from LayerType; if called to register user-defined
+   * subclass, it is a string;
+   * @return 0 if success; otherwise -1.
+   */
+  template<typename Subclass, typename Type>
+  int RegisterLayer(const Type& type);
+  /**
+   * Register an Updater subclass.
+   *
+   * @param type ID of the subclass. If called to register built-in subclasses,
+   * it is from UpdaterType; if called to register user-defined
+   * subclass, it is a string;
+   * @return 0 if success; otherwise -1.
+   */
+  template<typename Subclass, typename Type>
+  int RegisterUpdater(const Type& type);
+  /**
+   * Register a learning rate generator subclasses.
+   *
+   * @param type ID of the subclass. If called to register built-in subclasses,
+   * it is from ChangeMethod; if called to register user-defined
+   * subclass, it is a string;
+   * @return 0 if success; otherwise -1.
+   */
+  template<typename Subclass, typename Type>
+  int RegisterLRGenerator(const Type& type);
+  /**
+   * Register a Worker subclass.
+   *
+   * @param type ID of the subclass. If called to register built-in subclasses,
+   * it is from TrainOneBatchAlg; if called to register user-defined
+   * subclass, it is a string;
+   * @return 0 if success; otherwise -1.
+   */
+  template<typename Subclass, typename Type>
+  int RegisterWorker(const Type& type);
+  /**
+   * Register a Param subclass.
+   * @param type ID of the subclass. If called to register built-in subclasses,
+   * it is from ParamType; if called to register user-defined
+   * subclass, it is a string;
+   *
+   * @return 0 if success; otherwise -1.
+   */
+  template<typename Subclass, typename Type>
+  int RegisterParam(const Type& type);
+  /**
+   * Register ParamGenerator subclasses for initializing Param objects.
+   *
+   * @param type ID of the subclass. If called to register built-in subclasses,
+   * it is from InitMethod; if called to register user-defined
+   * subclass, it is a string;
+   * @return 0 if success; otherwise -1.
+   */
+  template<typename Subclass, typename Type>
+  int RegisterParamGenerator(const Type& type);
+
+  /****************** Access function ********************/
+  /**
+   * @return job ID which is generated by zookeeper and passed in by the
+   * launching script.
+   */
+  inline int job_id() const { return job_id_; }
+  /**
+   * @return job conf path which is passed by users at the command line. It
+   * should at least contains the cluster configuration.
+   */
+  inline JobProto job_conf() const { return job_conf_; }
+
+ private:
+  int job_id_;
+  JobProto job_conf_;
+  SingaProto singa_conf_;
+};
+
+/************* Implementation of template functions*************************
+* Must put the implementation in driver.h file instead of driver.cc.
+* Otherwise there would be linking errors caused by unknown registration
+* functions, because these functions cannot be generated merely based on their
+* declarations in driver.h.
+*/
+
+template<typename Subclass, typename Type>
+int Driver::RegisterLayer(const Type& type) {
+  auto factory = Singleton<Factory<singa::Layer>>::Instance();
+  factory->Register(type, CreateInstance(Subclass, Layer));
+  return 1;
+}
+
+template<typename Subclass, typename Type>
+int Driver::RegisterParam(const Type& type) {
+  auto factory = Singleton<Factory<singa::Param>>::Instance();
+  factory->Register(type, CreateInstance(Subclass, Param));
+  return 1;
+}
+
+template<typename Subclass, typename Type>
+int Driver::RegisterParamGenerator(const Type& type) {
+  auto factory = Singleton<Factory<singa::ParamGenerator>>::Instance();
+  factory->Register(type, CreateInstance(Subclass, ParamGenerator));
+  return 1;
+}
+
+template<typename Subclass, typename Type>
+int Driver::RegisterUpdater(const Type& type) {
+  auto factory = Singleton<Factory<singa::Updater>>::Instance();
+  factory->Register(type, CreateInstance(Subclass, Updater));
+  return 1;
+}
+
+template<typename Subclass, typename Type>
+int Driver::RegisterLRGenerator(const Type& type) {
+  auto factory = Singleton<Factory<singa::LRGenerator>>::Instance();
+  factory->Register(type, CreateInstance(Subclass, LRGenerator));
+  return 1;
+}
+
+template<typename Subclass, typename Type>
+int Driver::RegisterWorker(const Type& type) {
+  auto factory = Singleton<Factory<singa::Worker>>::Instance();
+  factory->Register(type, CreateInstance(Subclass, Worker));
+  return 1;
+}
+
+}  // namespace singa
+
+#endif  // SINGA_DRIVER_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/io/hdfs_store.h
----------------------------------------------------------------------
diff --git a/include/singa/io/hdfs_store.h b/include/singa/io/hdfs_store.h
new file mode 100644
index 0000000..f85615b
--- /dev/null
+++ b/include/singa/io/hdfs_store.h
@@ -0,0 +1,22 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+// TODO(wangwei) use hdfs as data storage

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/io/imagefolder_store.h
----------------------------------------------------------------------
diff --git a/include/singa/io/imagefolder_store.h 
b/include/singa/io/imagefolder_store.h
new file mode 100644
index 0000000..c05d92d
--- /dev/null
+++ b/include/singa/io/imagefolder_store.h
@@ -0,0 +1,21 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+// TODO(wangwei) store images in a disk folder

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/io/kvfile.h
----------------------------------------------------------------------
diff --git a/include/singa/io/kvfile.h b/include/singa/io/kvfile.h
new file mode 100644
index 0000000..27dd35e
--- /dev/null
+++ b/include/singa/io/kvfile.h
@@ -0,0 +1,182 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_IO_KVFILE_H_
+#define SINGA_IO_KVFILE_H_
+
+#include <fstream>
+#include <string>
+#include <unordered_set>
+
+#define USE_PROTOBUF 1
+
+#ifdef USE_PROTOBUF
+#include <google/protobuf/message.h>
+#endif
+
+namespace singa {
+namespace io {
+
+/**
+ * KVFile stores training/validation/test tuples.
+ * Every worker node should have a KVFile for training data (validation/test
+ * KVFile is optional).
+ * KVFile consists of a set of unordered tuples. Each tuple is
+ * encoded as [key_len key val_len val] (key_len and val_len are of type
+ * uint32, which indicate the bytes of key and value respectively).
+ *
+ * When KVFile is created, it will remove the last tuple if the value size
+ * and key size do not match because the last write crashed.
+ *
+ * TODO(wangwei) split one KVFile into multiple KVFile s.
+ *
+ */
+class KVFile {
+ public:
+  enum Mode {
+    // read only mode used in training
+    kRead = 0,
+    // write mode used in creating KVFile (will overwrite previous one)
+    kCreate = 1,
+    // append mode, e.g. used when previous creating crashes
+    kAppend = 2
+  };
+
+  /**
+   * KVFile constructor.
+   *
+   * @param path path to the disk KVFile, it can be
+   *  - a path to local disk file.
+   *  - a path to local directory. This is to be compatible with the older
+   *    version (DataShard). The KVFile is shard.dat under that directory
+   *  - a hdfs file starting with "hdfs://"
+   * @param mode KVFile open mode, KVFile::kRead, KVFile::kCreate or
+   * KVFile::kAppend
+   * @param bufsize Cache bufsize bytes data for every disk op (read or write),
+   * default is 10MB.
+   */
+  KVFile(const std::string& path, Mode mode, int bufsize = 10485760);
+  ~KVFile();
+
+#ifdef USE_PROTOBUF
+  /**
+   * read next tuple from the KVFile.
+   *
+   * @param key Tuple key
+   * @param val Record of type Message
+   * @return false if the read is unsuccessful, e.g., the tuple was not inserted
+   *         completely.
+   */
+  bool Next(std::string* key, google::protobuf::Message* val);
+  /**
+   * Append one tuple to the KVFile.
+   *
+   * @param key e.g., image path
+   * @param val
+   * @return false if unsuccessful, e.g., the key was inserted before
+   */
+  bool Insert(const std::string& key, const google::protobuf::Message& tuple);
+#endif
+  /**
+   * read next tuple from the KVFile.
+   *
+   * @param key Tuple key
+   * @param val Record of type string
+   * @return false if unsuccessful, e.g., the tuple was not inserted completely.
+   */
+  bool Next(std::string* key, std::string* val);
+  /**
+   * Append one tuple to the KVFile.
+   *
+   * @param key e.g., image path
+   * @param val
+   * @return false if unsuccessful, e.g., the key was inserted before
+   */
+  bool Insert(const std::string& key, const std::string& tuple);
+  /**
+   * Move the read pointer to the head of the KVFile file.
+   * Used for repeated reading.
+   */
+  void SeekToFirst();
+  /**
+   * Flush buffered data to disk.
+   * Used only for kCreate or kAppend.
+   */
+  void Flush();
+  /**
+   * Iterate through all tuples to get the num of all tuples.
+   *
+   * @return num of tuples
+   */
+  int Count();
+  /**
+   * @return path to KVFile file
+   */
+  inline std::string path() { return path_; }
+
+ protected:
+  /**
+   * Read the next key and prepare buffer for reading value.
+   *
+   * @param key
+   * @return length (i.e., bytes) of value field.
+   */
+  int Next(std::string* key);
+  /**
+   * Setup the disk pointer to the right position for append in case that
+   * the previous write crashes.
+   *
+   * @param path KVFile path.
+   * @return offset (end pos) of the last success written record.
+   */
+  int PrepareForAppend(const std::string& path);
+  /**
+   * Read data from disk if the current data in the buffer is not a full field.
+   *
+   * @param size size of the next field.
+   */
+  bool PrepareNextField(int size);
+
+ private:
+  std::string path_ = "";
+  Mode mode_;
+  //!< either ifstream or ofstream
+  std::fstream fdat_;
+  //!< to avoid replicated record
+  std::unordered_set<std::string> keys_;
+  //!< internal buffer
+  char* buf_ = nullptr;
+  //!< offset inside the buf_
+  int offset_ = 0;
+  //!< allocated bytes for the buf_
+  int capacity_ = 0;
+  //!< bytes in buf_, used in reading
+  int bufsize_ = 0;
+};
+}  // namespace io
+
+/**
+ * @deprecated {ShardData is deprecated! Use KVFile}.
+using ShardData = KVFile;
+*/
+}  // namespace singa
+
+#endif  // SINGA_IO_KVFILE_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/io/kvfile_store.h
----------------------------------------------------------------------
diff --git a/include/singa/io/kvfile_store.h b/include/singa/io/kvfile_store.h
new file mode 100644
index 0000000..74ff127
--- /dev/null
+++ b/include/singa/io/kvfile_store.h
@@ -0,0 +1,55 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_IO_KVFILE_STORE_H_
+#define SINGA_IO_KVFILE_STORE_H_
+
+#include <string>
+#include "singa/io/store.h"
+#include "singa/io/kvfile.h"
+
+namespace singa {
+namespace io {
+
+/**
+ * Use the KVFile as the data storage.
+ *
+ * KVFile is a binary file. Each tuple is stored as byte string.
+ */
+class KVFileStore : public Store {
+ public:
+  ~KVFileStore() { Close();}
+  bool Open(const std::string& source, Mode mode) override;
+  void Close() override;
+  bool Read(std::string* key, std::string* value) override;
+  void SeekToFirst() override;
+  bool Write(const std::string& key, const std::string& value) override;
+  void Flush() override;
+
+ private:
+  KVFile* file_ = nullptr;
+  Mode mode_;
+};
+
+}  // namespace io
+}  // namespace singa
+
+#endif  // SINGA_IO_KVFILE_STORE_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/io/store.h
----------------------------------------------------------------------
diff --git a/include/singa/io/store.h b/include/singa/io/store.h
new file mode 100644
index 0000000..15afb6a
--- /dev/null
+++ b/include/singa/io/store.h
@@ -0,0 +1,105 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_IO_STORE_H_
+#define SINGA_IO_STORE_H_
+
+#include <string>
+
+namespace singa {
+namespace io {
+
+using std::string;
+enum Mode { kCreate, kRead, kAppend };
+
+/**
+ * General key-value store that provides functions for reading and writing
+ * tuples.
+ *
+ * Subclasses implement the functions for a specific data storage, e.g., CSV
+ * file, HDFS, image folder, singa::io::SFile, leveldb, lmdb, etc.
+ */
+class Store {
+ public:
+  Store() { }
+  /**
+   * In case that users forget to call Close() to release resources, e.g.,
+   * memory, you can release them here.
+   */
+  virtual ~Store() { }
+  /**
+   * @param[in] source path to the storage, could be a file path, folder path
+   * or hdfs path, or even a http url.
+   * @param[in] mode
+   * @return true if open successfully, otherwise false.
+   */
+  virtual bool Open(const std::string& source, Mode mode) = 0;
+  /**
+   * Release resources.
+   */
+  virtual void Close() = 0;
+  /**
+   * Read a tuple.
+   *
+   * @param[out] key
+   * @param[out] value
+   * @return true if read successfully, otherwise false.
+   */
+  virtual bool Read(std::string* key, std::string* value) = 0;
+  /**
+   * Seek the read header to the first tuple.
+   */
+  virtual void SeekToFirst() = 0;
+  /**
+   * Write a tuple.
+   *
+   * @param[in] key
+   * @param[in] value
+   * @return true if success, otherwise false.
+   */
+  virtual bool Write(const std::string& key, const std::string& value) = 0;
+  /**
+   * Flush writing buffer if it has.
+   */
+  virtual void Flush() {}
+};
+
+/**
+ * Create a Store object.
+ *
+ * @param[in] backend identifier for a specific backend. Two backends are
+ * included currently, i.e., "kvfile", "textfile"
+ * @return a pointer to the newly created Store.
+ */
+Store* CreateStore(const string& backend);
+/**
+ * Create and open a Store object.
+ *
+ * @param[in] backend, @see CreateStore().
+ * @param[in] path
+ * @param[in] mode kRead or kCreate or kAppend
+ */
+Store* OpenStore(const string& backend, const string& path, Mode mode);
+
+}  // namespace io
+}  // namespace singa
+
+#endif  // SINGA_IO_STORE_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/io/textfile_store.h
----------------------------------------------------------------------
diff --git a/include/singa/io/textfile_store.h 
b/include/singa/io/textfile_store.h
new file mode 100644
index 0000000..dcc559d
--- /dev/null
+++ b/include/singa/io/textfile_store.h
@@ -0,0 +1,56 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_IO_TEXTFILE_STORE_H_
+#define SINGA_IO_TEXTFILE_STORE_H_
+
+#include <fstream>
+#include <string>
+#include "singa/io/store.h"
+
+namespace singa {
+namespace io {
+/**
+ * Use text file as the data storage, one line per tuple.
+ *
+ * It is used for storing CSV format data where the key is the line No. and
+ * the value is the line.
+ */
+class TextFileStore : public Store {
+ public:
+  ~TextFileStore() { Close(); }
+  bool Open(const std::string& source, Mode mode) override;
+  void Close() override;
+  bool Read(std::string* key, std::string* value) override;
+  void SeekToFirst() override;
+  bool Write(const std::string& key, const std::string& value) override;
+  void Flush() override;
+
+ private:
+  int lineNo_ = 0;
+  std::fstream* fs_ = nullptr;
+  Mode mode_;
+};
+
+}  // namespace io
+}  // namespace singa
+
+#endif  // SINGA_IO_TEXTFILE_STORE_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/neuralnet/connection_layer/bridge.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/connection_layer/bridge.h 
b/include/singa/neuralnet/connection_layer/bridge.h
new file mode 100644
index 0000000..333d6c9
--- /dev/null
+++ b/include/singa/neuralnet/connection_layer/bridge.h
@@ -0,0 +1,106 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_NEURALNET_CONNECTION_LAYER_BRIDGE_H_
+#define SINGA_NEURALNET_CONNECTION_LAYER_BRIDGE_H_
+
+#include <vector>
+#include "singa/neuralnet/layer.h"
+
+/**
+ * \file this file includes the declarations of layers that inherit the
+ * base ConnectionLayer.
+ */
+namespace singa {
+class BridgeLayer : virtual public ConnectionLayer {
+ public:
+  void set_ready(bool a) {
+    ready_ = a;
+  }
+  bool ready() const {
+    return ready_;
+  }
+  virtual bool is_bridgesrclayer() const {
+    return false;
+  }
+  virtual bool is_bridgedstlayer() const {
+    return false;
+  }
+
+ protected:
+  //!< true if received grad from BridgeDstLayer
+  bool ready_;
+};
+
+/**
+ * For receiving data from a layer on other threads, which may reside on other
+ * nodes due to layer/data partition
+ */
+class BridgeDstLayer : public BridgeLayer {
+ public:
+  void Setup(const LayerProto& conf, const vector<Layer*>& srclayers) override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override {
+    // reset ready_ for next iteration.
+    ready_ = false;
+  }
+  void ComputeGradient(int flag,  const vector<Layer*>& srclayers) override {}
+  bool is_bridgedstlayer() const {
+    return true;
+  }
+};
+
+/**
+ * For sending data to a layer on other threads, which may reside on other nodes
+ * due to layer/data partition.
+ */
+class BridgeSrcLayer : public BridgeLayer {
+ public:
+  void Setup(const LayerProto& conf, const vector<Layer*>& srclayers) override 
{
+    CHECK_GE(srclayers.size(), 1);
+    srclayer_ = srclayers.at(0);
+  }
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override {}
+  void ComputeGradient(int flag,  const vector<Layer*>& srclayers) override {
+    ready_ = false;
+  }
+  const Blob<float>& data(const Layer* from) const override {
+    return srclayer_->data(this);
+  }
+  Blob<float>* mutable_data(const Layer* from) override {
+    return srclayer_->mutable_data(this);
+  }
+  const Blob<float>& grad(const Layer* from) const override {
+    return srclayer_->grad(this);
+  }
+  Blob<float>* mutable_grad(const Layer* from) override {
+    return srclayer_->mutable_grad(this);
+  }
+  bool is_bridgesrclayer() const override {
+    return true;
+  }
+
+ private:
+  Layer* srclayer_;
+};
+
+}  // namespace singa
+
+#endif  // SINGA_NEURALNET_CONNECTION_LAYER_BRIDGE_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/neuralnet/connection_layer/concate.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/connection_layer/concate.h 
b/include/singa/neuralnet/connection_layer/concate.h
new file mode 100644
index 0000000..9c5180b
--- /dev/null
+++ b/include/singa/neuralnet/connection_layer/concate.h
@@ -0,0 +1,48 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_NEURALNET_CONNECTION_LAYER_CONCATE_H_
+#define SINGA_NEURALNET_CONNECTION_LAYER_CONCATE_H_
+
+#include <vector>
+#include "singa/neuralnet/layer.h"
+
+/**
+ * \file this file includes the declarations of layers that inherit the
+ * base ConnectionLayer.
+ */
+namespace singa {
+/**
+ * Connect multiple (src) layers with a single (dst) layer.
+ *
+ * It concatenates feature Blobs (i.e., matrices) of src layers on one dimension.
+ * The concatenated feature Blob will be fed into the dst layer.
+ */
+class ConcateLayer : public ConnectionLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+};
+
+}  // namespace singa
+
+#endif  // SINGA_NEURALNET_CONNECTION_LAYER_CONCATE_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/neuralnet/connection_layer/slice.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/connection_layer/slice.h 
b/include/singa/neuralnet/connection_layer/slice.h
new file mode 100644
index 0000000..87b1754
--- /dev/null
+++ b/include/singa/neuralnet/connection_layer/slice.h
@@ -0,0 +1,54 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_NEURALNET_CONNECTION_LAYER_SLICE_H_
+#define SINGA_NEURALNET_CONNECTION_LAYER_SLICE_H_
+
+#include <vector>
+#include "singa/neuralnet/layer.h"
+
+/**
+ * \file this file includes the declarations of layers that inherit the
+ * base ConnectionLayer.
+ */
+namespace singa {
+/**
+ * Connect a single (src) layer with multiple (dst) layers.
+ *
+ * It slices the feature Blob (i.e., matrix) of the src layer on one dimension.
+ * The sliced feature Blobs will be fed into dst layers.
+ */
+class SliceLayer : public ConnectionLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+
+ private:
+  std::vector<Blob<float>> datavec_;
+  std::vector<Blob<float>> gradvec_;
+  int slice_dim_;
+  int slice_num_;
+};
+
+}  // namespace singa
+
+#endif  // SINGA_NEURALNET_CONNECTION_LAYER_SLICE_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/neuralnet/connection_layer/split.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/connection_layer/split.h 
b/include/singa/neuralnet/connection_layer/split.h
new file mode 100644
index 0000000..391473a
--- /dev/null
+++ b/include/singa/neuralnet/connection_layer/split.h
@@ -0,0 +1,52 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_NEURALNET_CONNECTION_LAYER_SPLIT_H_
+#define SINGA_NEURALNET_CONNECTION_LAYER_SPLIT_H_
+
+#include <vector>
+#include "singa/neuralnet/layer.h"
+
+/**
+ * \file this file includes the declarations of layers that inherit the
+ * base ConnectionLayer.
+ */
+namespace singa {
+/**
+ * Connect a single (src) layer with multiple dst layers.
+ *
+ * It replicates the feature Blob of the src layer.
+ * Each replicated feature Blob will be fed into one dst layer.
+ * It aggregates gradients set by all dst layers and set it to the src layer.
+ */
+class SplitLayer : public ConnectionLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+
+ protected:
+  Blob<float> grads_;
+};
+
+}  // namespace singa
+
+#endif  // SINGA_NEURALNET_CONNECTION_LAYER_SPLIT_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/neuralnet/input_layer/csv_record.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/input_layer/csv_record.h 
b/include/singa/neuralnet/input_layer/csv_record.h
new file mode 100644
index 0000000..1c50ceb
--- /dev/null
+++ b/include/singa/neuralnet/input_layer/csv_record.h
@@ -0,0 +1,72 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_NEURALNET_INPUT_LAYER_CSV_RECORD_H_
+#define SINGA_NEURALNET_INPUT_LAYER_CSV_RECORD_H_
+
+#include <string>
+#include <vector>
+#include "singa/io/store.h"
+#include "singa/neuralnet/layer.h"
+#include "singa/neuralnet/input_layer/store_input.h"
+#include "singa/utils/data_shard.h"
+/**
+ * \file this file includes the declarations of input layers that inherit the
+ * base InputLayer to load input features.
+ *
+ * The feature loading phase can be implemented using a single layer or
+ * separated into DataLayer (for loading features as records) and ParserLayer
+ * (for parsing features from records). SINGA has provided some subclasses of
+ * DataLayer and ParserLayer.
+ *
+ * Data prefetching can be implemented as a sub-class of InputLayer.
+ * SINGA provides a built-in PrefetchLayer which embeds DataLayer and
+ * ParserLayer.
+ */
+namespace singa {
+using std::string;
+using std::vector;
+
+/**
+ * Specific layer that parses the value string loaded by Store as a line from
+ * a CSV file.
+ *
+ * It assumes the first column is the label, unless has_label_ is configured
+ * to false or the data is used in deploy mode.
+ */
+class CSVRecordLayer : public SingleLabelRecordLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
+
+ protected:
+  bool Parse(int k, int flag, const string& key, const string& val) override;
+  void LoadRecord(const string& backend,
+                  const string& path,
+                  Blob<float>* to) override;
+
+ private:
+  std::string sep_;
+  bool has_label_;
+};
+
+}  // namespace singa
+
+#endif  // SINGA_NEURALNET_INPUT_LAYER_CSV_RECORD_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/neuralnet/input_layer/data.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/input_layer/data.h 
b/include/singa/neuralnet/input_layer/data.h
new file mode 100644
index 0000000..46ffbf7
--- /dev/null
+++ b/include/singa/neuralnet/input_layer/data.h
@@ -0,0 +1,76 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_NEURALNET_INPUT_LAYER_DATA_H_
+#define SINGA_NEURALNET_INPUT_LAYER_DATA_H_
+
+#include <string>
+#include <vector>
+#include "singa/io/store.h"
+#include "singa/neuralnet/layer.h"
+#include "singa/utils/data_shard.h"
+/**
+ * \file this file includes the declarations of input layers that inherit the
+ * base InputLayer to load input features.
+ *
+ * The feature loading phase can be implemented using a single layer or
+ * separated into DataLayer (for loading features as records) and ParserLayer
+ * (for parsing features from records). SINGA has provided some subclasses of
+ * DataLayer and ParserLayer.
+ *
+ * Data prefetching can be implemented as a sub-class of InputLayer.
+ * SINGA provides a built-in PrefetchLayer which embeds DataLayer and
+ * ParserLayer.
+ */
+namespace singa {
+using std::string;
+using std::vector;
+/**
+ * Base layer for reading ::Record from local Shard, HDFS, lmdb, etc.
+ */
+class DataLayer: virtual public InputLayer {
+ public:
+  Blob<float>* mutable_data(const Layer* layer) override { return nullptr; }
+  ConnectionType dst_layer_connection() const override {
+    return kOneToMany;
+  }
+
+  inline int batchsize() const { return batchsize_; }
+  virtual const Record& sample() const {
+    return sample_;
+  }
+  /**
+   * @return the loaded records
+   */
+  virtual const std::vector<Record>& records() const {
+    return records_;
+  }
+
+ protected:
+  int random_skip_;
+  int batchsize_;
+  Record sample_;
+  std::vector<Record> records_;
+};
+
+}  // namespace singa
+
+#endif  // SINGA_NEURALNET_INPUT_LAYER_DATA_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/neuralnet/input_layer/image_preprocess.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/input_layer/image_preprocess.h 
b/include/singa/neuralnet/input_layer/image_preprocess.h
new file mode 100644
index 0000000..2407990
--- /dev/null
+++ b/include/singa/neuralnet/input_layer/image_preprocess.h
@@ -0,0 +1,63 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_NEURALNET_INPUT_LAYER_IMAGE_PREPROCESS_H_
+#define SINGA_NEURALNET_INPUT_LAYER_IMAGE_PREPROCESS_H_
+
+#include <string>
+#include <vector>
+#include "singa/io/store.h"
+#include "singa/neuralnet/layer.h"
+#include "singa/utils/data_shard.h"
+/**
+ * \file this file includes the declarations of input layers that inherit the
+ * base InputLayer to load input features.
+ *
+ * The feature loading phase can be implemented using a single layer or
+ * separated into DataLayer (for loading features as records) and ParserLayer
+ * (for parsing features from records). SINGA has provided some subclasses of
+ * DataLayer and ParserLayer.
+ *
+ * Data prefetching can be implemented as a sub-class of InputLayer.
+ * SINGA provides a built-in PrefetchLayer which embeds DataLayer and
+ * ParserLayer.
+ */
+namespace singa {
+using std::string;
+using std::vector;
+/**
+ * Do preprocessing for images, including cropping, mirroring, resizing.
+ */
+class ImagePreprocessLayer : public InputLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers);
+
+ private:
+  bool mirror_ = false;
+  int cropsize_ = 0;
+  int resize_ = 0;
+  float scale_ = 1;
+};
+
+}  // namespace singa
+
+#endif  // SINGA_NEURALNET_INPUT_LAYER_IMAGE_PREPROCESS_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/neuralnet/input_layer/label.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/input_layer/label.h 
b/include/singa/neuralnet/input_layer/label.h
new file mode 100644
index 0000000..1c40275
--- /dev/null
+++ b/include/singa/neuralnet/input_layer/label.h
@@ -0,0 +1,59 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_NEURALNET_INPUT_LAYER_LABEL_H_
+#define SINGA_NEURALNET_INPUT_LAYER_LABEL_H_
+
+#include <string>
+#include <vector>
+#include "singa/io/store.h"
+#include "singa/neuralnet/layer.h"
+#include "singa/neuralnet/input_layer/parser.h"
+#include "singa/utils/data_shard.h"
+/**
+ * \file this file includes the declarations of input layers that inherit the
+ * base InputLayer to load input features.
+ *
+ * The feature loading phase can be implemented using a single layer or
+ * separated into DataLayer (for loading features as records) and ParserLayer
+ * (for parsing features from records). SINGA has provided some subclasses of
+ * DataLayer and ParserLayer.
+ *
+ * Data prefetching can be implemented as a sub-class of InputLayer.
+ * SINGA provides a built-in PrefetchLayer which embeds DataLayer and
+ * ParserLayer.
+ */
+namespace singa {
+using std::string;
+using std::vector;
+/**
+ * Derived from ParserLayer to parse the label from a SingleLabelImageRecord.
+ */
+class LabelLayer : public ParserLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
+  void ParseRecords(int flag, const std::vector<Record>& records,
+                    Blob<float>* blob) override;
+};
+
+}  // namespace singa
+
+#endif  // SINGA_NEURALNET_INPUT_LAYER_LABEL_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/neuralnet/input_layer/lmdb_data.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/input_layer/lmdb_data.h 
b/include/singa/neuralnet/input_layer/lmdb_data.h
new file mode 100644
index 0000000..bc26ab0
--- /dev/null
+++ b/include/singa/neuralnet/input_layer/lmdb_data.h
@@ -0,0 +1,75 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_NEURALNET_INPUT_LAYER_LMDB_DATA_H_
+#define SINGA_NEURALNET_INPUT_LAYER_LMDB_DATA_H_
+
+#include <string>
+#include <vector>
+#include "singa/io/store.h"
+#include "singa/neuralnet/layer.h"
+#include "singa/neuralnet/input_layer/data.h"
+#include "singa/utils/data_shard.h"
+/**
+ * \file this file includes the declarations of input layers that inherit the
+ * base InputLayer to load input features.
+ *
+ * The feature loading phase can be implemented using a single layer or
+ * separated into DataLayer (for loading features as records) and ParserLayer
+ * (for parsing features from records). SINGA has provided some subclasses of
+ * DataLayer and ParserLayer.
+ *
+ * Data prefetching can be implemented as a sub-class of InputLayer.
+ * SINGA provides a built-in PrefetchLayer which embeds DataLayer and
+ * ParserLayer.
+ */
+namespace singa {
+using std::string;
+using std::vector;
+/**
+ * Layer for loading Record from LMDB.
+ *
+ * It is derived from DataLayer.
+ */
+#ifdef USE_LMDB
+#include <lmdb.h>
+class LMDBDataLayer : public DataLayer {
+ public:
+  ~LMDBDataLayer();
+
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
+  void OpenLMDB(const std::string& path);
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ConvertCaffeDatumToRecord(const CaffeDatum& datum,
+                                 SingleLabelImageRecord* record);
+
+ private:
+  MDB_env* mdb_env_;
+  MDB_dbi mdb_dbi_;
+  MDB_txn* mdb_txn_;
+  MDB_cursor* mdb_cursor_;
+  MDB_val mdb_key_, mdb_value_;
+};
+#endif
+
+}  // namespace singa
+
+#endif  // SINGA_NEURALNET_INPUT_LAYER_LMDB_DATA_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/neuralnet/input_layer/mnist.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/input_layer/mnist.h 
b/include/singa/neuralnet/input_layer/mnist.h
new file mode 100644
index 0000000..7c91173
--- /dev/null
+++ b/include/singa/neuralnet/input_layer/mnist.h
@@ -0,0 +1,62 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_NEURALNET_INPUT_LAYER_MNIST_H_
+#define SINGA_NEURALNET_INPUT_LAYER_MNIST_H_
+
+#include <string>
+#include <vector>
+#include "singa/io/store.h"
+#include "singa/neuralnet/layer.h"
+#include "singa/neuralnet/input_layer/parser.h"
+#include "singa/utils/data_shard.h"
+/**
+ * \file this file includes the declarations of input layers that inherit the
+ * base InputLayer to load input features.
+ *
+ * The feature loading phase can be implemented using a single layer or
+ * separated into DataLayer (for loading features as records) and ParserLayer
+ * (for parsing features from records). SINGA has provided some subclasses of
+ * DataLayer and ParserLayer.
+ *
+ * Data prefetching can be implemented as a sub-class of InputLayer.
+ * SINGA provides a built-in PrefetchLayer which embeds DataLayer and
+ * ParserLayer.
+ */
+namespace singa {
+using std::string;
+using std::vector;
+/**
+ * Derived from ParserLayer to parse the MNIST feature from a
+ * SingleLabelImageRecord.
+ */
+class MnistLayer : public ParserLayer {
+ public:
+  void Setup(const LayerProto& proto, const vector<Layer*>& srclayers) 
override;
+  void ParseRecords(int flag, const std::vector<Record>& records,
+                    Blob<float>* blob) override;
+
+ protected:
+  float norm_a_, norm_b_;
+};
+
+}  // namespace singa
+
+#endif  // SINGA_NEURALNET_INPUT_LAYER_MNIST_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/neuralnet/input_layer/parser.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/input_layer/parser.h 
b/include/singa/neuralnet/input_layer/parser.h
new file mode 100644
index 0000000..de52b0a
--- /dev/null
+++ b/include/singa/neuralnet/input_layer/parser.h
@@ -0,0 +1,65 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_NEURALNET_INPUT_LAYER_PARSER_H_
+#define SINGA_NEURALNET_INPUT_LAYER_PARSER_H_
+
+#include <string>
+#include <vector>
+#include "singa/io/store.h"
+#include "singa/neuralnet/layer.h"
+#include "singa/utils/data_shard.h"
+/**
+ * \file this file includes the declarations of input layers that inherit the
+ * base InputLayer to load input features.
+ *
+ * The feature loading phase can be implemented using a single layer or
+ * separated into DataLayer (for loading features as records) and ParserLayer
+ * (for parsing features from records). SINGA has provided some subclasses of
+ * DataLayer and ParserLayer.
+ *
+ * Data prefetching can be implemented as a sub-class of InputLayer.
+ * SINGA provides a built-in PrefetchLayer which embeds DataLayer and
+ * ParserLayer.
+ */
+namespace singa {
+using std::string;
+using std::vector;
+/**
+ * Base layer for parsing the input records into Blobs.
+ */
+class ParserLayer : public InputLayer {
+ public:
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override {}
+  ConnectionType dst_layer_connection() const override {
+    return kOneToMany;
+  }
+  /**
+   * Parse records from DataLayer into blob.
+   */
+  virtual void ParseRecords(int flag, const std::vector<Record>& records,
+      Blob<float>* blob) = 0;
+};
+
+}  // namespace singa
+
+#endif  // SINGA_NEURALNET_INPUT_LAYER_PARSER_H_

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/singa/neuralnet/input_layer/prefetch.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/input_layer/prefetch.h 
b/include/singa/neuralnet/input_layer/prefetch.h
new file mode 100644
index 0000000..038f7e2
--- /dev/null
+++ b/include/singa/neuralnet/input_layer/prefetch.h
@@ -0,0 +1,65 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#ifndef SINGA_NEURALNET_INPUT_LAYER_PREFETCH_H_
+#define SINGA_NEURALNET_INPUT_LAYER_PREFETCH_H_
+
+#include <string>
+#include <vector>
+#include "singa/io/store.h"
+#include "singa/neuralnet/layer.h"
+#include "singa/utils/data_shard.h"
+/**
+ * \file this file includes the declarations of input layers that inherit the
+ * base InputLayer to load input features.
+ *
+ * The feature loading phase can be implemented using a single layer or
+ * separated into DataLayer (for loading features as records) and ParserLayer
+ * (for parsing features from records). SINGA has provided some subclasses of
+ * DataLayer and ParserLayer.
+ *
+ * Data prefetching can be implemented as a sub-class of InputLayer.
+ * SINGA provides a built-in PrefetchLayer which embeds DataLayer and
+ * ParserLayer.
+ */
+namespace singa {
+using std::string;
+using std::vector;
+/**
+ * Layer for prefetching data records and parsing them.
+ *
+ * The data loading and parsing work is done by internal DataLayer and
+ * ParserLayer respectively. This layer controls the prefetching thread, i.e.,
+ * creating and joining the prefetching thread.
+ */
+class PrefetchLayer : public Layer {
+ public:
+  ~PrefetchLayer();
+  void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
+  void ComputeGradient(int flag, const vector<Layer*>& srclayers) override {}
+
+ protected:
+  std::thread thread_;
+};
+
+}  // namespace singa
+
+#endif  // SINGA_NEURALNET_INPUT_LAYER_PREFETCH_H_

Reply via email to