http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/utils/cluster_rt.h
----------------------------------------------------------------------
diff --git a/include/utils/cluster_rt.h b/include/utils/cluster_rt.h
deleted file mode 100644
index 5de6c16..0000000
--- a/include/utils/cluster_rt.h
+++ /dev/null
@@ -1,190 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-* 
-*   http://www.apache.org/licenses/LICENSE-2.0
-* 
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_UTILS_CLUSTER_RT_H_
-#define SINGA_UTILS_CLUSTER_RT_H_
-
-#include <zookeeper/zookeeper.h>
-#include <cstdio>
-#include <string>
-#include <vector>
-
-namespace singa {
-
-typedef void (*rt_callback)(void *context);
-
-const int kZKBufSize = 100;
-// following paths are global
-const std::string kZKPathSinga = "/singa";
-const std::string kZKPathSys =   "/singa/sys";
-const std::string kZKPathJLock = "/singa/sys/job-lock";
-const std::string kZKPathHostIdx = "/singa/sys/host-idx";
-const std::string kZKPathApp =   "/singa/app";
-const std::string kZKPathJob =   "/singa/app/job-";
-// following paths are local under /singa/app/job-X
-const std::string kZKPathJobGroup = "/group";
-const std::string kZKPathJobProc =  "/proc";
-const std::string kZKPathJobPLock = "/proc-lock";
-
-inline std::string GetZKJobWorkspace(int job_id) {
-  char buf[kZKBufSize];
-  snprintf(buf, kZKBufSize, "%010d", job_id);
-  return kZKPathJob + buf;
-}
-
-struct RTCallback {
-  rt_callback fn;
-  void* ctx;
-};
-
-struct JobInfo {
-  int id;
-  int procs;
-  std::string name;
-};
-
-/*
- * A wrapper for the zookeeper service which handles error codes and reconnections
- */
-class ZKService {
- public:
-  static void ChildChanges(zhandle_t* zh, int type, int state,
-                           const char *path, void* watcherCtx);
-
-  ~ZKService();
-  bool Init(const std::string& host, int timeout);
-  bool CreateNode(const char* path, const char* val, int flag, char* output);
-  bool DeleteNode(const char* path);
-  bool Exist(const char* path);
-  bool UpdateNode(const char* path, const char* val);
-  bool GetNode(const char* path, char* output);
-  bool GetChild(const char* path, std::vector<std::string>* vt);
-  bool WGetChild(const char* path, std::vector<std::string>* vt,
-                   RTCallback *cb);
-
- private:
-  const int kNumRetry = 5;
-  const int kSleepSec = 1;
-
-  static void WatcherGlobal(zhandle_t* zh, int type, int state,
-                            const char *path, void* watcherCtx);
-
-  zhandle_t* zkhandle_ = nullptr;
-};
-
-/**
- * ClusterRuntime is a runtime service that manages dynamic configuration
- * and status of the whole cluster. It mainly provides the following services:
- *    1)  Provide running status of each server/worker
- *    2)  Translate process id to (hostname:port)
- */
-class ClusterRuntime {
- public:
-  ClusterRuntime(const std::string& host, int job_id);
-  ClusterRuntime(const std::string& host, int job_id, int timeout);
-  ~ClusterRuntime();
-
-  /**
-   * Initialize the runtime instance
-   */
-  bool Init();
-  /**
-   * Register the process and get a unique process id
-   *
-   * \return the process id, -1 if failed
-   */
-  int RegistProc(const std::string& host_addr, int pid);
-  /**
-   * Translate the process id to a host address
-   *
-   * \return the host and port, "" if no such proc id 
-   */
-  std::string GetProcHost(int proc_id);
-  /**
-   * Server: watch all workers in a server group,
-   * will be notified when all workers have left
-   */
-  bool WatchSGroup(int gid, int sid, rt_callback fn, void* ctx);
-  /**
-   * Worker: join a server group (i.e. start to read/update these servers)
-   */
-  bool JoinSGroup(int gid, int wid, int s_group);
-  /**
-   * Worker: leave a server group (i.e. it has finished all its work)
-   */
-  bool LeaveSGroup(int gid, int wid, int s_group);
-
- private:
-  inline std::string groupPath(int gid) {
-    return group_path_ + "/sg" + std::to_string(gid);
-  }
-  inline std::string workerPath(int gid, int wid) {
-    return "/g" + std::to_string(gid) + "_w" + std::to_string(wid);
-  }
-
-  int timeout_ = 30000;
-  std::string host_ = "";
-  ZKService zk_;
-  std::string workspace_ = "";
-  std::string group_path_ = "";
-  std::string proc_path_ = "";
-  std::string proc_lock_path_ = "";
-  std::vector<RTCallback*> cb_vec_;
-};
-
-class JobManager {
- public:
-  // host is comma separated host:port pairs, each corresponding to a zk server.
-  // e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002"
-  explicit JobManager(const std::string& host);
-  JobManager(const std::string& host, int timeout);
-
-  // NOTICE: Init must be called once, before any other function is used
-  bool Init();
-  // generate a unique job id
-  bool GenerateJobID(int* id);
-  // generate a list of hosts for a job conf
-  bool GenerateHostList(const char* host_file, const char* job_file,
-                        std::vector<std::string>* list);
-  // list all jobs recorded in zk
-  bool ListJobs(std::vector<JobInfo>* jobs);
-  // list running processes for a job
-  bool ListJobProcs(int job, std::vector<std::string>* procs);
-  // remove a job path in zk
-  bool Remove(int job);
-  // remove all job paths in zk
-  bool RemoveAllJobs();
-  // remove all singa related paths in zk
-  bool CleanUp();
-
- private:
-  const int kJobsNotRemoved = 10;
-
-  bool CleanPath(const std::string& path, bool remove);
-  std::string ExtractClusterConf(const char* job_file);
-
-  int timeout_ = 30000;
-  std::string host_ = "";
-  ZKService zk_;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_UTILS_CLUSTER_RT_H_
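
For context, a minimal sketch of how the removed ClusterRuntime API was driven
from a worker process; the ZooKeeper address, worker address, and ids are
illustrative assumptions, and the snippet assumes the header as it stood before
this commit:

    // Hypothetical usage sketch, not code from the repository.
    #include <unistd.h>
    #include <string>
    #include "utils/cluster_rt.h"

    void RunWorker() {
      singa::ClusterRuntime rt("localhost:2181", /*job_id=*/0);
      if (!rt.Init()) return;                       // connect to ZooKeeper
      int proc_id = rt.RegistProc("10.0.0.1:5000", getpid());
      if (proc_id < 0) return;                      // -1 signals failure
      std::string peer = rt.GetProcHost(0);         // "host:port" of proc 0
      rt.JoinSGroup(/*gid=*/0, /*wid=*/proc_id, /*s_group=*/0);
      // ... train, reading/updating the servers in the group ...
      rt.LeaveSGroup(0, proc_id, 0);
    }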

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/utils/common.h
----------------------------------------------------------------------
diff --git a/include/utils/common.h b/include/utils/common.h
deleted file mode 100644
index 3eb0bbd..0000000
--- a/include/utils/common.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-* 
-*   http://www.apache.org/licenses/LICENSE-2.0
-* 
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_UTILS_COMMON_H_
-#define SINGA_UTILS_COMMON_H_
-
-#include <google/protobuf/message.h>
-#include <cstdarg>
-#include <unordered_map>
-#include <sstream>
-#include <string>
-#include <vector>
-#include <utility>
-#include "proto/common.pb.h"
-
-namespace singa {
-
-std::string IntVecToString(const std::vector<int>& vec);
-std::string VStringPrintf(std::string fmt, va_list l);
-std::string StringPrintf(std::string fmt, ...);
-
-/**
- * Locate the position of the arg in arglist.
- *
- * @param argc total num of arguments
- * @param arglist all arguments
- * @param the searched argument
- * @return the position of arg in the arglist; -1 if not found.
- */
-int ArgPos(int argc, char** arglist, const char* arg);
-void CreateFolder(const std::string name);
-/**
- * Slice a set of large Params into small pieces such that they can be roughly
- * equally partitioned into a fixed number of boxes.
- *
- * @param num total number of boxes to store the small pieces
- * @param sizes size of all Params
- * @return all slices for each Param
- */
-const std::vector<std::vector<int>> Slice(int num,
-    const std::vector<int>& sizes);
-/**
- * Partition slices into boxes.
- *
- * @param num number of boxes
- * @param slices slice sizes
- * @return box id for each slice
- */
-const std::vector<int> PartitionSlices(int num, const std::vector<int>& slices);
-/*
-inline void Sleep(int millisec=1){
-  std::this_thread::sleep_for(std::chrono::milliseconds(millisec));
-}
-*/
-int gcd(int a, int b);
-int LeastCommonMultiple(int a, int b);
-/*
-inline float rand_real() {
-  return  static_cast<float>(rand_r())/(RAND_MAX+1.0f);
-}
-*/
-std::string GetHostIP();
-void SetupLog(const std::string& workspace, const std::string& model);
-
-/**
- * Performance metrics.
- */
-class Metric {
- public:
-  Metric() {}
-  explicit Metric(const std::string& str);
-  /**
-   * Add one metric.
-   *
-   * If the metric exists, aggregate the value into it. Otherwise create a new entry for it.
-   *
-   * @param name metric name, e.g., 'loss'
-   * @param value metric value
-   */
-  void Add(const std::string& name, float value);
-  void Add(const std::string& name, float value, int count);
-  /**
-   * reset all metric counter and value to 0
-   */
-  void Reset();
-  /**
-   * Generate a one-line string for logging
-   */
-  std::string ToLogString() const;
-  /**
-   * Serialize the object into a string
-   */
-  std::string ToString() const;
-  /**
-   * Parse the metric from a string
-   */
-  void ParseFrom(const std::string& msg);
-
- private:
-  std::unordered_map<std::string, std::pair<int, float>> entry_;
-};
-
-using google::protobuf::Message;
-void Im2col(const float* data_im, const int channels,
-    const int height, const int width, const int kernel_h, const int kernel_w,
-    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
-    float* data_col);
-void Col2im(const float* data_col, const int channels,
-    const int height, const int width, const int patch_h, const int patch_w,
-    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
-    float* data_im);
-void ForwardMaxPooling(const float* bottom, const int num, const int channels,
-    const int height, const int width, const int kernel_h, const int kernel_w,
-    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
-    float* top, float* mask);
-void BackwardMaxPooling(const float* top, const float* mask, const int num,
-    const int channels, const int height, const int width,
-    const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
-    const int stride_h, const int stride_w,
-    float* bottom);
-void ForwardAvgPooling(const float* bottom, const int num, const int channels,
-    const int height, const int width, const int kernel_h, const int kernel_w,
-    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
-    float* top);
-void BackwardAvgPooling(const float* top, const int num, const int channels,
-    const int height, const int width, const int kernel_h, const int kernel_w,
-    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
-    float* bottom);
-
-void ReadProtoFromTextFile(const char* filename, Message* proto);
-void WriteProtoToTextFile(const Message& proto, const char* filename);
-void ReadProtoFromBinaryFile(const char* filename, Message* proto);
-void WriteProtoToBinaryFile(const Message& proto, const char* filename);
-
-
-}  // namespace singa
-
-#endif  // SINGA_UTILS_COMMON_H_
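
The Slice()/PartitionSlices() pair above implements the load-balancing step
described in their comments. A small sketch of how they compose; the sizes and
box count are made-up values:

    // Hypothetical usage sketch, not code from the repository.
    #include <iostream>
    #include <vector>
    #include "utils/common.h"

    int main() {
      // Two Params holding 60 and 40 floats, spread over 4 boxes (servers).
      std::vector<int> sizes = {60, 40};
      const auto per_param = singa::Slice(4, sizes);   // slice lengths per Param
      std::vector<int> all_slices;
      for (const auto& s : per_param)
        all_slices.insert(all_slices.end(), s.begin(), s.end());
      const auto box = singa::PartitionSlices(4, all_slices);  // box id per slice
      for (size_t i = 0; i < box.size(); ++i)
        std::cout << "slice " << i << " -> box " << box[i] << '\n';
      return 0;
    }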

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/utils/data_shard.h
----------------------------------------------------------------------
diff --git a/include/utils/data_shard.h b/include/utils/data_shard.h
deleted file mode 100644
index 7d69ae5..0000000
--- a/include/utils/data_shard.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-* 
-*   http://www.apache.org/licenses/LICENSE-2.0
-* 
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_UTILS_DATA_SHARD_H_
-#define SINGA_UTILS_DATA_SHARD_H_
-
-#include <google/protobuf/message.h>
-#include <fstream>
-#include <string>
-#include <unordered_set>
-
-namespace singa {
-
-/**
- * Data shard stores training/validation/test tuples.
- * Every worker node should have a training shard (validation/test shard
- * is optional). The shard file for training is
- * singa::Cluster::workspace()/train/shard.dat; The shard file for validation
- * is singa::Cluster::workspace()/validation/shard.dat; similar path for test.
- *
- * shard.dat consists of a set of unordered tuples. Each tuple is
- * encoded as [key_len key record_len val] (key_len and record_len are of type
- * uint32, which indicate the bytes of key and record respectively).
- *
- * When a Shard object is created, it removes the last tuple if its key size
- * and record size do not match, because the last tuple write had crashed.
- *
- * TODO
- * 1. split one shard into multiple shards.
- * 2. add threading to prefetch and parse records
- *
- */
-class DataShard {
- public:
-  enum {
-    // read only mode used in training
-    kRead = 0,
-    // write mode used in creating shard (will overwrite previous one)
-    kCreate = 1,
-    // append mode, e.g. used when previous creating crashes
-    kAppend = 2
-  };
-
-  /**
-   * Init the shard obj.
-   *
-   * @param folder Shard folder (path excluding shard.dat) on worker node
-   * @param mode Shard open mode: DataShard::kRead, kCreate or kAppend
-   * @param capacity batch this many bytes of data for every disk op (read or
-   * write); default is 100MB
-   */
-  DataShard(const std::string& folder, int mode);
-  DataShard(const std::string& folder, int mode, int capacity);
-  ~DataShard();
-
-  /**
-   * read next tuple from the shard.
-   *
-   * @param key Tuple key
-   * @param val Record of type Message
-   * @return false if the read fails, e.g., the tuple was not inserted
-   *         completely.
-   */
-  bool Next(std::string* key, google::protobuf::Message* val);
-  /**
-   * read next tuple from the shard.
-   *
-   * @param key Tuple key
-   * @param val Record of type string
-   * @return false if the read fails, e.g., the tuple was not inserted
-   *         completely.
-   */
-  bool Next(std::string* key, std::string* val);
-  /**
-   * Append one tuple to the shard.
-   *
-   * @param key e.g., image path
-   * @param tuple the value, e.g., a serialized record
-   * @return false on failure, e.g., the key was inserted before
-   */
-  bool Insert(const std::string& key, const google::protobuf::Message& tuple);
-  /**
-   * Append one tuple to the shard.
-   *
-   * @param key e.g., image path
-   * @param tuple the value, e.g., a serialized record
-   * @return false on failure, e.g., the key was inserted before
-   */
-  bool Insert(const std::string& key, const std::string& tuple);
-  /**
-   * Move the read pointer to the head of the shard file.
-   * Used for repeated reading.
-   */
-  void SeekToFirst();
-  /**
-   * Flush buffered data to disk.
-   * Used only for kCreate or kAppend.
-   */
-  void Flush();
-  /**
-   * Iterate through all tuples to get the num of all tuples.
-   *
-   * @return num of tuples
-   */
-  int Count();
-  /**
-   * @return path to shard file
-   */
-  inline std::string path() { return path_; }
-
- protected:
-  /**
-   * Read the next key and prepare buffer for reading value.
-   *
-   * @param key
-   * @return length (i.e., bytes) of value field.
-   */
-  int Next(std::string* key);
-  /**
-   * Setup the disk pointer to the right position for append in case that
-   * the previous write crashed.
-   *
-   * @param path shard path.
-   * @return offset (end pos) of the last success written record.
-   */
-  int PrepareForAppend(const std::string& path);
-  /**
-   * Read data from disk if the current data in the buffer is not a full field.
-   *
-   * @param size size of the next field.
-   */
-  bool PrepareNextField(int size);
-
- private:
-  char mode_ = 0;
-  std::string path_ = "";
-  // either ifstream or ofstream
-  std::fstream fdat_;
-  // to avoid replicated record
-  std::unordered_set<std::string> keys_;
-  // internal buffer
-  char* buf_ = nullptr;
-  // offset inside the buf_
-  int offset_ = 0;
-  // allocated bytes for the buf_
-  int capacity_ = 0;
-  // bytes in buf_, used in reading
-  int bufsize_ = 0;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_UTILS_DATA_SHARD_H_
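
A sketch of the write-then-read cycle the DataShard comments describe, using
the string-valued overloads; the folder path and tuple contents are made up:

    // Hypothetical usage sketch, not code from the repository.
    #include <string>
    #include "utils/data_shard.h"

    void WriteThenRead() {
      {
        singa::DataShard out("/tmp/train", singa::DataShard::kCreate);
        out.Insert("img-0001.jpg", std::string("serialized record bytes"));
        out.Flush();                      // push buffered tuples to disk
      }
      singa::DataShard in("/tmp/train", singa::DataShard::kRead);
      std::string key, val;
      while (in.Next(&key, &val)) { /* consume one [key, val] tuple */ }
      in.SeekToFirst();                   // rewind for another pass
    }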

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/utils/factory.h
----------------------------------------------------------------------
diff --git a/include/utils/factory.h b/include/utils/factory.h
deleted file mode 100644
index 3af25f0..0000000
--- a/include/utils/factory.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-* 
-*   http://www.apache.org/licenses/LICENSE-2.0
-* 
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_UTILS_FACTORY_H_
-#define SINGA_UTILS_FACTORY_H_
-
-#include <glog/logging.h>
-#include <functional>
-#include <map>
-#include <string>
-
-/**
- * Macro that creates a function which instantiates a subclass instance and
- * returns a pointer to the base class.
- */
-#define CreateInstance(SubClass, BaseClass) \
-  [](void)->BaseClass* {return new SubClass();}
-
-/**
- * Factory template to generate a class (or sub-class) object based on an id.
- * 1. Register a creation function that generates a class
- *    object based on the id.
- * 2. Call Create() to invoke the creation function and return
- *    a pointer to the base class.
- */
-template<typename T>
-class Factory {
- public:
-  /**
-   * Register functions to create user defined classes.
-   * This function is called by the REGISTER_FACTORY macro.
-   *
-   * @param id Identifier of the creating function/class
-   * @param func a function that creates a layer instance
-   */
-  inline void Register(const std::string& id,
-                       const std::function<T*(void)>& func) {
-    CHECK(str2func_.find(id) == str2func_.end())
-      << "The id has been registered by another function";
-    str2func_[id] = func;
-  }
-  /**
-   * Register functions to create user defined classes.
-   * This function is called by the REGISTER_FACTORY macro.
-   *
-   * @param id Identifier of the creating function/class
-   * @param func a function that creates a layer instance
-   */
-  inline void Register(int id,
-                       const std::function<T*(void)>& func) {
-    CHECK(id2func_.find(id) == id2func_.end())
-      << "The id has been registered by another function";
-    id2func_[id] = func;
-  }
-  /**
-   * create an instance by providing its id
-   *
-   * @param id
-   */
-  inline T* Create(const std::string& id) {
-    CHECK(str2func_.find(id) != str2func_.end())
-      << "The creation function for " << id << " has not been registered";
-    return str2func_[id]();
-  }
-  /**
-   * create an instance by providing its id
-   *
-   * @param id
-   */
-  inline T* Create(int id) {
-    CHECK(id2func_.find(id) != id2func_.end())
-      << "The creation function for " << id << " has not been registered";
-    return id2func_[id]();
-  }
-
- private:
-  // Map that stores the registered creation functions
-  std::map<std::string, std::function<T*(void)>> str2func_;
-  std::map<int, std::function<T*(void)>> id2func_;
-};
-
-#endif  // SINGA_UTILS_FACTORY_H_
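
A sketch of the register-then-create flow, with hypothetical BaseLayer and
FooLayer classes standing in for real layer types:

    // Hypothetical usage sketch, not code from the repository.
    #include "utils/factory.h"

    class BaseLayer { public: virtual ~BaseLayer() {} };
    class FooLayer : public BaseLayer {};

    int main() {
      Factory<BaseLayer> factory;
      factory.Register("Foo", CreateInstance(FooLayer, BaseLayer));
      BaseLayer* layer = factory.Create("Foo");  // a new FooLayer as BaseLayer*
      delete layer;
      return 0;
    }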

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/utils/graph.h
----------------------------------------------------------------------
diff --git a/include/utils/graph.h b/include/utils/graph.h
deleted file mode 100644
index bad7b19..0000000
--- a/include/utils/graph.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-* 
-*   http://www.apache.org/licenses/LICENSE-2.0
-* 
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_UTILS_GRAPH_H_
-#define SINGA_UTILS_GRAPH_H_
-
-#include <stack>
-#include <string>
-#include <map>
-#include <vector>
-
-namespace singa {
-
-class Node {
- public:
-  /**
-   * Node constructor.
-   *
-   * @param name name of the corresponding layer
-   */
-  explicit Node(std::string name);
-  /**
-   * Node constructor.
-   *
-   * This node is a partition of some node.
-   * @param name node name
-   * @param origin  name of the original node
-   * @param id partition id of this node
-   * @param proto conf of the corresponding layer
-   */
-  Node(const std::string& name, const std::string& origin, int id, void* proto);
-  ~Node() {}  // the proto field is deleted outside by other functions
-  void AddDstNode(Node* dstnode);
-  void AddSrcNode(Node* srcnode);
-  void RemoveDstNode(Node* dst);
-  void RemoveSrcNode(Node* src);
-
-  std::string name = "";
-  //! name of the origin node/layer from which this node is derived
-  std::string origin = "";
-  //! partition id
-  int partition_id = 0;
-  //! proto of the corresponding layer
-  void* proto = nullptr;
-  std::vector<Node*> srcnodes;
-  std::vector<Node*> dstnodes;
-};
-
-/**
- * Neuralnet is constructed by first creating a graph with each node
- * representing one layer. After a topological sort of the graph nodes,
- * layers are created and connected.
- */
-class Graph {
- public:
-  Graph() {}
-  ~Graph();
-  /**
-   * @return all nodes of the graph
-   */
-  inline const std::vector<Node*>& nodes() const {
-    return nodes_;
-  }
-  /**
-   * @param name node name
-   * @return the node of the given name
-   */
-  inline Node* node(const std::string& name) const {
-    return name2node_.at(name);
-  }
-  void AddNode(Node* node);
-  Node* AddNode(const std::string& name);
-  void AddEdge(Node* srcnode, Node* dstnode);
-  void AddEdge(const std::string& src, const std::string& dst);
-  void RemoveEdge(Node* src, Node* dst);
-  void RemoveEdge(const std::string &src, const std::string& dst);
-  /**
-   * Dump the graph into a JSON string which can be used to draw a picture
-   * via graphviz
-   */
-  std::string ToJson() const;
-  /**
-   * \copybrief ToJson()
-   *
-   * @param info info associated with each node
-   */
-  std::string ToJson(const std::map<std::string, std::string>& info) const;
-  /**
-   * Do a topological sort of all nodes of the graph.
-   */
-  void Sort();
-
- private:
-  std::vector<Node*> nodes_;
-  std::map<std::string, Node*> name2node_;
-};
-
-}  // namespace singa
-
-#endif  // SINGA_UTILS_GRAPH_H_
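
A sketch of building and sorting a three-node chain with this Graph API; the
node names are illustrative:

    // Hypothetical usage sketch, not code from the repository.
    #include "utils/graph.h"

    void BuildChain() {
      singa::Graph g;
      g.AddNode("data");
      g.AddNode("hidden");
      g.AddNode("loss");
      g.AddEdge("data", "hidden");
      g.AddEdge("hidden", "loss");
      g.Sort();  // nodes() is now in topological order
      for (singa::Node* n : g.nodes()) { /* create the layer for n->name */ }
    }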

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/utils/image_transform.h
----------------------------------------------------------------------
diff --git a/include/utils/image_transform.h b/include/utils/image_transform.h
deleted file mode 100644
index 2867ad2..0000000
--- a/include/utils/image_transform.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_UTILS_IMAGE_TRANSFORM_H_
-#define SINGA_UTILS_IMAGE_TRANSFORM_H_
-
-#include <glog/logging.h>
-// TODO(wangwei) provide image transformation API, the implementation can be
-// done by opencv, manual transform, or mshadow.
-namespace singa {
-
-void ImageTransform(const float* in, const float* mean, bool mirror, int h_crop,
-    int w_crop, int h_offset, int w_offset, int channel, int height, int width,
-    float scale, float* out);
-}  // namespace singa
-
-#endif  // SINGA_UTILS_IMAGE_TRANSFORM_H_
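
A sketch of one plausible call, cropping a 3x32x32 image to 3x28x28 with
mirroring; the parameter values are assumptions, and the exact crop/mirror
semantics live in the implementation file:

    // Hypothetical usage sketch, not code from the repository.
    #include "utils/image_transform.h"

    void Example(const float* in, const float* mean, float* out) {
      singa::ImageTransform(in, mean, /*mirror=*/true,
                            /*h_crop=*/28, /*w_crop=*/28,
                            /*h_offset=*/2, /*w_offset=*/2,
                            /*channel=*/3, /*height=*/32, /*width=*/32,
                            /*scale=*/1.0f, out);
    }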

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/utils/param.h
----------------------------------------------------------------------
diff --git a/include/utils/param.h b/include/utils/param.h
deleted file mode 100644
index f690438..0000000
--- a/include/utils/param.h
+++ /dev/null
@@ -1,397 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_UTILS_PARAM_H_
-#define SINGA_UTILS_PARAM_H_
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "comm/msg.h"
-#include "proto/job.pb.h"
-#include "utils/blob.h"
-
-namespace singa {
-using std::vector;
-/**
- * Base parameter generator which initializes parameter values.
- */
-class ParamGenerator {
- public:
-  static ParamGenerator* Create(const ParamGenProto& proto);
-
-  virtual ~ParamGenerator() {}
-
-  virtual void Init(const ParamGenProto& proto) { proto_ = proto; }
-  virtual void Fill(Blob<float>* data);
-
- protected:
-  ParamGenProto proto_;
-};
-
-class GaussianGen : public ParamGenerator {
- public:
-  void  Fill(Blob<float>* data) override;
-};
-
-class GaussianSqrtFanInGen : public GaussianGen {
- public:
-  void  Fill(Blob<float>* data) override;
-};
-
-class UniformGen : public ParamGenerator {
- public:
-  void  Fill(Blob<float>* data) override;
-};
-
-class UniformSqrtFanInGen : public UniformGen {
- public:
-  void Fill(Blob<float>* data) override;
-};
-
-class UniformSqrtFanInOutGen : public UniformGen {
- public:
-  void Fill(Blob<float>* data) override;
-};
-
-/**
- * Base parameter class.
- *
- * The Param object is a set of parameters, e.g., the (sub) weight matrix or
- * (sub) bias vector.
- *
- * It has a gradient Blob and a data Blob for gradients and parameter values.
- * Since some layers (or neuralnet) share parameter values, the data Blob is a
- * shared pointer which can be assigned to many Param objects' data field.
- *
- * It provides access methods like data(), grad(). It also provides functions
- * for generating and parsing messages to transfer the Param
- * objects among worker-worker, worker-server and server-server.
- *
- * Param objects are of different sizes, which makes it hard to achieve
- * load-balance among servers. Hence, we slice large Param objects into small
- * pieces. At the server side, one slice is a Param object.
- */
-class Param {
- public:
-  /**
-   * Create an instance of (sub) Param class based on the type from the
-   * configuration.
-   *
-   * @param[in] conf configuration
-   * @return a pointer to an instance
-   */
-  static Param* Create(const ParamProto& conf);
-
-  /**
-   * Try to slice the Param objects (from a neural net) into a given number of
-   * servers (groups) evenly. This is to achieve load-balance among servers.
-   *
-   * It does not change the Param objects, but just computes the length of each
-   * slice.
-   *
-   * @param num number of servers (groups) for maintaining the Param objects.
-   * @param params all Param objects from a neural net.
-   * @return the length of each slice.
-   */
-  static const vector<int> ComputeSlices(int num, const vector<Param*>& params);
-  /**
-   * It computes the length of each slice and slices the Param objects by adding
-   * the slicing information into every Param object.
-   *
-   * @copydetails ComputeSlices()
-   */
-  static void SliceParams(int num, const vector<Param*>& params);
-
-  Param() {}
-  virtual ~Param() {}
-  void Init(const ParamProto& proto) { proto_ = proto; }
-  /**
-   * Setup the Param object with the given shape.
-   *
-   * @param shape one value per dimension
-   */
-  virtual void Setup(const std::vector<int>& shape);
-  /**
-   * Fill the values according to init method, e.g., gaussian distribution.
-   *
-   * @param version initial version
-   */
-  virtual void InitValues();
-  virtual void InitValues(int version);
-  /**
-   * Share the data blob from other Param objects.
-   *
-   * @param other the Param object whose owner owns the data blob
-   */
-  void ShareFrom(const Param& other);
-  /**
-   * Init param values from checkpoint blob.
-   */
-  void FromProto(const BlobProto& blob);
-  /**
-   * Dump param values to blob.
-   */
-  void ToProto(BlobProto* blob);
-  /**
-   * Add a slice
-   *
-   * @param slice_id
-   * @param size num of floats for this slice
-   */
-  void AddSlice(int slice_id, int size);
-  /**
-   * Scale the learning rate when updating parameters in the Param object
-   */
-  inline float lr_scale() const { return proto_.lr_scale(); }
-  /**
-   * Scale the weight decay when updating parameters in the Param object
-   */
-  inline float wd_scale() const { return proto_.wd_scale(); }
-  /**
-   * Parameter name used for Param re-use in other model or sharing between
-   * layers
-   */
-  inline const std::string& name() const { return proto_.name(); }
-  inline void set_name(const std::string& name) { proto_.set_name(name); }
-  /**
-   * If it shares data from others, then owner is the id of that Param,
-   * otherwise it is itself's id.
-   */
-  inline int owner() const { return proto_.owner(); }
-  /**
-   * IDs start from 0 and are ordered for all Params from the same neuralnet
-   */
-  inline int id() const { return proto_.id(); }
-  /**
-   * Set ID
-   */
-  inline void set_id(int id) {
-    proto_.set_id(id);
-    proto_.set_owner(id);
-  }
-  /**
-   * The Param version is stored inside the data blob so that all Param objs
-   * sharing the same values have the same version.
-   * @return the param version
-   */
-  inline int version() const { return data_->version(); }
-  inline void set_version(int v) { data_->set_version(v); }
-  /**
-   * @return the version of the parameter value local to a worker
-   */
-  inline int local_version() const { return local_version_; }
-  inline void set_local_version(int v) { local_version_ = v; }
-  inline const std::string& share_from() const { return proto_.share_from(); }
-   /**
-    * @return num of floats.
-    */
-  inline int size() const { return data_->count(); }
-  inline const Blob<float>& data() const { return *data_; }
-  inline Blob<float>* mutable_data() { return data_.get(); }
-  inline const Blob<float> &grad() const { return grad_; }
-  inline Blob<float> *mutable_grad() { return &grad_; }
-  inline float* mutable_cpu_data() { return data_->mutable_cpu_data(); }
-  inline float* mutable_cpu_grad() { return grad_.mutable_cpu_data(); }
-  inline float* mutable_cpu_history() { return history_.mutable_cpu_data(); }
-  /**
-   * @return slice start ID
-   */
-  inline int slice_start() const { return slice_start_; }
-  inline int num_slices() const { return num_slices_; }
-
-  /**
-   * Below are message/request related functions.
-   * The basic communication workflows are as follow:
-   *------------------------------------------------------------------------
-   *         |Put         |Get           |Update           |Sync
-   *------------------------------------------------------------------------
-   * Generate|(stub)      |(stub)        |(stub)           |(server)
-   * Message |GenPutMsg   |GenGetMsg     |GenUpdateMsg     |GenSyncMsg
-   *------------------------------------------------------------------------
-   * Handle  |(server)    |(server)      |(server)         |(server)
-   * Message |HandlePutMsg|HandleGetMsg  |ParseUpdateMsg   |HandleSyncMsg
-   *         |            |              |GenUpdateResMsg  |
-   *------------------------------------------------------------------------
-   * Handle  |            |(stub)        |(stub)           |(server)
-   * Response|            |ParseGetResMsg|ParseUpdateResMsg|ParseSyncResMsg
-   *------------------------------------------------------------------------
-   */
-
-  /**
-   * Generate the message for a put request, i.e., put parameters to a server
-   *
-   * This function is called at worker/stub side.
-   * @param copy decides whether to copy the parameter values into the message.
-   * @param slice_idx index of the slice from which the message is generated.
-   * @return generated message without setting src, dst, target fields.
-   */
-  virtual Msg* GenPutMsg(bool copy, int slice_idx);
-  /**
-   * Generate the message for a get request, i.e., get parameters from a server
-   * \copydetails GenPutMsg(bool, int);
-   */
-  virtual Msg* GenGetMsg(bool copy, int slice_idx);
-  /**
-   * Generate the message for a update request, i.e., pass info to server for
-   * parameter update.
-   * \copydetails GenPutMsg(bool, int);
-   */
-  virtual Msg* GenUpdateMsg(bool copy, int slice_idx);
-  /**
-   * Generate the message for a synchronization request between server groups.
-   *
-   * This function is called at server side where the Param is actually a slice
-   * of an original Param object.
-   */
-  virtual Msg* GenSyncMsg(int offset, int size);
-  /**
-   * Server handling function for put request.
-   *
-   * @param msg request
-   * @param reserve if true reserve the msg space for the calling function;
-   * otherwise the msg should be freed inside the function.
-   * @return resposne message
-   */
-  virtual Msg* HandlePutMsg(Msg** msg, bool reserve);
-  /**
-   * Server handling function for get request.
-   *
-   * \copydetails HandlePutMsg(Msg**, bool reserve)
-   */
-  virtual Msg* HandleGetMsg(Msg** msg, bool reserve);
-  /**
-   * Server parse update requests.
-   * \copydetails GenUpdateResponseMsgs(const std::vector<Msg*>& msgs);
-   */
-  virtual void ParseUpdateMsgs(const std::vector<Msg*>& msgs);
-  /**
-   * Generate the messages to response the update requests.
-   *
-   * This function is called at the server side, where the Param is actually a
-   * slice of an original Param object.
-   *
-   * @param msgs for synchronous training, there would be multiple procs in
-   * which workers share the same Param (slice) objects. Their update requests
-   * are buffered and handled together. For asynchronous training, there is
-   * only one request in msgs.
-   * @return response messages
-   */
-  virtual const std::vector<Msg*>
-    GenUpdateResponseMsgs(std::vector<Msg*>* msgs, bool reserve);
-  /**
-   * Server handling function for synchronization message
-   *
-   * \copydetails HandleGetMsg(Msg**, bool reserve)
-   */
-  virtual Msg* HandleSyncMsg(Msg** msg, bool reserve);
-  /**
-   * Worker/Stub parsing function for get response.
-   *
-   * @param msg
-   * @param slice_idx index for the slice
-   */
-  virtual int ParseGetResponseMsg(Msg* msg, int slice_idx);
-  /**
-   * Worker/Server parsing function for update response
-   *
-   * \copydetails ParseGetResponseMsg(Msg*, int);
-   */
-  virtual int ParseUpdateResponseMsg(Msg* msg, int slice_idx);
-  /**
-   * Server parsing function for synchronization response.
-   *
-   * \copydetails ParseGetResponseMsg(Msg*, int);
-   */
-  virtual int ParseSyncResponseMsg(Msg* msg, int slice_idx);
-
- protected:
-  /**
-   * Implement the common code of ParseGetResponseMsg and ParseUpdateResponseMsg
-   * \copydetails ParseSyncResponseMsg(Msg* msg, int slice_idx);
-   */
-  void ParseResponseMsg(Msg* msg, int slice_idx);
-
- protected:
-  int local_version_ = -1;
-  // the ID of the first slice
-  int slice_start_ = 0;
-  int num_slices_ = 0;
-  // offset and size of each slice
-  std::vector<int> slice_offset_;
-  std::vector<int> slice_size_;
-  // for debug checking
-  // since put request has no feedback, we do not track its pending status
-  std::vector<bool> pending_get_;
-  std::vector<bool> pending_update_;
-  int num_pending_requests_ = 0;
-  // data field
-  std::shared_ptr<Blob<float>> data_ = nullptr;
-  // gradient, history gradient of this parameter
-  Blob<float> grad_, history_;
-  ParamProto proto_;
-};
-
-/**
- * ParamEntry is used for aggregating gradients of Params shared by workers from
- * the same group.
- *
- * For each worker group, every unique Param object has a ParamEntry object.
- * Param objects sharing the same values are associated with the same
- * ParamEntry.
- */
-class ParamEntry {
- public:
-  ParamEntry() {}
-  ParamEntry(int total, Param* p);
-  /**
-   * Associate the counter to a Param object.
-   *
-   * @param p
-   * @param local true if it is used by workers in this process, false otherwise
-   */
-  void AddParam(bool local, Param* p);
-  int next_version = -1;  // next_version & num_update are directly used by stub
-  int num_update = 0;
-  int num_local = 0;  //!< # local workers using the shared parameter
-  int num_total = 0;  //!< # total workers using the shared parameter
-  //!< Shares are deleted by neuralnet's destructor
-  std::vector<Param*> shares;
-};
-
-inline int ParamTrgt(int param_id, int slice_id) {
-  return (param_id << 16) | slice_id;
-}
-
-inline int ParamID(int param_trgt) {
-  return param_trgt >> 16;
-}
-
-inline int SliceID(int param_trgt) {
-  static const int mask = (1 << 16) - 1;
-  return param_trgt & mask;
-}
-
-}  // namespace singa
-
-#endif  // SINGA_UTILS_PARAM_H_
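
The ParamTrgt/ParamID/SliceID helpers above pack a param id and a slice id
into one int, 16 bits each. A small check of the round trip:

    // Hypothetical usage sketch, not code from the repository.
    #include <cassert>
    #include "utils/param.h"

    int main() {
      int trgt = singa::ParamTrgt(/*param_id=*/3, /*slice_id=*/7);
      assert(singa::ParamID(trgt) == 3);   // high 16 bits
      assert(singa::SliceID(trgt) == 7);   // low 16 bits
      return 0;
    }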

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/utils/singleton.h
----------------------------------------------------------------------
diff --git a/include/utils/singleton.h b/include/utils/singleton.h
deleted file mode 100644
index 4cf487e..0000000
--- a/include/utils/singleton.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-* 
-*   http://www.apache.org/licenses/LICENSE-2.0
-* 
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_UTILS_SINGLETON_H_
-#define SINGA_UTILS_SINGLETON_H_
-
-/**
-  * Thread-safe implementation for C++11 according to
-  * http://stackoverflow.com/questions/2576022/efficient-thread-safe-singleton-in-c
-  */
-template<typename T>
-class Singleton {
- public:
-  static T* Instance() {
-    static T data_;
-    return &data_;
-  }
-};
-
-/**
- * Thread Specific Singleton
- *
- * Each thread will have its own data_ storage.
- */
-template<typename T>
-class TSingleton {
- public:
-  static T* Instance() {
-    static thread_local T data_;
-    return &data_;
-  }
-};
-
-#endif  // SINGA_UTILS_SINGLETON_H_
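
A sketch of the difference between the two templates, with a hypothetical
Config type:

    // Hypothetical usage sketch, not code from the repository.
    #include "utils/singleton.h"

    struct Config { int workers = 0; };

    int main() {
      Singleton<Config>::Instance()->workers = 4;      // one object per process
      int n = Singleton<Config>::Instance()->workers;  // same object: n == 4
      TSingleton<Config>::Instance()->workers = 1;     // one object per thread
      return n == 4 ? 0 : 1;
    }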

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/utils/tinydir.h
----------------------------------------------------------------------
diff --git a/include/utils/tinydir.h b/include/utils/tinydir.h
deleted file mode 100644
index abb7000..0000000
--- a/include/utils/tinydir.h
+++ /dev/null
@@ -1,562 +0,0 @@
-/*
-Copyright (c) 2013-2014, Cong Xu, Baudouin Feildel
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-#ifndef TINYDIR_H
-#define TINYDIR_H
-
-#include <errno.h>
-#include <stdlib.h>
-#include <string.h>
-#ifdef _WIN32
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-#ifdef _MSC_VER
-#pragma warning (disable : 4996)
-#endif
-#else
-#include <dirent.h>
-#include <libgen.h>
-#include <sys/stat.h>
-#endif
-
-
-/* types */
-
-#define _TINYDIR_PATH_MAX 4096
-#ifdef _WIN32
-/* extra chars for the "\\*" mask */
-#define _TINYDIR_PATH_EXTRA 2
-#else
-#define _TINYDIR_PATH_EXTRA 0
-#endif
-#define _TINYDIR_FILENAME_MAX 256
-
-#ifdef _MSC_VER
-#define _TINYDIR_FUNC static __inline
-#else
-#define _TINYDIR_FUNC static __inline__
-#endif
-
-/* Allow user to use a custom allocator by defining _TINYDIR_MALLOC and _TINYDIR_FREE. */
-#if    defined(_TINYDIR_MALLOC) &&  defined(_TINYDIR_FREE)
-#elif !defined(_TINYDIR_MALLOC) && !defined(_TINYDIR_FREE)
-#else
-#error "Either define both alloc and free or none of them!"
-#endif
-
-#if !defined(_TINYDIR_MALLOC)
-       #define _TINYDIR_MALLOC(_size) malloc(_size)
-       #define _TINYDIR_FREE(_ptr)    free(_ptr)
-#endif //!defined(_TINYDIR_MALLOC)
-
-typedef struct
-{
-       char path[_TINYDIR_PATH_MAX];
-       char name[_TINYDIR_FILENAME_MAX];
-       char *extension;
-       int is_dir;
-       int is_reg;
-
-#ifdef _WIN32
-#else
-       struct stat _s;
-#endif
-} tinydir_file;
-
-typedef struct
-{
-       char path[_TINYDIR_PATH_MAX];
-       int has_next;
-       size_t n_files;
-
-       tinydir_file *_files;
-#ifdef _WIN32
-       HANDLE _h;
-       WIN32_FIND_DATAA _f;
-#else
-       DIR *_d;
-       struct dirent *_e;
-#endif
-} tinydir_dir;
-
-
-/* declarations */
-
-_TINYDIR_FUNC
-int tinydir_open(tinydir_dir *dir, const char *path);
-_TINYDIR_FUNC
-int tinydir_open_sorted(tinydir_dir *dir, const char *path);
-_TINYDIR_FUNC
-void tinydir_close(tinydir_dir *dir);
-
-_TINYDIR_FUNC
-int tinydir_next(tinydir_dir *dir);
-_TINYDIR_FUNC
-int tinydir_readfile(const tinydir_dir *dir, tinydir_file *file);
-_TINYDIR_FUNC
-int tinydir_readfile_n(const tinydir_dir *dir, tinydir_file *file, size_t i);
-_TINYDIR_FUNC
-int tinydir_open_subdir_n(tinydir_dir *dir, size_t i);
-
-_TINYDIR_FUNC
-void _tinydir_get_ext(tinydir_file *file);
-_TINYDIR_FUNC
-int _tinydir_file_cmp(const void *a, const void *b);
-
-
-/* definitions*/
-
-_TINYDIR_FUNC
-int tinydir_open(tinydir_dir *dir, const char *path)
-{
-       if (dir == NULL || path == NULL || strlen(path) == 0)
-       {
-               errno = EINVAL;
-               return -1;
-       }
-       if (strlen(path) + _TINYDIR_PATH_EXTRA >= _TINYDIR_PATH_MAX)
-       {
-               errno = ENAMETOOLONG;
-               return -1;
-       }
-
-       /* initialise dir */
-       dir->_files = NULL;
-#ifdef _WIN32
-       dir->_h = INVALID_HANDLE_VALUE;
-#else
-       dir->_d = NULL;
-#endif
-       tinydir_close(dir);
-
-       strcpy(dir->path, path);
-#ifdef _WIN32
-       strcat(dir->path, "\\*");
-       dir->_h = FindFirstFileA(dir->path, &dir->_f);
-       dir->path[strlen(dir->path) - 2] = '\0';
-       if (dir->_h == INVALID_HANDLE_VALUE)
-#else
-       dir->_d = opendir(path);
-       if (dir->_d == NULL)
-#endif
-       {
-               errno = ENOENT;
-               goto bail;
-       }
-
-       /* read first file */
-       dir->has_next = 1;
-#ifndef _WIN32
-       dir->_e = readdir(dir->_d);
-       if (dir->_e == NULL)
-       {
-               dir->has_next = 0;
-       }
-#endif
-
-       return 0;
-
-bail:
-       tinydir_close(dir);
-       return -1;
-}
-
-_TINYDIR_FUNC
-int tinydir_open_sorted(tinydir_dir *dir, const char *path)
-{
-       /* Count the number of files first, to pre-allocate the files array */
-       size_t n_files = 0;
-       if (tinydir_open(dir, path) == -1)
-       {
-               return -1;
-       }
-       while (dir->has_next)
-       {
-               n_files++;
-               if (tinydir_next(dir) == -1)
-               {
-                       goto bail;
-               }
-       }
-       tinydir_close(dir);
-
-       if (tinydir_open(dir, path) == -1)
-       {
-               return -1;
-       }
-
-       dir->n_files = 0;
-       dir->_files = (tinydir_file *)_TINYDIR_MALLOC(sizeof *dir->_files * n_files);
-       if (dir->_files == NULL)
-       {
-               errno = ENOMEM;
-               goto bail;
-       }
-       while (dir->has_next)
-       {
-               tinydir_file *p_file;
-               dir->n_files++;
-
-               p_file = &dir->_files[dir->n_files - 1];
-               if (tinydir_readfile(dir, p_file) == -1)
-               {
-                       goto bail;
-               }
-
-               if (tinydir_next(dir) == -1)
-               {
-                       goto bail;
-               }
-
-               /* Just in case the number of files has changed between the first and
-               second reads, terminate without writing into unallocated memory */
-               if (dir->n_files == n_files)
-               {
-                       break;
-               }
-       }
-
-       qsort(dir->_files, dir->n_files, sizeof(tinydir_file), _tinydir_file_cmp);
-
-       return 0;
-
-bail:
-       tinydir_close(dir);
-       return -1;
-}
-
-_TINYDIR_FUNC
-void tinydir_close(tinydir_dir *dir)
-{
-       if (dir == NULL)
-       {
-               return;
-       }
-
-       memset(dir->path, 0, sizeof(dir->path));
-       dir->has_next = 0;
-       dir->n_files = 0;
-       if (dir->_files != NULL)
-       {
-               _TINYDIR_FREE(dir->_files);
-       }
-       dir->_files = NULL;
-#ifdef _WIN32
-       if (dir->_h != INVALID_HANDLE_VALUE)
-       {
-               FindClose(dir->_h);
-       }
-       dir->_h = INVALID_HANDLE_VALUE;
-#else
-       if (dir->_d)
-       {
-               closedir(dir->_d);
-       }
-       dir->_d = NULL;
-       dir->_e = NULL;
-#endif
-}
-
-_TINYDIR_FUNC
-int tinydir_next(tinydir_dir *dir)
-{
-       if (dir == NULL)
-       {
-               errno = EINVAL;
-               return -1;
-       }
-       if (!dir->has_next)
-       {
-               errno = ENOENT;
-               return -1;
-       }
-
-#ifdef _WIN32
-       if (FindNextFileA(dir->_h, &dir->_f) == 0)
-#else
-       dir->_e = readdir(dir->_d);
-       if (dir->_e == NULL)
-#endif
-       {
-               dir->has_next = 0;
-#ifdef _WIN32
-               if (GetLastError() != ERROR_SUCCESS &&
-                       GetLastError() != ERROR_NO_MORE_FILES)
-               {
-                       tinydir_close(dir);
-                       errno = EIO;
-                       return -1;
-               }
-#endif
-       }
-
-       return 0;
-}
-
-_TINYDIR_FUNC
-int tinydir_readfile(const tinydir_dir *dir, tinydir_file *file)
-{
-       if (dir == NULL || file == NULL)
-       {
-               errno = EINVAL;
-               return -1;
-       }
-#ifdef _WIN32
-       if (dir->_h == INVALID_HANDLE_VALUE)
-#else
-       if (dir->_e == NULL)
-#endif
-       {
-               errno = ENOENT;
-               return -1;
-       }
-       if (strlen(dir->path) +
-               strlen(
-#ifdef _WIN32
-                       dir->_f.cFileName
-#else
-                       dir->_e->d_name
-#endif
-               ) + 1 + _TINYDIR_PATH_EXTRA >=
-               _TINYDIR_PATH_MAX)
-       {
-               /* the path for the file will be too long */
-               errno = ENAMETOOLONG;
-               return -1;
-       }
-       if (strlen(
-#ifdef _WIN32
-                       dir->_f.cFileName
-#else
-                       dir->_e->d_name
-#endif
-               ) >= _TINYDIR_FILENAME_MAX)
-       {
-               errno = ENAMETOOLONG;
-               return -1;
-       }
-
-       strcpy(file->path, dir->path);
-       strcat(file->path, "/");
-       strcpy(file->name,
-#ifdef _WIN32
-               dir->_f.cFileName
-#else
-               dir->_e->d_name
-#endif
-       );
-       strcat(file->path, file->name);
-#ifndef _WIN32
-       if (stat(file->path, &file->_s) == -1)
-       {
-               return -1;
-       }
-#endif
-       _tinydir_get_ext(file);
-
-       file->is_dir =
-#ifdef _WIN32
-               !!(dir->_f.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY);
-#else
-               S_ISDIR(file->_s.st_mode);
-#endif
-       file->is_reg =
-#ifdef _WIN32
-               !!(dir->_f.dwFileAttributes & FILE_ATTRIBUTE_NORMAL) ||
-               (
-                       !(dir->_f.dwFileAttributes & FILE_ATTRIBUTE_DEVICE) &&
-                       !(dir->_f.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) &&
-                       !(dir->_f.dwFileAttributes & FILE_ATTRIBUTE_ENCRYPTED) &&
-#ifdef FILE_ATTRIBUTE_INTEGRITY_STREAM
-                       !(dir->_f.dwFileAttributes & FILE_ATTRIBUTE_INTEGRITY_STREAM) &&
-#endif
-#ifdef FILE_ATTRIBUTE_NO_SCRUB_DATA
-                       !(dir->_f.dwFileAttributes & FILE_ATTRIBUTE_NO_SCRUB_DATA) &&
-#endif
-                       !(dir->_f.dwFileAttributes & FILE_ATTRIBUTE_OFFLINE) &&
-                       !(dir->_f.dwFileAttributes & FILE_ATTRIBUTE_TEMPORARY));
-#else
-               S_ISREG(file->_s.st_mode);
-#endif
-
-       return 0;
-}
-
-_TINYDIR_FUNC
-int tinydir_readfile_n(const tinydir_dir *dir, tinydir_file *file, size_t i)
-{
-       if (dir == NULL || file == NULL)
-       {
-               errno = EINVAL;
-               return -1;
-       }
-       if (i >= dir->n_files)
-       {
-               errno = ENOENT;
-               return -1;
-       }
-
-       memcpy(file, &dir->_files[i], sizeof(tinydir_file));
-       _tinydir_get_ext(file);
-
-       return 0;
-}
-
-_TINYDIR_FUNC
-int tinydir_open_subdir_n(tinydir_dir *dir, size_t i)
-{
-       char path[_TINYDIR_PATH_MAX];
-       if (dir == NULL)
-       {
-               errno = EINVAL;
-               return -1;
-       }
-       if (i >= dir->n_files || !dir->_files[i].is_dir)
-       {
-               errno = ENOENT;
-               return -1;
-       }
-
-       strcpy(path, dir->_files[i].path);
-       tinydir_close(dir);
-       if (tinydir_open_sorted(dir, path) == -1)
-       {
-               return -1;
-       }
-
-       return 0;
-}
-
-/* Open a single file given its path */
-_TINYDIR_FUNC
-int tinydir_file_open(tinydir_file *file, const char *path)
-{
-       tinydir_dir dir;
-       int result = 0;
-       int found = 0;
-       char dir_name_buf[_TINYDIR_PATH_MAX];
-       char file_name_buf[_TINYDIR_FILENAME_MAX];
-       char *dir_name;
-       char *base_name;
-#ifdef _WIN32
-       char drive_buf[_TINYDIR_PATH_MAX];
-       char ext_buf[_TINYDIR_FILENAME_MAX];
-#endif
-       
-       if (file == NULL || path == NULL || strlen(path) == 0)
-       {
-               errno = EINVAL;
-               return -1;
-       }
-       if (strlen(path) + _TINYDIR_PATH_EXTRA >= _TINYDIR_PATH_MAX)
-       {
-               errno = ENAMETOOLONG;
-               return -1;
-       }
-
-       /* Get the parent path */
-#ifdef _WIN32
-       if (_splitpath_s(
-                       path,
-                       drive_buf, sizeof drive_buf,
-                       dir_name_buf, sizeof dir_name_buf,
-                       file_name_buf, sizeof file_name_buf,
-                       ext_buf, sizeof ext_buf))
-       {
-               errno = EINVAL;
-               return -1;
-       }
-       /* Concatenate the drive letter and dir name to form full dir name */
-       strcat(drive_buf, dir_name_buf);
-       dir_name = drive_buf;
-       /* Concatenate the file name and extension to form base name */
-       strcat(file_name_buf, ext_buf);
-       base_name = file_name_buf;
-#else
-       strcpy(dir_name_buf, path);
-       dir_name = dirname(dir_name_buf);
-       strcpy(file_name_buf, path);
-       base_name = basename(file_name_buf);
-#endif
-       
-       /* Open the parent directory */
-       if (tinydir_open(&dir, dir_name) == -1)
-       {
-               return -1;
-       }
-
-       /* Read through the parent directory and look for the file */
-       while (dir.has_next)
-       {
-               if (tinydir_readfile(&dir, file) == -1)
-               {
-                       result = -1;
-                       goto bail;
-               }
-               if (strcmp(file->name, base_name) == 0)
-               {
-                       /* File found */
-                       found = 1;
-                       goto bail;
-               }
-               tinydir_next(&dir);
-       }
-       if (!found)
-       {
-               result = -1;
-               errno = ENOENT;
-       }
-       
-bail:
-       tinydir_close(&dir);
-       return result;
-}
-
-_TINYDIR_FUNC
-void _tinydir_get_ext(tinydir_file *file)
-{
-       char *period = strrchr(file->name, '.');
-       if (period == NULL)
-       {
-               file->extension = &(file->name[strlen(file->name)]);
-       }
-       else
-       {
-               file->extension = period + 1;
-       }
-}
-
-_TINYDIR_FUNC
-int _tinydir_file_cmp(const void *a, const void *b)
-{
-       const tinydir_file *fa = (const tinydir_file *)a;
-       const tinydir_file *fb = (const tinydir_file *)b;
-       if (fa->is_dir != fb->is_dir)
-       {
-               return -(fa->is_dir - fb->is_dir);
-       }
-       return strncmp(fa->name, fb->name, _TINYDIR_FILENAME_MAX);
-}
-
-#endif

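For context, here is a minimal usage sketch (not part of the patch) for the tinydir_file_open() API removed above; the include path is the pre-reorganization one and "conf/job.conf" is a hypothetical file:

    /* Hedged sketch: open a single file by path with the deleted tinydir API. */
    #include <cstdio>
    #include "utils/tinydir.h"  /* path as it was before this commit */

    int main() {
      tinydir_file file;
      /* Opens the parent directory, scans it for the base name, and fills
         `file` on success (see tinydir_file_open() above). */
      if (tinydir_file_open(&file, "conf/job.conf") == -1) {
        std::perror("tinydir_file_open");
        return 1;
      }
      std::printf("name=%s extension=%s is_dir=%d\n",
                  file.name, file.extension, file.is_dir);
      return 0;
    }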
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/utils/tokenizer.h
----------------------------------------------------------------------
diff --git a/include/utils/tokenizer.h b/include/utils/tokenizer.h
deleted file mode 100644
index c66e0af..0000000
--- a/include/utils/tokenizer.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_UTILS_TOKENIZER_H_
-#define SINGA_UTILS_TOKENIZER_H_
-
-#include <glog/logging.h>
-#include <string>
-
-namespace singa {
-/**
- * Tokenize a string.
- *
- * example:
- * Tokenizer t("assa,asf;wes", ",;");
- * string x;
- * t >> x; // x is assa
- * t >> x; // x is asf
- * t >> x; // x is wes
- * cout << t.Valid(); // prints 0: no tokens left
- */
-class Tokenizer {
- public:
-  Tokenizer(const std::string& str, const std::string& sep)
-      : start_(0), sep_(sep), buf_(str) {}
-  Tokenizer & operator>>(std::string& out) {
-    CHECK_LT(start_, buf_.length());
-    int start = start_;
-    auto pos = buf_.find_first_of(sep_, start);
-    if (pos == std::string::npos)
-      pos = buf_.length();
-    start_ = pos + 1;
-    out = buf_.substr(start, pos - start);  // substr takes (pos, count), not (begin, end)
-    return *this;
-  }
-  bool Valid() { return start_ < buf_.length(); }
-
- private:
-  unsigned start_;
-  std::string sep_;
-  const std::string buf_;  // stored by value; a reference member would dangle for temporaries
-};
-
-}  // namespace singa
-
-#endif  // SINGA_UTILS_TOKENIZER_H_

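To make the Tokenizer semantics concrete, a small sketch mirroring the class's own doc example (and relying on the substr fix noted above); it assumes the header's pre-reorganization include path:

    #include <iostream>
    #include <string>
    #include "utils/tokenizer.h"  // path as it was before this commit

    int main() {
      singa::Tokenizer t("assa,asf;wes", ",;");
      std::string x;
      t >> x;  // x == "assa"
      t >> x;  // x == "asf"
      t >> x;  // x == "wes"
      // All tokens consumed; another t >> x would trip CHECK_LT.
      std::cout << t.Valid() << std::endl;  // prints 0
      return 0;
    }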
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/utils/updater.h
----------------------------------------------------------------------
diff --git a/include/utils/updater.h b/include/utils/updater.h
deleted file mode 100644
index 4afa32f..0000000
--- a/include/utils/updater.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-* 
-*   http://www.apache.org/licenses/LICENSE-2.0
-* 
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_UTILS_UPDATER_H_
-#define SINGA_UTILS_UPDATER_H_
-
-#include "proto/job.pb.h"
-#include "utils/param.h"
-
-namespace singa {
-/**
- * Base learning rate generator.
- *
- * Generate the learning rate for a given training step/iteration.
- * There are many ways to change the learning rate over time/steps.
- * Users can inherit this class to implement their own schedule.
- */
-class LRGenerator {
- public:
-  static LRGenerator* Create(const LRGenProto& proto);
-
-  virtual ~LRGenerator() {}
-
-  virtual void Init(const LRGenProto& proto) { proto_ = proto; }
-  /**
-   * @param step training step/iteration.
-   * @return base learning rate regardless of step
-   */
-  virtual float Get(int step) { return proto_.base_lr(); }
-
- protected:
-  LRGenProto proto_;
-};
-
-class FixedStepLRGen : public LRGenerator {
- public:
-  float Get(int step) override;
- private:
-  int last_idx_ = 0;
-};
-
-class StepLRGen : public LRGenerator {
- public:
-  float Get(int step) override;
-};
-
-class LinearLRGen : public LRGenerator {
- public:
-  float Get(int step) override;
-};
-
-class ExpLRGen : public LRGenerator {
- public:
-  float Get(int step) override;
-};
-
-class InvLRGen : public LRGenerator {
- public:
-  float Get(int step) override;
-};
-
-class InvTLRGen : public LRGenerator {
- public:
-  float Get(int step) override;
-};
-
-/**
- * Updater for Param.
- */
-class Updater {
- public:
-  static Updater* Create(const UpdaterProto& proto);
-
-  virtual ~Updater() {}
-
-  virtual void Init(const UpdaterProto &proto);
-  virtual void Update(int step, Param* param, float grad_scale) = 0;
-
- protected:
-  UpdaterProto proto_;
-  LRGenerator* lr_gen_;
-  float weight_decay_;
-  float momentum_;
-};
-
-class SGDUpdater : public Updater {
- public:
-  void Update(int step, Param* param, float grad_scale) override;
-};
-
-class AdaGradUpdater : public Updater {
- public:
-  void Update(int step, Param* param, float grad_scale) override;
-};
-
-
-class NesterovUpdater : public Updater {
- public:
-  void Update(int step, Param* param, float grad_scale) override;
-};
-
-/*
-class RMSPropUpdater : public Updater {
- public:
-  virtual void Update(int step, Param* param, float grad_scale);
-
- protected:
-  float base_lr_;
-  float delta_;
-  float rho_;
-  float weight_decay_;
-};
-
-class AdaDeltaUpdater : public Updater {
- public:
-  virtual void Update(int step, Param* param, float grad_scale);
-
- protected:
-  float rho_;
-  float delta_;
-  float weight_decay_;
-};
-*/
-
-}  // namespace singa
-
-#endif  // SINGA_UTILS_UPDATER_H_

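The Update() implementations are not part of this diff, so purely as orientation, here is a hedged sketch of the conventional SGD-with-momentum rule that the Updater fields above (lr_gen_, momentum_, weight_decay_) suggest; the plain-array signature is an illustrative assumption, not SINGA's actual Param API:

    // NOT SGDUpdater::Update(); a self-contained sketch of the usual rule.
    void SgdStepSketch(float* data, const float* grad, float* history, int n,
                       float grad_scale, float lr, float momentum,
                       float weight_decay) {
      for (int i = 0; i < n; ++i) {
        // Effective gradient: scaled raw gradient plus L2 weight-decay term.
        float g = grad_scale * grad[i] + weight_decay * data[i];
        // Momentum buffer keeps a decayed running update.
        history[i] = momentum * history[i] + lr * g;
        data[i] -= history[i];
      }
    }

    int main() {
      float w[2] = {0.5f, -0.2f}, g[2] = {0.1f, 0.3f}, h[2] = {0.f, 0.f};
      SgdStepSketch(w, g, h, 2, 1.0f, 0.01f, 0.9f, 0.0005f);
      return 0;
    }

The per-step learning rate would come from one of the LRGenerator subclasses above (fixed-step, step, linear, exponential, inverse), selected via LRGenProto.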
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/include/worker.h
----------------------------------------------------------------------
diff --git a/include/worker.h b/include/worker.h
deleted file mode 100644
index 58f02c4..0000000
--- a/include/worker.h
+++ /dev/null
@@ -1,311 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#ifndef SINGA_WORKER_H_
-#define SINGA_WORKER_H_
-
-#include <string>
-#include <vector>
-#include "comm/socket.h"
-#include "neuralnet/neuralnet.h"
-#include "proto/job.pb.h"
-
-namespace singa {
-
-//!< sleep 5 milliseconds if the Param is not updated to the expected version
-const int kCollectSleepTime = 5;
-/**
- * The Worker class which runs the training algorithm.
- * The first worker group will initialize parameters of the Net,
- * and put them into the distributed memory/table.
- * The virtual function TrainOneBatch and TestOneBatch implement the
- * training and test algorithm for one mini-batch data.
- *
- * Child workers override the two functions to implement their training
- * algorithms; e.g., BPWorker/CDWorker/BPTTWorker implement the BP/CD/BPTT
- * algorithms, respectively.
- */
-class Worker {
- public:
-  /**
-   * Create an instance of the subclass of Worker.
-   *
-   * @param[in] conf configuration of the TrainOneBatch algorithm. Different
-   * Worker subclasses implement different algorithms. Hence the creation is
-   * based on the TrainOneBatch algorithm type. Currently SINGA
-   * provides two algorithms:
-   * -# Back-propagation for the feed-forward models, e.g., CNN and MLP, and
-   *  the recurrent neural networks.
-   * -# Contrastive divergence for the energy models, e.g., RBM.
-   *
-   * @return a pointer to the instance of the Worker subclass.
-   */
-  static Worker* Create(const AlgProto& conf);
-  virtual ~Worker();
-  /**
-   * @param[in] grp_id global worker group ID
-   * @param[in] id worker ID within the group
-   * @param[in] conf job configuration
-   * @param[in] train_net pointer to the training neural net, which could be
-   * shared with other workers from the same group. Different workers run over
-   * different subsets of layers.
-   * @param[in] val_net pointer to the validation neural net. Currently only the
-   * first worker from the first group has a validation neural net. All
-   * other workers receive nullptr for this argument.
-   * @param[in] test_net pointer to the test neural net. Currently only the
-   * first worker from the first group has a test neural net. All other
-   * workers receive nullptr for this argument.
-   */
-  virtual void Setup(int grp_id, int id, const JobProto& conf,
-      NeuralNet* train_net, NeuralNet* val_net, NeuralNet* test_net);
-
-  /**
-   * Main function of Worker.
-   *
-   * Train the neural net step by step; testing/validation is done periodically.
-   */
-  void Run();
-
-  /**
-   * Init values of Param instances associated with local layers (i.e., layers
-   * dispatched to this worker).
-   *
-   * If a Param is owned by this worker, it is initialized and put to the
-   * servers. Otherwise Get() is called to fetch the Param. Get() may not send
-   * a get request if the Param owner is in the same process, in which case
-   * the memory space of the Param objects is shared. But if this worker and
-   * the Param owner run on different devices (e.g., GPUs), the get request
-   * is sent.
-   *
-   * If training starts from scratch, every Param object is initialized using
-   * ParamGenerator. After that, the worker may train for a couple of steps to
-   * warm up the params before putting them to the servers (the warmup field
-   * of JobProto controls this).
-   *
-   * If one Param object's name matches that of one Param object from the
-   * checkpoint files, its Param values would be loaded from checkpoint files.
-   *
-   * @param[in] job_conf job configuration which provides settings for
-   * checkpoint file paths, warmup steps and Param versions.
-   * @param[out] net pointer to a neural net whose Param values will be
-   * initialized.
-   */
-  void InitNetParams(const JobProto& job_conf, NeuralNet* net);
-
-  /**
-   * Checkpoint all Param objects owned by the worker onto disk.
-   * The serialization is done using BlobProtos, which include the name, version
-   * and values of each Param object.
-   * Different workers would generate different checkpoint files. The file path
-   * is <workspace>/checkpoint-<jobname>-step<step>-worker<worker_id>.bin
-   * @param[in] step training step
-   * @param[in] folder directory to put the checkpoint file
-   * @param net the training net whose Param objects will be dumped.
-   */
-  void Checkpoint(int step, const std::string& folder, NeuralNet* net);
-
-  /**
-    * Train one mini-batch.
-    * Test/Validation is done before training.
-    *
-    * @param[in] step training step.
-    * @param[in] net neural net to be trained.
-    */
-  virtual void TrainOneBatch(int step, NeuralNet* net) = 0;
-
-  /**
-   * Test/validate one mini-batch data.
-   *
-   * @param[in] step test step.
-   * @param[in] phase test could be done for validation or test phase.
-   * @param[in] net neural net for test
-   */
-  virtual void TestOneBatch(int step, Phase phase, NeuralNet* net) = 0;
-
-  /**
-   * Display information from layers.
-   *
-   * @param flag could be a combination of multiple phases, e.g., kTest|kForward;
-   * it is passed to the Layer::ToString() function for each layer to decide
-   * what to display.
-   * @param prefix display prefix, e.g., 'Train step 100', 'Test step 90'.
-   * @param net display layers from this neural net.
-   */
-  void Display(int flag, const std::string& prefix, NeuralNet* net);
-
-  /**
-   * Put Param values to server.
-   *
-   * @param param
-   * @param step used as current param version for the put request
-   */
-  int Put(int step, Param* param);
-
-  /**
-   * Get Param with specific version from server
-   * If the current version >= the requested version, then return.
-   * Otherwise send a get request to the stub, which forwards it to the servers.
-   * @param param
-   * @param step requested param version
-   */
-  int Get(int step, Param* param);
-
-  /**
-   * Update Param.
-   *
-   * @param param
-   * @param step training step used for updating (e.g., deciding learning rate).
-   */
-  int Update(int step, Param* param);
-
-  /**
-   * Wait for the response of the update/get requests.
-   *
-   * @param param
-   * @param step not used now.
-   */
-  int Collect(int step, Param* param);
-
-  /**
-   * Call Collect() for every param of net
-   */
-  int CollectAll(int step, NeuralNet* net);
-
-  /**
-   * Receive blobs from other workers due to model partitions.
-   */
-  void ReceiveBlobs(bool data, bool grad, BridgeLayer* layer, NeuralNet* net);
-
-  /**
-   * Send blobs to other workers due to model partitions.
-   */
-  void SendBlobs(bool data, bool grad, BridgeLayer* layer, NeuralNet* net);
-
-
-  /**
-   * @param[in] step
-   * @return true if it is time to display training info, e.g., loss; otherwise
-   * false.
-   */
-  inline bool DisplayNow(int step) const {
-    return job_conf_.disp_freq() > 0
-           && step >= job_conf_.disp_after()
-           && ((step - job_conf_.disp_after()) % job_conf_.disp_freq() == 0);
-  }
-  /**
-   * @param[in] step
-   * @return true if it is time to finish the training; otherwise false.
-   */
-  inline bool StopNow(int step) const {
-    return step >= job_conf_.train_steps();
-  }
-  /**
-   * @param[in] step
-   * @return true if it is time to checkpoint Param objects; otherwise false.
-   */
-  inline bool CheckpointNow(int step) const {
-    return job_conf_.checkpoint_freq() > 0
-           && step >= job_conf_.checkpoint_after()
-           && ((step - job_conf_.checkpoint_after())
-              % job_conf_.checkpoint_freq() == 0);
-  }
-  /**
-   * @param[in] step
-   * @return true if it is time to run a test pass over the test dataset.
-   */
-  inline bool TestNow(int step) const {
-    return job_conf_.test_freq() > 0
-      && job_conf_.test_steps() > 0
-      && step >= job_conf_.test_after()
-      && ((step - job_conf_.test_after()) % job_conf_.test_freq() == 0);
-  }
-  /**
-   * @param[in] step
-   * @return true if it is time to run a validation pass over the validation dataset.
-   */
-  inline bool ValidateNow(int step) const {
-    return job_conf_.validate_freq() > 0
-      && job_conf_.validate_steps() > 0
-      && step >= job_conf_.validate_after()
-      && ((step - job_conf_.validate_after()) % job_conf_.validate_freq() == 0);
-  }
-  /**
-   * @return a vector with pointers to all neural nets.
-   */
-  const std::vector<NeuralNet*> GetNets() const {
-    return std::vector<NeuralNet*> {train_net_, val_net_, test_net_};
-  }
-  /**
-   * @return training net.
-   */
-  inline NeuralNet* train_net() const {
-    return train_net_;
-  }
-  /**
-   * @return group ID
-   */
-  inline int grp_id() const { return grp_id_; }
-  /**
-   * @return worker ID within the worker group.
-   */
-  inline int id() const { return id_; }
-
- protected:
-  int grp_id_ = -1, id_ = -1;
-  int step_ = 0;
-  JobProto job_conf_;
-  NeuralNet* train_net_ = nullptr;
-  NeuralNet* test_net_ = nullptr;
-  NeuralNet* val_net_ = nullptr;
-  Dealer* layer_dealer_ = nullptr;
-  Dealer* dealer_ = nullptr;
-};
-
-class BPWorker: public Worker {
- public:
-  void TrainOneBatch(int step, NeuralNet* net) override;
-  void TestOneBatch(int step, Phase phase, NeuralNet* net) override;
-  void Forward(int step, Phase phase, NeuralNet* net);
-  void Backward(int step, NeuralNet* net);
-};
-
-class CDWorker: public Worker {
- public:
-  void TrainOneBatch(int step, NeuralNet* net) override;
-  void TestOneBatch(int step, Phase phase, NeuralNet* net) override;
-};
-
-inline int BlobTrgt(int grp, int layer) {
-  return (grp << 16) | layer;
-}
-
-inline int BlobGrp(int blob_trgt) {
-  return blob_trgt >> 16;
-}
-
-inline int BlobLayer(int blob_trgt) {
-  static const int mask = (1 << 16) - 1;
-  return blob_trgt & mask;
-}
-
-}  // namespace singa
-
-#endif  // SINGA_WORKER_H_

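The BlobTrgt helpers at the bottom of worker.h pack a (worker group, layer) pair into a single int: the group ID occupies the high 16 bits and the layer ID the low 16, so each must fit in 16 bits. A self-contained round-trip check, with the helpers copied from the deleted header above:

    #include <cassert>

    inline int BlobTrgt(int grp, int layer) { return (grp << 16) | layer; }
    inline int BlobGrp(int blob_trgt) { return blob_trgt >> 16; }
    inline int BlobLayer(int blob_trgt) {
      static const int mask = (1 << 16) - 1;
      return blob_trgt & mask;
    }

    int main() {
      int trgt = BlobTrgt(3, 42);     // (3 << 16) | 42
      assert(BlobGrp(trgt) == 3);     // high 16 bits -> worker group
      assert(BlobLayer(trgt) == 42);  // low 16 bits -> layer
      return 0;
    }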
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/src/comm/msg.cc
----------------------------------------------------------------------
diff --git a/src/comm/msg.cc b/src/comm/msg.cc
index 2521c28..5c33026 100644
--- a/src/comm/msg.cc
+++ b/src/comm/msg.cc
@@ -19,7 +19,7 @@
 *
 *************************************************************/
 
-#include "comm/msg.h"
+#include "singa/comm/msg.h"
 
 #include <glog/logging.h>
 

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/src/comm/socket.cc
----------------------------------------------------------------------
diff --git a/src/comm/socket.cc b/src/comm/socket.cc
index b9c7810..09a3b66 100644
--- a/src/comm/socket.cc
+++ b/src/comm/socket.cc
@@ -18,7 +18,7 @@
 * under the License.
 *
 *************************************************************/
-#include "comm/socket.h"
+#include "singa/comm/socket.h"
 
 #include <glog/logging.h>
 

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/src/driver.cc
----------------------------------------------------------------------
diff --git a/src/driver.cc b/src/driver.cc
index 0d3bbfc..5f81325 100644
--- a/src/driver.cc
+++ b/src/driver.cc
@@ -19,19 +19,48 @@
 *
 *************************************************************/
 
-#include "./driver.h"
+#include "singa/driver.h"
 
 #include <glog/logging.h>
 #include <set>
 #include <string>
 #include <vector>
-#include "neuralnet/layer.h"
-#include "utils/common.h"
-#include "utils/tinydir.h"
-#include "utils/cluster.h"
-#include "./server.h"
-#include "./stub.h"
-#include "./worker.h"
+#include "singa/neuralnet/layer.h"
+#include "singa/utils/common.h"
+#include "singa/utils/tinydir.h"
+#include "singa/utils/cluster.h"
+#include "singa/server.h"
+#include "singa/stub.h"
+#include "singa/worker.h"
+
+#include "singa/neuralnet/connection_layer/bridge.h"
+#include "singa/neuralnet/connection_layer/concate.h"
+#include "singa/neuralnet/connection_layer/slice.h"
+#include "singa/neuralnet/connection_layer/split.h"
+#include "singa/neuralnet/input_layer/parser.h"
+#include "singa/neuralnet/input_layer/csv_record.h"
+#include "singa/neuralnet/input_layer/data.h"
+#include "singa/neuralnet/input_layer/image_preprocess.h"
+#include "singa/neuralnet/input_layer/label.h"
+#include "singa/neuralnet/input_layer/lmdb_data.h"
+#include "singa/neuralnet/input_layer/mnist.h"
+#include "singa/neuralnet/input_layer/prefetch.h"
+#include "singa/neuralnet/input_layer/proto_record.h"
+#include "singa/neuralnet/input_layer/rgb_image.h"
+#include "singa/neuralnet/input_layer/shard_data.h"
+#include "singa/neuralnet/input_layer/store_input.h"
+#include "singa/neuralnet/loss_layer/euclidean.h"
+#include "singa/neuralnet/loss_layer/softmax.h"
+#include "singa/neuralnet/neuron_layer/convolution.h"
+#include "singa/neuralnet/neuron_layer/dropout.h"
+#include "singa/neuralnet/neuron_layer/inner_product.h"
+#include "singa/neuralnet/neuron_layer/lrn.h"
+#include "singa/neuralnet/neuron_layer/pooling.h"
+#include "singa/neuralnet/neuron_layer/rbm.h"
+#include "singa/neuralnet/neuron_layer/relu.h"
+#include "singa/neuralnet/neuron_layer/sigmoid.h"
+#include "singa/neuralnet/neuron_layer/stanh.h"
+#include "singa/neuralnet/output_layer/output_layer.h"
 
 extern "C" void openblas_set_num_threads(int num);
 

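Note the extern "C" declaration at the end of the hunk: driver.cc declares openblas_set_num_threads() directly instead of pulling in a BLAS header, presumably so the driver can pin OpenBLAS to a single thread per process while SINGA parallelizes across workers itself. A minimal sketch of the same pattern (requires linking against OpenBLAS):

    // Declare the OpenBLAS symbol directly, as driver.cc does above.
    extern "C" void openblas_set_num_threads(int num);

    int main() {
      openblas_set_num_threads(1);  // one BLAS thread; workers supply parallelism
      return 0;
    }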
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/src/io/kvfile.cc
----------------------------------------------------------------------
diff --git a/src/io/kvfile.cc b/src/io/kvfile.cc
index 465d3ee..f090ccf 100644
--- a/src/io/kvfile.cc
+++ b/src/io/kvfile.cc
@@ -19,7 +19,7 @@
 *
 *************************************************************/
 
-#include "io/kvfile.h"
+#include "singa/io/kvfile.h"
 
 #include <glog/logging.h>
 

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/src/io/kvfile_store.cc
----------------------------------------------------------------------
diff --git a/src/io/kvfile_store.cc b/src/io/kvfile_store.cc
index 4d251f9..fbf6982 100644
--- a/src/io/kvfile_store.cc
+++ b/src/io/kvfile_store.cc
@@ -19,7 +19,7 @@
 *
 *************************************************************/
 
-#include "io/kvfile_store.h"
+#include "singa/io/kvfile_store.h"
 
 #include <glog/logging.h>
 

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/src/io/store.cc
----------------------------------------------------------------------
diff --git a/src/io/store.cc b/src/io/store.cc
index 8d3bf13..530ca58 100644
--- a/src/io/store.cc
+++ b/src/io/store.cc
@@ -19,9 +19,9 @@
 *
 *************************************************************/
 
-#include "io/store.h"
-#include "io/kvfile_store.h"
-#include "io/textfile_store.h"
+#include "singa/io/store.h"
+#include "singa/io/kvfile_store.h"
+#include "singa/io/textfile_store.h"
 
 namespace singa { namespace io {
 Store* CreateStore(const std::string& backend) {

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/src/io/textfile_store.cc
----------------------------------------------------------------------
diff --git a/src/io/textfile_store.cc b/src/io/textfile_store.cc
index 9371de7..7636f26 100644
--- a/src/io/textfile_store.cc
+++ b/src/io/textfile_store.cc
@@ -20,7 +20,7 @@
 *************************************************************/
 
 
-#include "io/textfile_store.h"
+#include "singa/io/textfile_store.h"
 #include <glog/logging.h>
 
 namespace singa {

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/src/main.cc
----------------------------------------------------------------------
diff --git a/src/main.cc b/src/main.cc
index 99c91b8..f45a379 100644
--- a/src/main.cc
+++ b/src/main.cc
@@ -19,7 +19,7 @@
 *
 *************************************************************/
 
-#include "./singa.h"
+#include "singa/singa.h"
 /**
  * \file main.cc provides an example main function.
  *

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/src/neuralnet/connection_layer.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/connection_layer.cc b/src/neuralnet/connection_layer.cc
deleted file mode 100644
index acf243d..0000000
--- a/src/neuralnet/connection_layer.cc
+++ /dev/null
@@ -1,138 +0,0 @@
-/************************************************************
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*************************************************************/
-
-#include "neuralnet/connection_layer.h"
-
-namespace singa {
-
-using std::vector;
-/********* Implementation for BridgeDstLayer **************/
-void BridgeDstLayer::Setup(const LayerProto& proto,
-    const vector<Layer*>& srclayers) {
-  Layer::Setup(proto, srclayers);
-  CHECK_EQ(srclayers.size(), 1);
-  data_.Reshape(srclayers[0]->data(this).shape());
-  grad_.ReshapeLike(data_);
-}
-/************* Implementation for ConcateLayer ***********/
-void ConcateLayer::Setup(const LayerProto& conf,
-    const vector<Layer*>& srclayers) {
-  Layer::Setup(conf, srclayers);
-  size_t concate_dim = conf.concate_conf().concate_dim();
-  CHECK_GE(concate_dim, 0);
-  CHECK_GT(srclayers.size(), 1);
-  vector<int> shape = srclayers[0]->data(this).shape();
-  for (size_t i = 1; i < srclayers.size(); i++) {
-    const vector<int>& srcshape = srclayers[i]->data(this).shape();
-    for (size_t j = 0; j < shape.size(); j++)
-      if (j == concate_dim)
-        shape[j] += srcshape[j];
-      else
-        CHECK_EQ(shape[j], srcshape[j]);
-  }
-  data_.Reshape(shape);
-  grad_.Reshape(shape);
-}
-
-void ConcateLayer::ComputeFeature(int flag, const vector<Layer*>& srclayers) {
-  LOG(FATAL) << "Not implemented for Concate Layer";
-}
-
-void ConcateLayer::ComputeGradient(int flag, const vector<Layer*>& srclayers) {
-  LOG(FATAL) << "Not implemented for Concate Layer";
-}
-
-/************* Implementation for SliceLayer****************/
-void SliceLayer::Setup(const LayerProto& conf,
-    const vector<Layer*>& srclayers) {
-  /*
-  Layer::Setup(conf, npartitions);
-  slice_dim_ = conf.slice_conf().slice_dim();
-  slice_num_ = npartitions;
-  CHECK_GE(slice_dim_, 0);
-  CHECK_EQ(slice_num_, dstlayers_.size());
-  data_.Reshape(srclayers[0]->data(this).shape());
-  grad_.ReshapeLike(data_);
-  datavec_.resize(slice_num_);
-  gradvec_.resize(slice_num_);
-  CHECK_EQ(data_.count() % slice_num_, 0);  // restrict equal slicing
-  // LOG(ERROR)<<"slice dim "<<slice_dim<<" slice num "<<slice_num;
-  for (int i = 0; i < slice_num_; i++) {
-    vector<int> newshape(data_.shape());
-    newshape[slice_dim_] = newshape[slice_dim_] / slice_num_ +
-      ((i == slice_num_ - 1) ? newshape[slice_dim_] % slice_num_ : 0);
-    datavec_[i].Reshape(newshape);
-    gradvec_[i].Reshape(newshape);
-    // LOG(ERROR)<<"slice "<<IntVecToString(newshape);
-  }
-  */
-  LOG(FATAL) << "Not implemented";
-}
-
-void SliceLayer::ComputeFeature(int flag, const vector<Layer*>& srclayers) {
-  /*
-  CHECK_EQ(srclayers.size(), 1);
-  if (slice_dim_ == 0) {
-    const auto& blob = srclayers.at(0)->data(this);
-    int size = blob.count() / slice_num_;
-    for (int i = 0; i < slice_num_; i++) {
-      float* dst = datavec_[i].mutable_cpu_data();
-      const float* src = blob.cpu_data() + i * size;
-      memcpy(dst, src, size*sizeof(float));
-    }
-  }
-  */
-  LOG(FATAL) << "Not implemented";
-}
-
-void SliceLayer::ComputeGradient(int flag, const vector<Layer*>& srclayers) {
-  LOG(FATAL) << "Not implemented";
-}
-
-/*
-int SliceLayer::SliceID(const Layer* layer) const {
-  CHECK(layer != nullptr);
-  for (size_t i = 0; i < datavec_.size(); i++) {
-    // LOG(ERROR)<<"get slice "<<IntVecToString(shapes_[i]);
-    if (dstlayers_[i] == layer)
-      return i;
-  }
-  CHECK(false);
-  return -1;
-}*/
-
-/************* Implementation for SplitLayer****************/
-void SplitLayer::Setup(const LayerProto& conf,
-    const vector<Layer*>& srclayers) {
-  Layer::Setup(conf, srclayers);
-  CHECK_EQ(srclayers.size(), 1);
-  data_.Reshape(srclayers[0]->data(this).shape());
-  grad_.Reshape(srclayers[0]->data(this).shape());
-}
-
-void SplitLayer::ComputeFeature(int flag, const vector<Layer*>& srclayers) {
-  LOG(FATAL) << "Not implemented";
-}
-
-void SplitLayer::ComputeGradient(int flag, const vector<Layer*>& srclayers) {
-  LOG(FATAL) << "Not implemented";
-}
-}  // namespace singa

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/src/neuralnet/connection_layer/bridge.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/connection_layer/bridge.cc b/src/neuralnet/connection_layer/bridge.cc
new file mode 100644
index 0000000..5a43c20
--- /dev/null
+++ b/src/neuralnet/connection_layer/bridge.cc
@@ -0,0 +1,34 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#include "singa/neuralnet/connection_layer/bridge.h"
+
+namespace singa {
+
+using std::vector;
+void BridgeDstLayer::Setup(const LayerProto& proto,
+    const vector<Layer*>& srclayers) {
+  Layer::Setup(proto, srclayers);
+  CHECK_EQ(srclayers.size(), 1);
+  data_.Reshape(srclayers[0]->data(this).shape());
+  grad_.ReshapeLike(data_);
+}
+}  // namespace singa

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/239ed217/src/neuralnet/connection_layer/concate.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/connection_layer/concate.cc b/src/neuralnet/connection_layer/concate.cc
new file mode 100644
index 0000000..fd2fb24
--- /dev/null
+++ b/src/neuralnet/connection_layer/concate.cc
@@ -0,0 +1,55 @@
+/************************************************************
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*
+*************************************************************/
+
+#include "singa/neuralnet/connection_layer/concate.h"
+
+namespace singa {
+
+using std::vector;
+
+void ConcateLayer::Setup(const LayerProto& conf,
+    const vector<Layer*>& srclayers) {
+  Layer::Setup(conf, srclayers);
+  size_t concate_dim = conf.concate_conf().concate_dim();
+  CHECK_GE(concate_dim, 0);
+  CHECK_GT(srclayers.size(), 1);
+  vector<int> shape = srclayers[0]->data(this).shape();
+  for (size_t i = 1; i < srclayers.size(); i++) {
+    const vector<int>& srcshape = srclayers[i]->data(this).shape();
+    for (size_t j = 0; j < shape.size(); j++)
+      if (j == concate_dim)
+        shape[j] += srcshape[j];
+      else
+        CHECK_EQ(shape[j], srcshape[j]);
+  }
+  data_.Reshape(shape);
+  grad_.Reshape(shape);
+}
+
+void ConcateLayer::ComputeFeature(int flag, const vector<Layer*>& srclayers) {
+  LOG(FATAL) << "Not implemented for Concate Layer";
+}
+
+void ConcateLayer::ComputeGradient(int flag, const vector<Layer*>& srclayers) {
+  LOG(FATAL) << "Not implemented for Concate Layer";
+}
+
+}  // namespace singa

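As a worked example of the shape arithmetic in ConcateLayer::Setup() above: concatenating two 4x3 sources along dimension 0 yields an 8x3 blob, because sizes add on the concatenation dimension and must agree everywhere else. A standalone sketch, with plain vectors standing in for the Blob shapes:

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> shape = {4, 3};  // first source layer's shape
      std::vector<int> src   = {4, 3};  // second source layer's shape
      size_t concate_dim = 0;
      for (size_t j = 0; j < shape.size(); ++j) {
        if (j == concate_dim)
          shape[j] += src[j];  // sizes add on the concat dimension
        // else: the real code CHECK_EQs that the other dims match
      }
      std::printf("%d x %d\n", shape[0], shape[1]);  // prints "8 x 3"
      return 0;
    }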