Repository: incubator-singa
Updated Branches:
  refs/heads/master 138599fd1 -> 4a0db51f5


SINGA-113 Model/Hybrid Partition Support

NeuralNet now automatically adds connection layers for distributed training.
The partition is transparent to users.

Users just need to configure the partition_dim field in NetProto:
  partition_dim = 0 : data partition
  partition_dim = 1 : model partition

Users can also override partition_dim for a specific layer in LayerProto,
which results in a hybrid partition.
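
For example, a minimal sketch built with the protobuf API (mirroring the
style of the new tests below; layer names and types here are illustrative):

  NetProto net;
  net.set_partition_dim(1);           // model partition for the whole net
  LayerProto* fc = net.add_layer();
  fc->set_name("fc1");
  fc->set_type(kInnerProduct);
  LayerProto* loss = net.add_layer();
  loss->set_name("loss");
  loss->set_type(kSoftmaxLoss);
  loss->set_partition_dim(0);         // per-layer override -> hybrid partition
  loss->add_srclayers("fc1");
  // NeuralNet::Create(net, kTrain, npartitions) then inserts the needed
  // Slice/Concate/Split/Bridge connection layers transparently.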


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/2d38cb30
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/2d38cb30
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/2d38cb30

Branch: refs/heads/master
Commit: 2d38cb307466c966e19acd5f888ba0954d60aa19
Parents: 138599f
Author: WANG Sheng <[email protected]>
Authored: Sat Dec 12 19:10:57 2015 +0800
Committer: Wei Wang <[email protected]>
Committed: Fri Dec 25 20:25:27 2015 +0800

----------------------------------------------------------------------
 configure.ac                            |  52 ++--
 include/singa/neuralnet/neuralnet.h     |  12 +-
 include/singa/utils/graph.h             |   6 +
 src/driver.cc                           |   9 +-
 src/neuralnet/connection_layer/slice.cc |  23 +-
 src/neuralnet/connection_layer/split.cc |  23 +-
 src/neuralnet/neuralnet.cc              | 408 +++++++++++++++------------
 src/proto/job.proto                     |   7 +
 src/test/test_connection_layers.cc      |   2 +-
 src/test/test_neuralnet.cc              |  96 ++++++-
 src/utils/blob.cc                       |   5 +-
 src/utils/common.cc                     |   2 +-
 src/utils/graph.cc                      |  72 +----
 src/worker.cc                           |  24 +-
 14 files changed, 410 insertions(+), 331 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/2d38cb30/configure.ac
----------------------------------------------------------------------
diff --git a/configure.ac b/configure.ac
index 07dbdc3..bde1d8e 100644
--- a/configure.ac
+++ b/configure.ac
@@ -38,7 +38,7 @@ AC_PROG_LIBTOOL
 
 # Setup custom CUDA paths
 AC_ARG_ENABLE(cuda,
-       [AS_HELP_STRING(--enable-cuda,enable CUDA support)],
+  [AS_HELP_STRING(--enable-cuda,enable CUDA support)],
     cudaval="yes",
     cudaval="no")
 AM_CONDITIONAL(DCUDA, [test "$cudaval" = "yes"])
@@ -71,17 +71,17 @@ if test x"$cudaval" = x"yes"; then
     LIBS="$LIBS $CUDA_LIBS"
     LDFLAGS="$LDFLAGS $CUDA_LDFLAGS"
     NVCC="nvcc"
-       DEBUG="-DUSE_GPU"
+  DEBUG="-DUSE_GPU"
     AC_DEFINE(DCUDA,[1],[Defined if CUDA should be used])
-       AC_CHECK_LIB([cuda], [main], [], [
-               AC_MSG_ERROR([unable to find cuda library])
-        ])
-       AC_CHECK_LIB([cudart], [main], [], [
-               AC_MSG_ERROR([unable to find cudart library])
-       ])
-       AC_CHECK_LIB([curand], [main], [], [
-               AC_MSG_ERROR([unable to find curand library])
-       ])
+  AC_CHECK_LIB([cuda], [main], [], [
+     AC_MSG_ERROR([unable to find cuda library])
+    ])
+  AC_CHECK_LIB([cudart], [main], [], [
+      AC_MSG_ERROR([unable to find cudart library])
+    ])
+  AC_CHECK_LIB([curand], [main], [], [
+      AC_MSG_ERROR([unable to find curand library])
+    ])
 else
     CUDA_CFLAGS=""
     CUDA_LDFLAGS=""
@@ -149,35 +149,35 @@ AC_CHECK_LIB([protobuf], [main], [], [
 
 AC_ARG_ENABLE(lmdb,
      AS_HELP_STRING([--enable-lmdb],[enable debug option]),
-        [enable_lmdb=yes],[enable_lmdb=no])
+   [enable_lmdb=yes],[enable_lmdb=no])
 AM_CONDITIONAL(LMDB, test "$enable_lmdb" = yes)
 if test x"$enable_lmdb" = x"yes"; then
-       AC_SEARCH_LIBS([mdb_env_create], [lmdb], [], [
-         AC_MSG_ERROR([unable to find mdb_env_create() function])
-         ])
-       AC_DEFINE(LMDB, 1, [Enable Option layer])
+  AC_SEARCH_LIBS([mdb_env_create], [lmdb], [], [
+    AC_MSG_ERROR([unable to find mdb_env_create() function])
+    ])
+  AC_DEFINE(LMDB, 1, [Enable Option layer])
 fi
 
 AC_ARG_ENABLE(test,
-       AS_HELP_STRING([--enable-test],[enable singa test]),
-       [enable_test=yes],[enable_test=no])
+  AS_HELP_STRING([--enable-test],[enable singa test]),
+  [enable_test=yes],[enable_test=no])
 AM_CONDITIONAL(SINGATEST, test "$enable_test" = yes)
 if test x"$enable_test" != x"no"; then
-       PROGS='singatest test '
-       LTLIBS='libgtest.la '
+  PROGS='singatest test '
+  LTLIBS='libgtest.la '
 else
-       PROGS=''
-       LTLIBS=''
+  PROGS=''
+  LTLIBS=''
 fi
 
 AC_ARG_ENABLE(debug,
-       AS_HELP_STRING([--enable-debug],[enable debug mode]),
-       [enable_debug=yes],[enable_debug=no])
+  AS_HELP_STRING([--enable-debug],[enable debug mode]),
+  [enable_debug=yes],[enable_debug=no])
 AM_CONDITIONAL(DEBUG, test "$enable_debug" = yes)
 if test x"$enable_debug" != x"no"; then
-       DEBUG+=' -g'
+  DEBUG+=' -g'
 else
-       DEBUG+=' -O2'
+  DEBUG+=' -O2'
 fi
 
 AC_ARG_ENABLE(python,

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/2d38cb30/include/singa/neuralnet/neuralnet.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/neuralnet.h b/include/singa/neuralnet/neuralnet.h
index b4105d3..bc1a7d8 100644
--- a/include/singa/neuralnet/neuralnet.h
+++ b/include/singa/neuralnet/neuralnet.h
@@ -131,11 +131,21 @@ class NeuralNet {
   /**
    * Create neural net from graph, one layer per node.
    */
-  void CreateNetFromGraph(Graph* graph, int num_partitions);
+  void CreateNetFromGraph(Graph* graph);
   /**
    * prepare data structures, e.g., params_, layers_, etc.
    */
   void PrepareDataStructures();
+  /**
+   * add split layers, due to connections to multiple dst-layers
+   */
+  NetProto AddModelSplitLayers(const NetProto& netproto);
+  /**
+   * add connection layers, due to partition of the whole neuralnet
+   * this should be done after AddModelSplitLayers()
+   */
+  NetProto AddPartitionConnectionLayers(const NetProto& netproto,
+                                        int npartitions);
 
  protected:
   std::vector<Layer*> layers_;

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/2d38cb30/include/singa/utils/graph.h
----------------------------------------------------------------------
diff --git a/include/singa/utils/graph.h b/include/singa/utils/graph.h
index d26a8ee..2462808 100644
--- a/include/singa/utils/graph.h
+++ b/include/singa/utils/graph.h
@@ -122,6 +122,12 @@ class Graph {
    */
   Node* AddNode(const string& name, const std::map<string, string>& attrs);
   /**
+   * @deprecated {remove layer related info from node attrs}
+   * Add a node with given name and other info.
+   */
+  Node* AddNode(const std::string& name, const std::string& origin, int id,
+                void* proto);
+  /**
    * Add an edge connecting the two given nodes.
    */
   void AddEdge(Node* srcnode, Node* dstnode);
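
The new overload constructs and registers a node in one call, replacing the
removed AddNode(Node*) / AddNode(name) pair (see src/utils/graph.cc below).
A hypothetical usage sketch:

  LayerProto* proto = new LayerProto();  // freed later by the graph's owner
  proto->set_name("fc1@00");
  // arguments: node name, origin layer name, partition id, layer proto
  Node* node = graph->AddNode("fc1@00", "fc1", 0, proto);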

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/2d38cb30/src/driver.cc
----------------------------------------------------------------------
diff --git a/src/driver.cc b/src/driver.cc
index 9389fde..4eb474a 100644
--- a/src/driver.cc
+++ b/src/driver.cc
@@ -69,15 +69,18 @@ void Driver::Init(int argc, char **argv) {
   RegisterLayer<RecordOutputLayer, int>(kRecordOutput);
   RegisterLayer<CSVOutputLayer, int>(kCSVOutput);
 
+  // connection layers
   RegisterLayer<BridgeDstLayer, int>(kBridgeDst);
   RegisterLayer<BridgeSrcLayer, int>(kBridgeSrc);
+  RegisterLayer<ConcateLayer, int>(kConcate);
+  RegisterLayer<SliceLayer, int>(kSlice);
+  RegisterLayer<SplitLayer, int>(kSplit);
 
   RegisterLayer<AccuracyLayer, int>(kAccuracy);
   RegisterLayer<ArgSortLayer, int>(kArgSort);
   RegisterLayer<ConvolutionLayer, int>(kConvolution);
   RegisterLayer<CConvolutionLayer, int>(kCConvolution);
   RegisterLayer<CPoolingLayer, int>(kCPooling);
-  RegisterLayer<ConcateLayer, int>(kConcate);
 
 #ifdef USE_CUDNN
   RegisterLayer<CudnnActivationLayer, int>(kCudnnActivation);
@@ -88,6 +91,8 @@ void Driver::Init(int argc, char **argv) {
   RegisterLayer<CudnnSoftmaxLossLayer, int>(kCudnnSoftmaxLoss);
 #endif
 
+  RegisterLayer<DropoutLayer, int>(kDropout);
+  RegisterLayer<DummyLayer, int>(kDummy);
   RegisterLayer<EuclideanLossLayer, int>(kEuclideanLoss);
   RegisterLayer<InnerProductLayer, int>(kInnerProduct);
   RegisterLayer<LabelLayer, int>(kLabel);
@@ -101,9 +106,7 @@ void Driver::Init(int argc, char **argv) {
   RegisterLayer<ReLULayer, int>(kReLU);
   RegisterLayer<ShardDataLayer, int>(kShardData);
   RegisterLayer<SigmoidLayer, int>(kSigmoid);
-  RegisterLayer<SliceLayer, int>(kSlice);
   RegisterLayer<SoftmaxLossLayer, int>(kSoftmaxLoss);
-  RegisterLayer<SplitLayer, int>(kSplit);
   RegisterLayer<STanhLayer, int>(kSTanh);
   RegisterLayer<SoftmaxLayer, int>(kSoftmax);
 

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/2d38cb30/src/neuralnet/connection_layer/slice.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/connection_layer/slice.cc b/src/neuralnet/connection_layer/slice.cc
index b28d4c4..ee635e0 100644
--- a/src/neuralnet/connection_layer/slice.cc
+++ b/src/neuralnet/connection_layer/slice.cc
@@ -97,27 +97,28 @@ void SliceLayer::ComputeGradient(int flag, const vector<Layer*>& srclayers) {
 }
 
 const Blob<float>& SliceLayer::data(const Layer* from) const {
-  CHECK(from);
-  CHECK_LT(from->partition_id(), num_partitions());
-  return *datavec_[from->partition_id()];
+  int id = from ? from->partition_id() : 0;
+  CHECK_LT(id, num_partitions());
+  return *datavec_[id];
 }
 
 const Blob<float>& SliceLayer::grad(const Layer* from) const {
-  CHECK(from);
-  CHECK_LT(from->partition_id(), num_partitions());
-  return *gradvec_[from->partition_id()];
+  int id = from ? from->partition_id() : 0;
+  CHECK_LT(id, num_partitions());
+  return *gradvec_[id];
 }
 
 Blob<float>* SliceLayer::mutable_data(const Layer* from) {
+  int id = from ? from->partition_id() : 0;
   CHECK(from);
-  CHECK_LT(from->partition_id(), num_partitions());
-  return datavec_[from->partition_id()];
+  CHECK_LT(id, num_partitions());
+  return datavec_[id];
 }
 
 Blob<float>* SliceLayer::mutable_grad(const Layer* from) {
-  CHECK(from);
-  CHECK_LT(from->partition_id(), num_partitions());
-  return gradvec_[from->partition_id()];
+  int id = from ? from->partition_id() : 0;
+  CHECK_LT(id, num_partitions());
+  return gradvec_[id];
 }
 const std::string SliceLayer::ToString(bool debug, int flag) {
   if (!debug)

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/2d38cb30/src/neuralnet/connection_layer/split.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/connection_layer/split.cc b/src/neuralnet/connection_layer/split.cc
index d2af59d..0ebadc2 100644
--- a/src/neuralnet/connection_layer/split.cc
+++ b/src/neuralnet/connection_layer/split.cc
@@ -37,13 +37,14 @@ void SplitLayer::Setup(const LayerProto& conf,
   CHECK_EQ(srclayers.size(), 1);
   Layer::Setup(conf, srclayers);
   data_.Reshape(srclayers[0]->data(this).shape());
-  data_.ShareData(srclayers[0]->mutable_data(this));
-  CHECK_GT(num_partitions(), 0);
-  // add num_partitions()-1 more grad blobs
-  for (int i = 1; i < num_partitions(); ++i) {
+  data_.ShareData(srclayers[0]->data(this), false);
+  int num_splits = conf.split_conf().num_splits();
+  CHECK_GT(num_splits, 0);
+  // add num_splits-1 more grad blobs
+  for (int i = 1; i < num_splits; ++i) {
     gradvec_.push_back(new Blob<float>());
   }
-  for (int i = 0; i < num_partitions(); ++i)
+  for (int i = 0; i < num_splits; ++i)
     gradvec_[i]->Reshape(srclayers[0]->data(this).shape());
 }
 
@@ -55,22 +56,24 @@ void SplitLayer::ComputeFeature(int flag, const vector<Layer*>& srclayers) {
 void SplitLayer::ComputeGradient(int flag, const vector<Layer*>& srclayers) {
   CHECK_EQ(srclayers.size(), 1);
   // aggregate all gradients to grad_[0]
-  for (int i = 1; i < num_partitions(); ++i)
-    for (int j = 0; j < gradvec_[0]->count(); ++j)
-      gradvec_[0]->mutable_cpu_data()[j] += gradvec_[i]->cpu_data()[j];
+  for (int i = 1; i < num_splits; ++i)
+    AXPY<float>(cpu, 1.0, *gradvec_[i], gradvec_[0]);
+//  for (int i = 1; i < num_splits; ++i)
+//    for (int j = 0; j < gradvec_[0]->count(); ++j)
+//      gradvec_[0]->mutable_cpu_data()[j] += gradvec_[i]->cpu_data()[j];
   // copy grad_[0] to srclayer's grad
   srclayers[0]->mutable_grad(this)->CopyFrom(*gradvec_[0]);
 }
 
 const Blob<float>& SplitLayer::grad(const Layer* from) const {
   CHECK(from);
-  CHECK_LT(from->partition_id(), num_partitions());
+  CHECK_LT(from->partition_id(), num_splits);
   return *gradvec_[from->partition_id()];
 }
 
 Blob<float>* SplitLayer::mutable_grad(const Layer* from) {
   CHECK(from);
-  CHECK_LT(from->partition_id(), num_partitions());
+  CHECK_LT(from->partition_id(), num_splits);
   return gradvec_[from->partition_id()];
 }
 const std::string SplitLayer::ToString(bool debug, int flag) {

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/2d38cb30/src/neuralnet/neuralnet.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/neuralnet.cc b/src/neuralnet/neuralnet.cc
index acfc9ea..238bd3f 100644
--- a/src/neuralnet/neuralnet.cc
+++ b/src/neuralnet/neuralnet.cc
@@ -99,20 +99,20 @@ NeuralNet* NeuralNet::Create(const NetProto& net_conf, Phase phase,
     param->set_name(name);
     param->set_share_from(from);
   }
-  LOG(INFO) << "NeuralNet config is\n" << conf.DebugString();
+  LOG(INFO) << "Initial NeuralNet Config is\n" << conf.DebugString();
 // TODO(wangwei) create net based on net type, e.g., directed, undirected, etc
   return new NeuralNet(conf, npartitions);
 }
 
 NeuralNet::NeuralNet(NetProto netproto, int npartitions) {
-  LOG(INFO) << "Constructing Neural Net...";
+  LOG(INFO) << "Constructing NeuralNet...";
   auto graph = CreateGraph(netproto, npartitions);
-  CreateNetFromGraph(graph, npartitions);
+  CreateNetFromGraph(graph);
   PrepareDataStructures();
   for (Node* node : graph->nodes())
     delete static_cast<LayerProto*>(node->proto);
   delete graph;
-  LOG(INFO) << "Neural net constructed";
+  LOG(INFO) << "NeuralNet Constructed";
 }
 
 NeuralNet::~NeuralNet() {
@@ -142,20 +142,7 @@ void NeuralNet::Load(const vector<string>& paths,
   }
 }
 
-/*
-std::string NeuralNet::ToAdjacency() {
-  string disp = "";
-  for (auto& layer : layers_) {
-    disp += layer->name()+": ";
-    for (const auto& dst : layer->dstlayers())
-      disp += dst->name()+", ";
-    disp += "\n";
-  }
-  return disp;
-}
-*/
-
-void NeuralNet::ShareParamsFrom(NeuralNet* other, bool cpu_only) {
+void NeuralNet::ShareParamsFrom(NeuralNet* other) {
   for (auto& layer : layers_) {
     auto otherlayer = other->name2layer(layer->name());
     if (otherlayer != nullptr) {
@@ -169,189 +156,241 @@ void NeuralNet::ShareParamsFrom(NeuralNet* other, bool cpu_only) {
   }
 }
 
-// add a node for SliceLayer between srcnode and dstnodes
-Node* SliceNode(Graph* graph, Node* srcnode,
-    const vector<Node*>& dstnodes, bool connect_dst) {
-  string name = srcnode->name + "<";
-  LayerProto *proto = new LayerProto();
-  proto->set_name(name);
-  proto->set_type(LayerType::kSlice);
-  proto->set_partition_id(
-      static_cast<LayerProto*>(srcnode->proto)->partition_id());
-  proto->set_partition_dim(
-      static_cast<LayerProto*>(srcnode->proto)->partition_dim());
-  Node* node = new Node(name, "##" + name, proto->partition_id(), proto);
-  graph->AddNode(node);
-  graph->AddEdge(srcnode, node);
-  if (connect_dst)
-    for (Node* dst : dstnodes)
-      graph->AddEdge(node, dst);
-  return node;
+// name of connection layers
+string splitName(const string& layer) { return "split("+layer+")"; }
+string sliceName(const string& layer) { return "slice("+layer+")"; }
+string concateName(const string& layer) { return "concate("+layer+")"; }
+string bridgeName(const string& src, const string& dst) { return src+"->"+dst; }
+string bridgeSrcName(const string& src, const string& dst) {
+  return "bridge_src("+bridgeName(src, dst)+")";
+}
+string bridgeDstName(const string& src, const string& dst) {
+  return "bridge_dst("+bridgeName(src, dst)+")";
 }
 
-// add a node for ConcateLayer between srcnodes and dstnode
-Node* ConcateNodes(Graph* graph, const vector<Node*>& srcnodes, Node* dstnode) {
-  string name = ">" + dstnode->name;
-  LayerProto *proto = new LayerProto();
-  proto->set_name(name);
-  proto->set_type(LayerType::kConcate);
-  proto->set_partition_id(
-      static_cast<LayerProto*>(dstnode->proto)->partition_id());
-  proto->set_partition_dim(
-      static_cast<LayerProto*>(srcnodes[0]->proto)->partition_dim());
-  Node* node = new Node(name, "##" + name, proto->partition_id(), proto);
-  graph->AddNode(node);
-  graph->AddEdge(node, dstnode);
-  for (Node* src : srcnodes)
-    graph->AddEdge(src, node);
-  return node;
+ConnectionType dstLayerConnection(const LayerProto& proto) {
+  auto layer = Layer::Create(proto);
+  auto ret = layer->dst_layer_connection();
+  delete layer;
+  return ret;
 }
 
-// add a node for SplitLayer between srcnode and dstnodes
-Node* SplitNode(Graph* graph, Node* srcnode, const vector<Node*>& dstnodes) {
-  string name = srcnode->name + "+";
-  LayerProto *proto = new LayerProto();
-  proto->set_name(name);
-  proto->set_type(LayerType::kSplit);
-  proto->set_partition_id(
-      static_cast<LayerProto*>(srcnode->proto)->partition_id());
-  Node* node = new Node(name, "##" + name, proto->partition_id(), proto);
-  graph->AddNode(node);
-  graph->AddEdge(srcnode, node);
-  for (Node* dst : dstnodes)
-    graph->AddEdge(node, dst);
-  return node;
+ConnectionType srcNeuronConnection(const LayerProto& proto) {
+  auto layer = Layer::Create(proto);
+  auto ret = layer->src_neuron_connection(0);
+  delete layer;
+  return ret;
 }
 
-// add a pair of nodes for BridgeSrcLayer and BridgeDstLayer between srcnode
-// and dstnode
-void BridgeNodes(Graph* graph, Node* srcnode, Node* dstnode) {
-  string sname = srcnode->name + ":-";
-  LayerProto *sproto = new LayerProto();
-  sproto->set_name(sname);
-  sproto->set_type(LayerType::kBridgeSrc);
-  sproto->set_partition_id(
-      static_cast<LayerProto*>(srcnode->proto)->partition_id());
-  auto sbridge = new Node(sname, "##" + sname, sproto->partition_id(), sproto);
-  string dname = "-:" + dstnode->name;
-  LayerProto *dproto = new LayerProto();
-  dproto->set_name(dname);
-  dproto->set_type(LayerType::kBridgeDst);
-  dproto->set_partition_id(
-      static_cast<LayerProto*>(dstnode->proto)->partition_id());
-  auto dbridge = new Node(dname, "##" + dname, dproto->partition_id(), dproto);
-  graph->AddNode(sbridge);
-  graph->AddNode(dbridge);
-  graph->AddEdge(srcnode, sbridge);
-  graph->AddEdge(sbridge, dbridge);
-  graph->AddEdge(dbridge, dstnode);
+NetProto NeuralNet::AddModelSplitLayers(const NetProto& netproto) {
+  NetProto net_w_split;
+  net_w_split.CopyFrom(netproto);
+  net_w_split.clear_layer();
+  // calculate number of dst-layers for each layer
+  map<string, int> dst_count;
+  for (const LayerProto& layer : netproto.layer())
+    for (const string& src_name : layer.srclayers())
+      ++dst_count[src_name];
+  // tag to add split layer if:
+  // dst_count[] > 1 && dst_layer_connection() = OneToOne
+  for (const LayerProto& layer : netproto.layer())
+    if ((dst_count[layer.name()] > 1 && dstLayerConnection(layer) == kOneToOne))
+        dst_count[layer.name()] = -dst_count[layer.name()];
+  // add original layers and adjust srclayers
+  for (const LayerProto& layer : netproto.layer()) {
+    LayerProto* proto = net_w_split.add_layer();
+    proto->CopyFrom(layer);
+    proto->clear_srclayers();
+    for (const string& src_name : layer.srclayers())
+      if (dst_count[src_name] < 0)
+        proto->add_srclayers(splitName(src_name));
+      else
+        proto->add_srclayers(src_name);
+  }
+  // add split layers
+  for (const LayerProto& layer : netproto.layer()) {
+    if (dst_count[layer.name()] < 0) {
+      LayerProto* split_proto = net_w_split.add_layer();
+      split_proto->set_name(splitName(layer.name()));
+      split_proto->set_type(kSplit);
+      split_proto->set_partition_dim(layer.partition_dim());
+      split_proto->add_srclayers(layer.name());
+      split_proto->mutable_split_conf()
+                 ->set_num_splits(-dst_count[layer.name()]);
+    }
+  }
+  // LOG(INFO) << "NeuralNet Config After Model Split is\n"
+  //           << net_w_split.DebugString();
+  return net_w_split;
+}
+
+NetProto NeuralNet::AddPartitionConnectionLayers(const NetProto& netproto,
+                                                 int npartitions) {
+  CHECK_GT(npartitions, 0);
+  NetProto net_w_connection;
+  net_w_connection.CopyFrom(netproto);
+  // if npartitions is 1, no need to add connection layers
+  if (npartitions == 1) return net_w_connection;
+  // add original layers, but remove all edges first
+  net_w_connection.clear_layer();
+  map<string, LayerProto*> name2proto;
+  for (const LayerProto& layer : netproto.layer()) {
+    LayerProto* layer_proto = net_w_connection.add_layer();
+    layer_proto->CopyFrom(layer);
+    layer_proto->clear_srclayers();
+    name2proto[layer_proto->name()] = layer_proto;
+  }
+  /*
+   * Add Slice, Concate, Split Layers for Model Partition
+   *
+   * All cases are as follows:
+   * src_pdim | dst_pdim | connection_type | Action
+   *     0    |     0    |     OneToOne    | Direct Connection
+   *     1    |     1    |     OneToOne    | Direct Connection
+   *     0    |     0    |     OneToAll    | Direct Connection
+   *     1    |     0    |     OneToOne    | Slice -> Concate
+   *     0    |     1    |     OneToOne    | Slice -> Concate
+   *     1    |     0    |     OneToAll    | Slice -> Concate
+   *     0    |     1    |     OneToAll    | Split -> Concate
+   *     1    |     1    |     OneToAll    | Split -> Concate
+   *
+   * Logic:
+   * dst_pdim = 1 && OneToAll ?
+   *   (YES) Split -> Concate
+   *   (NO)  src_pdim = dst_pdim ?
+   *           (YES) Direct Connection
+   *           (NO)  Slice -> Concate
+   */
+   for (const LayerProto& origin_layer : netproto.layer()) {
+     LayerProto* dst_layer = name2proto[origin_layer.name()];
+     int dst_pdim = dst_layer->partition_dim();
+     ConnectionType connection = srcNeuronConnection(*dst_layer);
+     for (const string& src_name : origin_layer.srclayers()) {
+       LayerProto* src_layer = name2proto[src_name];
+       int src_pdim = src_layer->partition_dim();
+       // dst_pdim = 1 && OneToAll ?
+       if (dst_pdim == 1 && connection == kOneToAll) {
+         // add split layer
+         LayerProto* split_layer = net_w_connection.add_layer();
+         split_layer->set_name(splitName(src_layer->name()));
+         split_layer->set_type(kSplit);
+         split_layer->set_partition_dim(src_layer->partition_dim());
+         split_layer->add_srclayers(src_layer->name());
+         split_layer->mutable_split_conf()->set_num_splits(npartitions);
+        // add concate layer
+        LayerProto* concate_layer = net_w_connection.add_layer();
+        concate_layer->set_name(concateName(split_layer->name()));
+        concate_layer->set_type(kConcate);
+        // concate on src_pdim
+        concate_layer->set_partition_dim(split_layer->partition_dim());
+        concate_layer->add_srclayers(split_layer->name());
+        // connect dst_layer to concate layer
+        dst_layer->add_srclayers(concate_layer->name());
+       } else {
+         // src_pdim = dst_pdim ?
+         if (dst_pdim == src_pdim) {
+           // direct connection
+           dst_layer->add_srclayers(src_layer->name());
+         } else {
+           // add slice layer
+           LayerProto* slice_layer = net_w_connection.add_layer();
+           slice_layer->set_name(sliceName(src_layer->name()));
+           slice_layer->set_type(kSlice);
+           // slice on dst_pdim
+           slice_layer->set_partition_dim(dst_layer->partition_dim());
+           slice_layer->add_srclayers(src_layer->name());
+           // add concate layer
+           LayerProto* concate_layer = net_w_connection.add_layer();
+           concate_layer->set_name(concateName(slice_layer->name()));
+           concate_layer->set_type(kConcate);
+           // concate on src_pdim
+           concate_layer->set_partition_dim(src_layer->partition_dim());
+           concate_layer->add_srclayers(slice_layer->name());
+           // connect dst_layer to concate layer
+           dst_layer->add_srclayers(concate_layer->name());
+         }
+       }
+     }
+   }
+  LOG(INFO) << "NeuralNet Config After Adding Connection Layers is\n"
+            << net_w_connection.DebugString();
+  return net_w_connection;
 }
 
 Graph* NeuralNet::CreateGraph(const NetProto& netproto, int npartitions) {
-  Graph *graph = new Graph();
-  // from name of original layer to nodes
+  NetProto net_w_split = AddModelSplitLayers(netproto);
+  NetProto net_w_connection =
+    AddPartitionConnectionLayers(net_w_split, npartitions);
+  // for each original layer proto, create #npartitions of nodes
+  Graph* graph = new Graph();
   map<string, vector<Node*>> name2nodes;
   map<string, const LayerProto*> name2proto;
-  for (const auto& layer : netproto.layer()) {
+  for (const LayerProto& layer : net_w_connection.layer()) {
     vector<Node*> nodes;
-    int pdim = layer.partition_dim();
-    if (pdim == 0 || pdim == 1) {
-      char suffix[4];
-      for (int i = 0; i < npartitions; i++) {
-        LayerProto *proto = new LayerProto(layer);
-        snprintf(suffix, sizeof(suffix), "%02d", i);
-        // differentiate partitions
-        string nodename = layer.name() + "@" + string(suffix);
-        proto->set_partition_id(i);
-        proto->set_num_partitions(npartitions);
-        proto->set_name(nodename);
-        auto node = new Node(nodename, layer.name(), i, proto);
-        graph->AddNode(node);
-        nodes.push_back(node);
-      }
-    } else if (pdim == -1) {
+    char suffix[4];
+    for (int i = 0; i < npartitions; i++) {
       LayerProto *proto = new LayerProto(layer);
-      auto node = new Node(layer.name(), layer.name(), 0, proto);
-      graph->AddNode(node);
+      snprintf(suffix, sizeof(suffix), "%02d", i);
+      // differentiate partitions
+      string nodename = layer.name() + "@" + string(suffix);
+      proto->set_name(nodename);
+      proto->set_type(layer.type());
+      proto->set_partition_dim(layer.partition_dim());
+      proto->set_partition_id(i);
+      proto->set_num_partitions(npartitions);
+      Node* node = graph->AddNode(nodename, layer.name(), i, proto);
       nodes.push_back(node);
-    } else {
-      LOG(FATAL) << "Cannot partition layer (" << layer.name() <<") on dim: "
-        << layer.partition_dim();
     }
     name2nodes[layer.name()] = nodes;
     name2proto[layer.name()] = &layer;
   }
-
-  // connect nodes, nodes for ConcateLayer, SliceLayer and SplitLayer are added.
-  for (const auto& layerproto : netproto.layer()) {
-    string name = layerproto.name();
-    int pdim = layerproto.partition_dim();
-    const vector<Node*>& nodes = name2nodes.at(name);
-    for (auto srcname : layerproto.srclayers()) {
-      const vector<Node*>& srcnodes = name2nodes.at(srcname);
-      // TODO(wangwei): consider the type of each connection
-      Layer *layer = Layer::Create(layerproto);
-      ConnectionType connection = layer->src_neuron_connection(0);
-      delete layer;
-      int src_pdim = name2proto[srcname]->partition_dim();
-      // no partition of src layer
-      if (src_pdim == -1) {
-        Node* srcnode = srcnodes[0];
-        if (pdim == 0 || (pdim == 1 && connection == kOneToOne))
-          SliceNode(graph, srcnode, nodes, true);
-        else if (pdim == -1)
-          graph->AddEdge(srcnode, nodes[0]);
-        else  // type==kLayerPartition&&connection==kOneToAll
-          SplitNode(graph, srcnode, nodes);
-      } else if ((pdim == -1 && (src_pdim == 0 || src_pdim == 1))
-          ||(pdim == 1 && connection == kOneToAll && src_pdim == 0)) {
-        // copy/concate the whole srclayer for every dst partition
-        for (Node* node : nodes)
-          ConcateNodes(graph, srcnodes, node);
-      } else if ((src_pdim == 1 && pdim == 0) || (src_pdim == 0 && pdim == 1)) {
-        // TODO(wangwei) rewrite the whole src-dst construction in a clear way
-        LOG(FATAL) << "not implemented";
-        // the most complext scenario
-        // vector<Node*> nodes;
-        // for (Node* srcnode : srcnodes)
-        //   nodes.push_back(SliceNode(graph, srcnode, nodes, false));
-        // for (Node* node : nodes)
-        //   ConcateNodes(graph, nodes, node);
-      } else if ((src_pdim == 0 && pdim == 0)||
-          (src_pdim == 1 && pdim == 1 && connection == kOneToOne)) {
-        CHECK_EQ(srcnodes.size(), nodes.size());
-        for (size_t i = 0; i < srcnodes.size(); i++)
-          graph->AddEdge(srcnodes[i], nodes[i]);
+  // connect layers, add bridge layers if partition id is different
+  for (const LayerProto& origin_layer : net_w_connection.layer()) {
+    vector<Node*> dst_nodes = name2nodes[origin_layer.name()];
+    for (const string& src_name : origin_layer.srclayers()) {
+      vector<Node*> src_nodes = name2nodes[src_name];
+      if (origin_layer.type() != kConcate) {
+        for (size_t i = 0; i < src_nodes.size(); ++i) {
+          CHECK_EQ(src_nodes[i]->partition_id, i);
+          CHECK_EQ(dst_nodes[i]->partition_id, i);
+          graph->AddEdge(src_nodes[i], dst_nodes[i]);
+        }
       } else {
-        LOG(FATAL) << "in wrong branch, not implemented";
-      }
-    }
-  }
-  // must do topology sort, because we have added new nodes.
-  graph->Sort();
-  // add nodes for SplitLayer
-  vector<Node*> oldnodes = graph->nodes();
-  for (Node* node : oldnodes) {
-    auto layer = Layer::Create(*static_cast<LayerProto*>(node->proto));
-    if (node->dstnodes.size() > 1
-        && layer->dst_layer_connection() == kOneToOne) {
-      vector<Node*> dstnodes = node->dstnodes;
-      for (Node* dst : dstnodes)
-        graph->RemoveEdge(node, dst);
-      SplitNode(graph, node, dstnodes);
-    }
-    delete layer;
-  }
-  // add nodes for bridge layers
-  for (Node* node : oldnodes) {
-    vector<Node*> dstnodes = node->dstnodes;
-    auto pid1 = static_cast<LayerProto*>(node->proto)->partition_id();
-    for (size_t i = 0; i < dstnodes.size(); i++) {
-      Node* dstnode = dstnodes.at(i);
-      auto pid2 = static_cast<LayerProto*>(node->proto)->partition_id();
-      if (pid1 != pid2) {
-        graph->RemoveEdge(node, dstnode);
-        BridgeNodes(graph, node, dstnode);
+        // need to add bridge layers
+        for (size_t i = 0; i < src_nodes.size(); ++i) {
+          CHECK_EQ(src_nodes[i]->partition_id, i);
+          for (size_t j = 0; j < dst_nodes.size(); ++j) {
+            CHECK_EQ(dst_nodes[j]->partition_id, j);
+            if (i == j) {  // in same partition, no bridge needed
+              graph->AddEdge(src_nodes[i], dst_nodes[j]);
+            } else {  // add bridges
+              // bridge src && dst layer
+              LayerProto *proto_bsrc = new LayerProto();
+              LayerProto *proto_bdst = new LayerProto();
+              string bsrc_name = bridgeSrcName(src_nodes[i]->name,
+                                               dst_nodes[j]->name);
+              string bdst_name = bridgeDstName(src_nodes[i]->name,
+                                               dst_nodes[j]->name);
+              proto_bsrc->set_name(bsrc_name);
+              proto_bdst->set_name(bdst_name);
+              proto_bsrc->set_type(kBridgeSrc);
+              proto_bdst->set_type(kBridgeDst);
+              proto_bsrc->set_partition_dim(origin_layer.partition_dim());
+              proto_bdst->set_partition_dim(origin_layer.partition_dim());
+              proto_bsrc->set_partition_id(src_nodes[i]->partition_id);
+              proto_bdst->set_partition_id(dst_nodes[j]->partition_id);
+              proto_bsrc->set_num_partitions(npartitions);
+              proto_bdst->set_num_partitions(npartitions);
+              Node* bsrc_node = graph->AddNode(bsrc_name, bsrc_name, i,
+                                               proto_bsrc);
+              Node* bdst_node = graph->AddNode(bdst_name, bdst_name, j,
+                                               proto_bdst);
+              graph->AddEdge(src_nodes[i], bsrc_node);
+              graph->AddEdge(bsrc_node, bdst_node);
+              graph->AddEdge(bdst_node, dst_nodes[j]);
+            }
+          }
+        }
       }
     }
   }
@@ -360,7 +399,7 @@ Graph* NeuralNet::CreateGraph(const NetProto& netproto, int npartitions) {
   return graph;
 }
 
-void NeuralNet::CreateNetFromGraph(Graph* graph, int npartitions) {
+void NeuralNet::CreateNetFromGraph(Graph* graph) {
   // create one layer per node
   for (Node* node : graph->nodes()) {
     auto proto_ptr = static_cast<LayerProto*>(node->proto);
@@ -375,12 +414,12 @@ void NeuralNet::CreateNetFromGraph(Graph* graph, int npartitions) {
     for (Node* src : node->srcnodes)
       src_map_[layer].push_back(name2layer(src->name));
   }
-
   // setup layers
   int paramid = 0;
   map<string, string> layerinfo;
   map<string, vector<Layer*>> share_param_layers;
   for (Node* node : graph->nodes()) {
+    LOG(INFO) << "constructing graph: " << node->name;
     auto layer = name2layer(node->name);
     layer->Setup(*(static_cast<LayerProto*>(node->proto)), srclayers(layer));
     DLOG(INFO) << "constructing graph: " << layer->name();
@@ -397,7 +436,6 @@ void NeuralNet::CreateNetFromGraph(Graph* graph, int npartitions) {
     if (layer->partition_dim() == 0)
       share_param_layers[node->origin].push_back(layer);
   }
-  // LOG(INFO) << "Neural net structure\n"  << graph->ToJson(layerinfo);
   // create map from param name to param ptr
   std::unordered_map<string, Param*> name2param;
   for (auto layer : layers_) {
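
Two concrete traces of the rewriting above, with npartitions = 2 (layer names
follow the helper naming functions in this commit): a layer "conv1" feeding
two kOneToOne consumers is rewritten by AddModelSplitLayers as

  conv1 -> split(conv1) -> {fc1, fc2}    // split_conf.num_splits = 2

and a source "fc1" with partition_dim = 0 feeding a kOneToOne layer "fc2" with
partition_dim = 1 falls into the Slice -> Concate branch of
AddPartitionConnectionLayers:

  fc1 -> slice(fc1) -> concate(slice(fc1)) -> fc2

where the slice layer partitions on dst_pdim (= 1) and the concate layer
reassembles on src_pdim (= 0).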

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/2d38cb30/src/proto/job.proto
----------------------------------------------------------------------
diff --git a/src/proto/job.proto b/src/proto/job.proto
index 80752c3..035f2c1 100644
--- a/src/proto/job.proto
+++ b/src/proto/job.proto
@@ -227,6 +227,8 @@ message LayerProto {
   optional SoftmaxProto softmax_conf = 53;
   // configuration for softmax loss layer
   optional SoftmaxLossProto softmaxloss_conf = 40;
+  // configuration for split layer
+  optional SplitProto split_conf = 42;
   // configuration for store input layers
   optional StoreProto store_conf = 51;
 
@@ -334,6 +336,10 @@ message PrefetchProto {
   repeated LayerProto sublayers = 1;
 }
 
+message SplitProto {
+  optional int32 num_splits = 1 [default = 1];
+}
+
 message StoreProto {
   optional string backend = 1;
   optional string path = 2;
@@ -614,6 +620,7 @@ enum LayerType {
   kCudnnActivation = 54;
   kCudnnSoftmaxLoss = 55;
   kDropout = 4;
+  kDummy = 20;
   kInnerProduct = 5;
   kLRN = 6;
   kPooling = 8;

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/2d38cb30/src/test/test_connection_layers.cc
----------------------------------------------------------------------
diff --git a/src/test/test_connection_layers.cc b/src/test/test_connection_layers.cc
index 28ae341..eab349a 100644
--- a/src/test/test_connection_layers.cc
+++ b/src/test/test_connection_layers.cc
@@ -413,7 +413,7 @@ TEST(ConnectionLayerTest, SplitTest) {
   src_split.push_back(static_cast<Layer*>(&in));
   LayerProto proto_split;
   proto_split.set_name("split");
-  proto_split.set_num_partitions(K);
+  proto_split.mutable_split_conf()->set_num_splits(K);
   SplitLayer split;
   split.Setup(proto_split, src_split);
   ASSERT_EQ(split.data(static_cast<Layer*>(&split)).shape(0), N);

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/2d38cb30/src/test/test_neuralnet.cc
----------------------------------------------------------------------
diff --git a/src/test/test_neuralnet.cc b/src/test/test_neuralnet.cc
index 842dabf..e2b8dd7 100644
--- a/src/test/test_neuralnet.cc
+++ b/src/test/test_neuralnet.cc
@@ -20,11 +20,99 @@
 *************************************************************/
 
 #include "gtest/gtest.h"
+#include "singa/driver.h"
 #include "singa/neuralnet/neuralnet.h"
+#include "singa/neuralnet/neuron_layer/dummy.h"
+#include "singa/neuralnet/connection_layer/slice.h"
+#include "singa/neuralnet/connection_layer/split.h"
+#include "singa/neuralnet/connection_layer/concate.h"
+
 using namespace singa;
 
-TEST(NeuralNet, ParamShareFrom) {
-  NetProto conf;
-  // add net.conf file into test folder, e.g., conf/net.conf
-  // add data shard example in test folder, e.g., data/test.shard
+const int N = 10;  // size of dim 0
+const int M = 20;  // size of dim 1
+const int K = 2;  // size of partitions
+
+TEST(NeuralNet, RegisterLayers) {
+  Driver driver;
+  driver.RegisterLayer<DummyLayer, int>(kDummy);
+  driver.RegisterLayer<SliceLayer, int>(kSlice);
+  driver.RegisterLayer<SplitLayer, int>(kSplit);
+  driver.RegisterLayer<ConcateLayer, int>(kConcate);
+  driver.RegisterLayer<BridgeSrcLayer, int>(kBridgeSrc);
+  driver.RegisterLayer<BridgeDstLayer, int>(kBridgeDst);
+}
+
+TEST(NeuralNet, AddModelSplitLayers) {
+  NetProto proto;
+  // use dummy as input layer
+  LayerProto* proto_in = proto.add_layer();
+  proto_in->set_name("dummy_input");
+  proto_in->set_type(kDummy);
+  proto_in->mutable_dummy_conf()->set_input(true);
+  proto_in->mutable_dummy_conf()->add_shape(N);
+  proto_in->mutable_dummy_conf()->add_shape(M);
+  // use 2 dummy neuron layers
+  for (int i = 0; i < 2; ++i) {
+    LayerProto* proto_neuron = proto.add_layer();
+    proto_neuron->set_name("dummy_neuron_" + std::to_string(i));
+    proto_neuron->set_type(kDummy);
+    proto_neuron->add_srclayers("dummy_input");
+  }
+  // use dummy as output layer
+  for (int i = 0; i < 2; ++i) { 
+    LayerProto* proto_out = proto.add_layer();
+    proto_out->set_name("dummy_output" + std::to_string(i));
+    proto_out->set_type(kDummy);
+    proto_out->mutable_dummy_conf()->set_output(true);
+    proto_out->add_srclayers("dummy_neuron_" + std::to_string(i));
+  }
+  NeuralNet::Create(proto, kTrain, K);
+}
+
+TEST(NeuralNet, DirectConnection) {
+  NetProto proto;
+  // use dummy as input layer
+  LayerProto* proto_in = proto.add_layer();
+  proto_in->set_name("dummy_input");
+  proto_in->set_type(kDummy);
+  proto_in->mutable_dummy_conf()->set_input(true);
+  proto_in->mutable_dummy_conf()->add_shape(N);
+  proto_in->mutable_dummy_conf()->add_shape(M);
+  // use dummy neuron layer
+  LayerProto* proto_neuron = proto.add_layer();
+  proto_neuron->set_name("dummy_neuron");
+  proto_neuron->set_type(kDummy);
+  proto_neuron->add_srclayers("dummy_input");
+  // use dummy as output layer
+  LayerProto* proto_out = proto.add_layer();
+  proto_out->set_name("dummy_output");
+  proto_out->set_type(kDummy);
+  proto_out->mutable_dummy_conf()->set_output(true);
+  proto_out->add_srclayers("dummy_neuron");
+  NeuralNet::Create(proto, kTrain, K);
+}
+
+TEST(NeuralNet, SliceConcate) {
+  NetProto proto;
+  // use dummy as input layer
+  LayerProto* proto_in = proto.add_layer();
+  proto_in->set_name("dummy_input");
+  proto_in->set_type(kDummy);
+  proto_in->mutable_dummy_conf()->set_input(true);
+  proto_in->mutable_dummy_conf()->add_shape(N);
+  proto_in->mutable_dummy_conf()->add_shape(M);
+  // use dummy neuron layer
+  LayerProto* proto_neuron = proto.add_layer();
+  proto_neuron->set_name("dummy_neuron");
+  proto_neuron->set_type(kDummy);
+  proto_neuron->add_srclayers("dummy_input");
+  // use dummy as output layer
+  LayerProto* proto_out = proto.add_layer();
+  proto_out->set_name("dummy_output");
+  proto_out->set_type(kDummy);
+  proto_out->set_partition_dim(1);
+  proto_out->mutable_dummy_conf()->set_output(true);
+  proto_out->add_srclayers("dummy_neuron");
+  NeuralNet::Create(proto, kTrain, K);
 }

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/2d38cb30/src/utils/blob.cc
----------------------------------------------------------------------
diff --git a/src/utils/blob.cc b/src/utils/blob.cc
index a49d169..91542e6 100644
--- a/src/utils/blob.cc
+++ b/src/utils/blob.cc
@@ -187,15 +187,12 @@ void SyncedMemory::to_gpu() {
 
 template <typename Dtype>
 void Blob<Dtype>::Reshape(const std::vector<int>& shape) {
-  int count = count_;
-  count_ = 1;
   shape_ = shape;
+  count_ = shape.size() ? 1 : 0;
   for (size_t i = 0; i < shape.size(); ++i) {
     CHECK(shape[i]);
     count_ *= shape[i];
   }
-  if (count > 0)
-    CHECK_EQ(count, count_);
   if (count_ > capacity_) {
     capacity_ = count_;
     data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
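
The Reshape change above removes the old CHECK that the element count stays
fixed, so a blob can now be reshaped to a different total size; memory is
reallocated only when the new count exceeds the current capacity. A sketch:

  Blob<float> b;
  b.Reshape(std::vector<int>{10, 20});  // count_ = 200, allocates
  b.Reshape(std::vector<int>{10, 30});  // count_ = 300 > capacity_, reallocates
  b.Reshape(std::vector<int>{5, 4});    // count_ = 20, reuses existing memory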

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/2d38cb30/src/utils/common.cc
----------------------------------------------------------------------
diff --git a/src/utils/common.cc b/src/utils/common.cc
index 73d1f69..928d4bb 100644
--- a/src/utils/common.cc
+++ b/src/utils/common.cc
@@ -242,7 +242,7 @@ string GetHostIP() {
   close(fd);
   string ip(inet_ntoa(((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr));
   /* display result */
-  LOG(INFO) << "Host IP= " << ip;
+  LOG(INFO) << "Host IP = " << ip;
   return ip;
 }
 

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/2d38cb30/src/utils/graph.cc
----------------------------------------------------------------------
diff --git a/src/utils/graph.cc b/src/utils/graph.cc
index f38ea53..0211e5a 100644
--- a/src/utils/graph.cc
+++ b/src/utils/graph.cc
@@ -84,16 +84,13 @@ Graph::~Graph() {
     delete node;
 }
 
-void Graph::AddNode(Node* node) {
+Node* Graph::AddNode(const string& name, const string& origin, int id,
+                    void* proto) {
+  Node* node = new Node(name, origin, id, proto);
   nodes_.push_back(node);
   CHECK(name2node_.find(node->name) == name2node_.end())
     << "node " << node->name << " already exists";
   name2node_[node->name] = node;
-}
-
-Node* Graph::AddNode(const string& name) {
-  Node* node = new Node(name);
-  AddNode(node);
   return node;
 }
 
@@ -140,64 +137,6 @@ void Graph::RemoveEdge(const string &src, const string& dst) {
   RemoveEdge(srcnode->second, dstnode->second);
 }
 
-string Graph::ToJson() const {
-  map<string, string> label;
-  return ToJson(label);
-}
-
-/*
-string Graph::ToJson(const map<string, string>& info) const {
-  map<string, int> nodeid;
-  string disp = "{\"directed\":1,\n";
-
-  // add nodes
-  disp += "\"nodes\":[\n";
-  bool first = true;
-  vector<string> colors = {"red", "blue", "black", "green"};
-
-  // see for more shapes at http://www.graphviz.org/doc/info/shapes.html
-  // vector<string> shapes = {"box", "ellipse"};
-  int id = 0;
-  for (auto node : nodes_) {
-    char str[1024];
-    string name = node->name;
-    string color = colors[(node->partition_id)%colors.size()];
-    string shape;
-    string origin = node->origin;
-    snprintf(str, sizeof(str),
-        "{\"id\":\"%s%s\", \"color\":\"%s\",\"shape\":\"%s\"}\n", name.c_str(),
-        info.find(name) != info.end() ? info.at(name).c_str() : "",
-        color.c_str(), "box");
-    if (!first)
-      disp += ",";
-    else
-      first = false;
-    disp += string(str);
-    nodeid[name] = id++;
-  }
-  disp += "]\n,";
-
-  // add edges
-  disp += "\"links\":[\n";
-  first = true;
-  for (auto src : nodes_) {
-    for (auto dst : src->dstnodes) {
-      char str[1024];
-      snprintf(str, sizeof(str),
-          "{\"source\":%d, \"target\":%d, \"color\":\"%s\"}\n",
-          nodeid[src->name], nodeid[dst->name], "black");
-      if (!first)
-        disp += ",";
-      else
-        first = false;
-      disp += string(str);
-    }
-  }
-  disp += "]\n";
-  return disp+"}";
-}
-*/
-
 // sort to make `bottom' nodes be placed in the front positions
 void Graph::Sort() {
   // nodes to be visited
@@ -271,6 +210,11 @@ const Graph Graph::Reverse() const {
     }
   return g;
 }
+string Graph::ToJson() const {
+  map<string, string> label;
+  return ToJson(label);
+}
+
 
 string Graph::ToJson(const map<string, string>& label) const {
   string disp = "{\"directed\":1,\n";

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/2d38cb30/src/worker.cc
----------------------------------------------------------------------
diff --git a/src/worker.cc b/src/worker.cc
index 23868da..c240e84 100644
--- a/src/worker.cc
+++ b/src/worker.cc
@@ -130,8 +130,8 @@ void Worker::InitSockets(const NeuralNet* net) {
   ConnectStub(grp_id_, id_, dealer_, kWorkerParam);
   for (auto layer : net->layers()) {
     if (layer->partition_id() == id_) {
-      if (typeid(layer) == typeid(BridgeDstLayer)
-          || typeid(layer) == typeid(BridgeSrcLayer)) {
+      if (typeid(*layer) == typeid(BridgeDstLayer)
+          || typeid(*layer) == typeid(BridgeSrcLayer)) {
         // TODO(wangsh): provide a unique socket id from cluster
         bridge_dealer_ = new Dealer(1);
         ConnectStub(grp_id_, id_, bridge_dealer_, kWorkerLayer);
@@ -142,7 +142,7 @@ void Worker::InitSockets(const NeuralNet* net) {
   // bind dealer to bridge layers
   if (bridge_dealer_ != nullptr) {
     for (auto dst : net->layers()) {
-      if (typeid(dst) == typeid(BridgeDstLayer)) {
+      if (typeid(*dst) == typeid(BridgeDstLayer)) {
         auto src = net->srclayers(dst)[0];
         name2bridge_[src->name()] = src;
         name2bridge_[dst->name()] = dst;
@@ -338,10 +338,6 @@ void BPWorker::Forward(int step, Phase phase, NeuralNet* net) {
   map<string, string> label;
   for (auto& layer : net->layers()) {
     if (layer->partition_id() == id_) {
-      // TODO(wangwei): enable this for model partition
-      // recv data from other workers
-      // if (typeid(*layer) == typeid(BridgeDstLayer))
-      //   ReceiveBlobs(true, false, dynamic_cast<BridgeLayer*>(layer), net);
       if (phase == kTrain) {
         // wait until param is updated
         for (Param* p : layer->GetParams()) {
@@ -352,11 +348,6 @@ void BPWorker::Forward(int step, Phase phase, NeuralNet* net) {
       layer->ComputeFeature(phase | kForward, net->srclayers(layer));
       if (job_conf_.debug() && grp_id_ == 0)
         label[layer->name()] = layer->ToString(true, phase | kForward);
-
-      // TODO(wangwei): enable this for model partition
-      // send data to other workers
-      // if (typeid(*layer) == typeid(BridgeSrcLayer))
-      //   SendBlobs(true, false, dynamic_cast<BridgeLayer*>(layer), net);
     }
   }
   if (label.size()) {
@@ -372,20 +363,11 @@ void BPWorker::Backward(int step, NeuralNet* net) {
   for (auto it = layers.rbegin(); it != layers.rend(); it++) {
     Layer* layer = *it;
     if (layer->partition_id() == id_) {
-      // TODO(wangwei): enable this for model partition
-      // send data to other workers
-      // if (typeid(layer) == typeid(BridgeSrcLayer))
-      //   ReceiveBlobs(false, true, layer, net);
-      // LOG(ERROR) << layer->name() << " backward";
       layer->ComputeGradient(kTrain | kBackward, net->srclayers(layer));
       if (job_conf_.debug() && grp_id_ == 0)
         label[layer->name()] = layer->ToString(true, kTrain | kBackward);
       for (Param* p : layer->GetParams())
         Update(step, p);
-      // TODO(wangwei): enable this for model partition
-      // recv data from other workers
-      // if (typeid(layer) == typeid(BridgeDstLayer))
-      //   SendBlobs(false, true, dynamic_cast<BridgeDstLayer*>(layer), net);
     }
   }
   if (label.size()) {
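
The typeid fix in Worker::InitSockets above matters because for a pointer p,
typeid(p) is the static pointer type and never matches a layer class, while
typeid(*p) resolves to the dynamic type of a polymorphic object. A standalone
sketch (not SINGA code):

  #include <cassert>
  #include <typeinfo>
  struct Layer { virtual ~Layer() = default; };
  struct BridgeDstLayer : Layer {};
  int main() {
    Layer* p = new BridgeDstLayer();
    assert(typeid(p) != typeid(BridgeDstLayer));   // Layer*: the old, broken test
    assert(typeid(*p) == typeid(BridgeDstLayer));  // dynamic type: the fix
    delete p;
    return 0;
  }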

