SINGA-81 Add Python Helper, which enables users to construct a model (JobProto) and run Singa in Python

- Add wrapper API "Driver::Train(const std::string Job_conf)" for python.
- Add wrapper API "Driver::Test(const std::string Job_conf)" for python.

- Python code (1) constructs a model (JobProto), and (2) runs Singa

- Users are expected to write their own model script, e.g., 'usermodel.py'
  . examples are provided, e.g., cifar10_cnn.py, mnist_mlp.py, mnist_rbm1.py
  . 'cluster.conf' is required to maintain cluster information (see below)
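  e.g., tool/python/examples/cluster.conf (added in this commit) contains only the cluster topology:
  {code}
  cluster {
    nworker_groups: 1
    nserver_groups: 1
    nworkers_per_group: 1
    nworkers_per_procs: 1
  }
  {code}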

- Users are supposed to run it as follows, e.g.,
  {code}
  cd SINGA_ROOT
  bin/singa-run.sh -conf tool/python/examples/cluster.conf -exe tool/python/examples/mnist_mlp.py
  {code}

- Note: in job.proto, the 'required' rule of the following fields should be changed to 'optional'
  . JobProto: name, neuralnet, train_one_batch, updater, train_steps
  . ClusterProto: workspace
     . the workspace field can be set either in (i) cluster.conf or (ii) the Python code
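  For example, after this change the workspace field in src/proto/job.proto reads (see the diff below):
  {code}
  // local workspace for checkpoint files and vis files
  optional string workspace = 10;
  {code}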

- __init__.py is required in the following directories
  . singa
  . singa/utils
  . singa/datasets
  . examples

- Add StoreResult() that takes care of training results
  . called in SingaRun(), which is invoked by fit() or evaluate()
  . reads the log file
  . stores accuracy, loss, ppl, se, etc. in dictionary format (see below)
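  e.g., a sketch of inspecting the returned dictionary, mirroring the commented-out display code in mnist_mlp.py (keys are step numbers, values are lists of metric dictionaries):
  {code}
  result = m.evaluate(X_test, batch_size=100, test_steps=10)
  for k, v in sorted(result.items(), key=lambda x: x[0]):
    print k, v   # step number -> [{'acc': ..., 'loss': ...}, ...]
  {code}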

- Parameter initialization
  . the Parameter class is used internally
     . Weight follows a Gaussian distribution by default
     . Bias is constant by default
  . As an option, users can explicitly specify parameters (e.g., *_parameter.py), as below
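  e.g., a sketch of both styles, following the README examples in this commit:
  {code}
  # default: weight ~ gaussian(mean=0, std=0.01), bias = constant(value=0)
  m.add(Dense(2500, activation='tanh'))

  # explicit: pass Parameter objects for weight and bias
  par = Parameter(init='uniform', scale=0.05)
  m.add(Dense(2500, w_param=par, b_param=par, activation='tanh'))
  {code}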

- Removed dataset/ae.py and dataset/rbm.py
  . RBM and Autoencoder examples use the MNIST dataset (see below)
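  e.g., the RBM examples load MNIST together with checkpoints of the lower RBM layers (taken from mnist_rbm3.py in this commit):
  {code}
  rbmid = 3
  pvalues = {'batchsize' : 100, 'shape' : 784, 'std_value' : 255}
  X_train, X_test, workspace = mnist.load_data(
              workspace = 'examples/rbm/rbm3',
              nb_rbm = rbmid,
              checkpoint_steps = 6000,
              **pvalues)
  {code}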


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/7d43e273
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/7d43e273
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/7d43e273

Branch: refs/heads/master
Commit: 7d43e27330581c3eecbd44a04f0c8691c3502ec6
Parents: 74a28dc
Author: chonho <[email protected]>
Authored: Wed Dec 23 12:09:23 2015 +0800
Committer: chonho <[email protected]>
Committed: Fri Jan 1 15:59:14 2016 +0800

----------------------------------------------------------------------
 include/singa/driver.h                        |   9 +
 src/proto/job.proto                           |   3 +-
 tool/python/README.md                         | 322 +++++++++++++++
 tool/python/examples/__init__.py              |   0
 tool/python/examples/cifar10_cnn.py           |  32 ++
 tool/python/examples/cifar10_cnn_parameter.py |  35 ++
 tool/python/examples/cluster.conf             |   6 +
 tool/python/examples/mnist_ae.py              |  27 ++
 tool/python/examples/mnist_mlp.py             |  33 ++
 tool/python/examples/mnist_mlp_parameter.py   |  28 ++
 tool/python/examples/mnist_mlp_test.py        |  30 ++
 tool/python/examples/mnist_rbm1.py            |  24 ++
 tool/python/examples/mnist_rbm1_parameter.py  |  26 ++
 tool/python/examples/mnist_rbm2.py            |  25 ++
 tool/python/examples/mnist_rbm2_parameter.py  |  28 ++
 tool/python/examples/mnist_rbm3.py            |  25 ++
 tool/python/examples/mnist_rbm3_parameter.py  |  28 ++
 tool/python/examples/mnist_rbm4.py            |  25 ++
 tool/python/examples/mnist_rbm4_parameter.py  |  27 ++
 tool/python/examples/rnnlm_usermodel.py       |  22 +
 tool/python/singa.py                          |   3 +-
 tool/python/singa/__init__.py                 |   0
 tool/python/singa/datasets/__init__.py        |   0
 tool/python/singa/datasets/cifar10.py         |  34 ++
 tool/python/singa/datasets/mnist.py           |  32 ++
 tool/python/singa/datasets/rnnlm.py           |  20 +
 tool/python/singa/driver.i                    |   1 +
 tool/python/singa/driver.py                   |   1 +
 tool/python/singa/driver_wrap.cxx             |  33 ++
 tool/python/singa/initializations.py          |  34 ++
 tool/python/singa/layer.py                    | 300 ++++++++++++++
 tool/python/singa/model.py                    | 455 +++++++++++++++++++++
 tool/python/singa/parameter.py                | 105 +++++
 tool/python/singa/utils/__init__.py           |   0
 tool/python/singa/utils/message.py            |  56 +++
 tool/python/singa/utils/utility.py            |  50 +++
 36 files changed, 1877 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/include/singa/driver.h
----------------------------------------------------------------------
diff --git a/include/singa/driver.h b/include/singa/driver.h
index f28f8c6..fb5a33a 100644
--- a/include/singa/driver.h
+++ b/include/singa/driver.h
@@ -93,6 +93,15 @@ class Driver {
    */
   void Test(const JobProto& job_conf);
   /**
+   * Used for the Python binding. Users can also directly call it as a C++ API.
+   *
+   * It completes the function as defined above but accepts a serialized
+   * string parameter.
+   *
+   * @param[in] str serialized string of the job configuration.
+   */
+  void Test(const std::string str);
+  /**
    * Setting the checkpoint field of the job configuration to resume training.
    *
    * The checkpoint folder will be searched to get the files for the latest

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/src/proto/job.proto
----------------------------------------------------------------------
diff --git a/src/proto/job.proto b/src/proto/job.proto
index 98c03f1..22d4bc5 100644
--- a/src/proto/job.proto
+++ b/src/proto/job.proto
@@ -150,7 +150,8 @@ message ClusterProto {
   optional int32 nworkers_per_procs = 5 [default = 1];
   optional int32 nservers_per_procs = 6 [default = 1];
   // local workspace for checkpoint files and vis files
-  required string workspace = 10;
+  //required string workspace = 10;
+  optional string workspace = 10;
 
   // servers and workers in different processes?
   optional bool server_worker_separate = 20 [default = false];

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/README.md
----------------------------------------------------------------------
diff --git a/tool/python/README.md b/tool/python/README.md
new file mode 100644
index 0000000..4308ed1
--- /dev/null
+++ b/tool/python/README.md
@@ -0,0 +1,322 @@
+## SINGA-81 Add Python Helper, which enables users to construct a model (JobProto) and run Singa in Python
+
+    SINGAROOT/tool/python
+    |-- pb2 (has job_pb2.py)
+    |-- singa 
+        |-- model.py 
+        |-- layer.py 
+        |-- parameter.py 
+        |-- initializations.py 
+        |-- utils 
+            |-- utility.py 
+            |-- message.py 
+        |-- datasets 
+            |-- cifar10.py 
+            |-- mnist.py 
+            |-- rbm.py 
+            |-- ae.py 
+    |-- examples 
+        |-- cifar10_cnn.py, mnist_mlp.py, mnist_rbm1.py, rnnlm_usermodel.py, etc. 
+
+### How to Run
+```
+bin/singa-run.sh -exe user_main.py -conf cluster.conf
+```
+The Python code, i.e., `user_main.py`, creates the JobProto object and passes it to Driver::Train.
+Currently, ./bin/singa-run.sh needs to get the cluster topology, hence we still need to pass a `cluster.conf` to it.
+The cluster.conf holds the configuration for a JobProto with all other fields empty except the cluster field.
+
+Note that the 'workspace' field in ClusterProto can be set either in (i) cluster.conf or (ii) the Python code.
+
+#### Examples
+```
+cd SINGA_ROOT
+bin/singa-run.sh -exe tool/python/examples/cifar10_cnn.py -conf tool/python/examples/cluster.conf
+```
+
+### Layer class (inherited)
+
+* Data
+* Dense
+* Activation
+* Convolution2D
+* MaxPooling2D
+* AvgPooling2D
+* LRN2D 
+* Dropout
+* RBM
+* Autoencoder
+
+#### For user-defined layers (IN PROGRESS) 
+
+The following classes are designed to construct user-defined layers for the RNNLM example.
+
+* Embedding
+* RNNLM
+* UserLossRNNLM
+
+
+### Model class
+
+* Model class has `jobconf` (JobProto) and `layers` (layer list)
+
+Methods in Model class
+
+* add
+       * add Layer into Model
+       * 2 subclasses: Sequential model and Energy model
+
+* compile      
+       * set Updater (i.e., optimizer) and Cluster (i.e., topology) components
+
+* fit 
+	* set Training data and parameter values for the training
+		* (optional) set Validation data and parameter values
+	* set Train_one_batch component
+	* specify `with_test` field if a user wants to run singa with test data simultaneously
+	* [TODO] receive train/validation results, e.g., accuracy, loss, ppl, etc. 
+
+* evaluate
+	* set Testing data and parameter values for the testing
+	* specify `checkpoint_path` field if a user wants to run singa only for testing
+	* [TODO] receive test results, e.g., accuracy, loss, ppl, etc. 
+
+#### Results
+
+fit() and evaluate() return `result`, a dictionary containing
+
+* [key]: step number
+* [value]: a list of dictionaries
+	* 'acc' for accuracy
+	* 'loss' for loss
+	* 'ppl' for perplexity
+	* 'se' for squared error
+
+#### Other classes
+
+* Store
+* Algorithm
+* Updater
+* SGD (inherited)
+* AdaGrad (inherited)
+* Cluster
+
+
+## MLP Example
+
+An example (to generate job.conf for mnist)
+
+```
+X_train, X_test, workspace = mnist.load_data()
+
+m = Sequential('mlp', sys.argv)  
+
+m.add(Dense(2500, init='uniform', activation='tanh'))
+m.add(Dense(2000, init='uniform', activation='tanh'))
+m.add(Dense(1500, init='uniform', activation='tanh'))
+m.add(Dense(1000, init='uniform', activation='tanh'))
+m.add(Dense(500,  init='uniform', activation='tanh'))
+m.add(Dense(10, init='uniform', activation='softmax')) 
+
+sgd = SGD(lr=0.001, lr_type='step')
+topo = Cluster(workspace)
+m.compile(loss='categorical_crossentropy', optimizer=sgd, cluster=topo)
+m.fit(X_train, nb_epoch=1000, with_test=True)
+result = m.evaluate(X_test, batch_size=100, test_steps=10, test_freq=60)
+```
+
+## CNN Example
+
+An example (to generate job.conf for cifar10)
+
+```
+X_train, X_test, workspace = cifar10.load_data()
+
+m = Sequential('cnn', sys.argv)
+
+m.add(Convolution2D(32, 5, 1, 2, w_std=0.0001, b_lr=2))
+m.add(MaxPooling2D(pool_size=(3,3), stride=2))
+m.add(Activation('relu'))
+m.add(LRN2D(3, alpha=0.00005, beta=0.75))
+
+m.add(Convolution2D(32, 5, 1, 2, b_lr=2))
+m.add(Activation('relu'))
+m.add(AvgPooling2D(pool_size=(3,3), stride=2))
+m.add(LRN2D(3, alpha=0.00005, beta=0.75))
+
+m.add(Convolution2D(64, 5, 1, 2))
+m.add(Activation('relu'))
+m.add(AvgPooling2D(pool_size=(3,3), stride=2))
+
+m.add(Dense(10, w_wd=250, b_lr=2, b_wd=0, activation='softmax'))
+
+sgd = SGD(decay=0.004, lr_type='fixed', step=(0,60000,65000), step_lr=(0.001,0.0001,0.00001))
+topo = Cluster(workspace)
+m.compile(updater=sgd, cluster=topo)
+m.fit(X_train, nb_epoch=1000, with_test=True)
+result = m.evaluate(X_test, 1000, test_steps=30, test_freq=300)
+```
+
+
+## RBM Example
+```
+rbmid = 3
+X_train, X_test, workspace = mnist.load_data(nb_rbm=rbmid)
+m = Energy('rbm'+str(rbmid), sys.argv)
+
+out_dim = [1000, 500, 250]
+m.add(RBM(out_dim, w_std=0.1, b_wd=0))
+
+sgd = SGD(lr=0.1, decay=0.0002, momentum=0.8)
+topo = Cluster(workspace)
+m.compile(optimizer=sgd, cluster=topo)
+m.fit(X_train, alg='cd', nb_epoch=6000)
+```
+
+## AutoEncoder Example
+```
+rbmid = 4
+X_train, X_test, workspace = mnist.load_data(nb_rbm=rbmid+1)
+m = Sequential('autoencoder', sys.argv)
+
+hid_dim = [1000, 500, 250, 30]
+m.add(Autoencoder(hid_dim, out_dim=784, activation='sigmoid', param_share=True))
+
+agd = AdaGrad(lr=0.01)
+topo = Cluster(workspace)
+m.compile(loss='mean_squared_error', optimizer=agd, cluster=topo)
+m.fit(X_train, alg='bp', nb_epoch=12200)
+```
+
+### TIPS
+
+Hidden layers for MLP can be written as
+```
+for n in [2500, 2000, 1500, 1000, 500]:
+  m.add(Dense(n, init='uniform', activation='tanh'))
+m.add(Dense(10, init='uniform', activation='softmax'))
+```
+
+Activation layer can be specified separately
+```
+m.add(Dense(2500, init='uniform'))
+m.add(Activation('tanh'))
+```
+
+Users can explicitly specify weight and bias, and their values
+
+For example, for MLP:
+```
+par = Parameter(init='uniform', scale=0.05)
+m.add(Dense(2500, w_param=par, b_param=par, activation='tanh'))
+m.add(Dense(2000, w_param=par, b_param=par, activation='tanh'))
+m.add(Dense(1500, w_param=par, b_param=par, activation='tanh'))
+m.add(Dense(1000, w_param=par, b_param=par, activation='tanh'))
+m.add(Dense(500, w_param=par, b_param=par, activation='tanh'))
+m.add(Dense(10, w_param=par, b_param=par, activation='softmax'))
+```
+
+For example, for Cifar10:
+```
+parw = Parameter(init='gaussian', std=0.0001)
+parb = Parameter(init='constant', value=0)
+m.add(Convolution2D(32, 5, 1, 2, w_param=parw, b_param=parb, b_lr=2))
+m.add(MaxPooling2D(pool_size=(3,3), stride=2))
+m.add(Activation('relu'))
+m.add(LRN2D(3, alpha=0.00005, beta=0.75))
+
+parw.update(std=0.01)
+m.add(Convolution2D(32, 5, 1, 2, w_param=parw, b_param=parb))
+m.add(Activation('relu'))
+m.add(AvgPooling2D(pool_size=(3,3), stride=2))
+m.add(LRN2D(3, alpha=0.00005, beta=0.75))
+
+m.add(Convolution2D(64, 5, 1, 2, w_param=parw, b_param=parb, b_lr=1))
+m.add(Activation('relu'))
+m.add(AvgPooling2D(pool_size=(3,3), stride=2))
+
+m.add(Dense(10, w_param=parw, w_wd=250, b_param=parb, b_lr=2, b_wd=0, activation='softmax'))
+```
+
+
+Alternative ways to add a Data layer
+```
+X_train, X_test = mnist.load_data()  # parameter values are set in load_data()
+m.fit(X_train, ...)                  # Data layer for training is added
+m.evaluate(X_test, ...)              # Data layer for testing is added
+```
+```
+X_train, X_test = mnist.load_data()  # parameter values are set in load_data()
+m.add(X_train)                       # explicitly add Data layer
+m.add(X_test)                        # explicitly add Data layer
+```
+```
+store = Store(path='train.bin', batch_size=64, ...)        # parameter values are set explicitly
+m.add(Data(load='recordinput', phase='train', conf=store)) # Data layer is added
+store = Store(path='test.bin', batch_size=100, ...)        # parameter values are set explicitly
+m.add(Data(load='recordinput', phase='test', conf=store))  # Data layer is added
+```
+
+### Parameter class
+
+Users need to set parameter and initial values. For example,
+
+* Parameter 
+       * lr = (float) // learning rate
+       * wd = (float) // weight decay
+
+* Parameter initialization
+	* init = (string) // one of the types, 'uniform', 'constant', 'gaussian' 
+       * for uniform [default]
+               * high = (float)
+               * low = (float)
+       * for constant
+               * value = (float)
+       * for gaussian
+               * mean = (float)
+               * std = (float)
+
+* Weight (w_param) is Gaussian with mean=0, std=0.01 by default
+
+* Bias (b_param) is constant with value=0 by default
+
+* How to update the parameter fields
+       * for updating Weight, put 'w_' in front of field name
+       * for updating Bias, put 'b_' in front of field name
+
+Several ways to set Parameter values
+```
+m.add(Dense(10, w_mean=1, w_std=0.1, w_lr=2, w_wd=10, ...))
+```
+```
+parw = Parameter(lr=2, wd=10, init='constant', value=0)
+m.add(Dense(10, w_param=parw, ...))
+```
+```
+parw = Parameter(init='constant', value=0)
+m.add(Dense(10, w_param=parw, w_lr=2, w_wd=10, ...))
+```
+
+### Example to run singa
+
+(1) Run singa for training
+```
+m.fit(X_train, nb_epoch=1000)
+```
+
+(2) Run singa for training and validation
+```
+m.fit(X_train, validate_data=X_valid, nb_epoch=1000)
+```
+
+(3) Run singa for test while training 
+```
+m.fit(X_train, nb_epoch=1000, with_test=True)
+result = m.evaluate(X_test, batch_size=100, test_steps=100)
+```
+
+(4) Run singa for test only
+Assume a checkpoint exists after training
+```
+result = m.evaluate(X_test, batch_size=100, checkpoint_path=workspace+'/checkpoint/step100-worker0')
+```

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/examples/__init__.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/__init__.py b/tool/python/examples/__init__.py
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/examples/cifar10_cnn.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/cifar10_cnn.py b/tool/python/examples/cifar10_cnn.py
new file mode 100755
index 0000000..9ef552b
--- /dev/null
+++ b/tool/python/examples/cifar10_cnn.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(__file__),'..')) 
+from singa.model import *
+from singa.datasets import cifar10
+
+X_train, X_test, workspace = cifar10.load_data()
+
+m = Sequential('cifar10-cnn', sys.argv)
+
+m.add(Convolution2D(32, 5, 1, 2, w_std=0.0001, b_lr=2))
+m.add(MaxPooling2D(pool_size=(3,3), stride=2))
+m.add(Activation('relu'))
+m.add(LRN2D(3, alpha=0.00005, beta=0.75))
+
+m.add(Convolution2D(32, 5, 1, 2, b_lr=2))
+m.add(Activation('relu'))
+m.add(AvgPooling2D(pool_size=(3,3), stride=2))
+m.add(LRN2D(3, alpha=0.00005, beta=0.75))
+
+m.add(Convolution2D(64, 5, 1, 2))
+m.add(Activation('relu'))
+m.add(AvgPooling2D(pool_size=(3,3), stride=2))
+
+m.add(Dense(10, w_wd=250, b_lr=2, b_wd=0, activation='softmax'))
+
+sgd = SGD(decay=0.004, lr_type='fixed', step=(0,60000,65000), step_lr=(0.001,0.0001,0.00001))
+topo = Cluster(workspace)
+m.compile(loss='categorical_crossentropy', optimizer=sgd, cluster=topo)
+m.fit(X_train, nb_epoch=1000, with_test=True)
+result = m.evaluate(X_test, test_steps=100, test_freq=300)
+

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/examples/cifar10_cnn_parameter.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/cifar10_cnn_parameter.py b/tool/python/examples/cifar10_cnn_parameter.py
new file mode 100755
index 0000000..dd03f5c
--- /dev/null
+++ b/tool/python/examples/cifar10_cnn_parameter.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(__file__),'..')) 
+from singa.model import *
+from singa.datasets import cifar10
+
+X_train, X_test, workspace = cifar10.load_data()
+
+m = Sequential('cifar10-cnn', sys.argv)
+
+parw = Parameter(init='gaussian', std=0.0001)
+parb = Parameter(init='constant')
+m.add(Convolution2D(32, 5, 1, 2, w_param=parw, b_param=parb, b_lr=2))
+m.add(MaxPooling2D(pool_size=(3,3), stride=2))
+m.add(Activation('relu'))
+m.add(LRN2D(3, alpha=0.00005, beta=0.75))
+
+parw.update(std=0.01)
+m.add(Convolution2D(32, 5, 1, 2, w_param=parw, b_param=parb))
+m.add(Activation('relu'))
+m.add(AvgPooling2D(pool_size=(3,3), stride=2))
+m.add(LRN2D(3, alpha=0.00005, beta=0.75))
+
+m.add(Convolution2D(64, 5, 1, 2, w_param=parw, b_param=parb, b_lr=1))
+m.add(Activation('relu'))
+m.add(AvgPooling2D(pool_size=(3,3), stride=2))
+
+m.add(Dense(10, w_param=parw, w_wd=250, b_param=parb, b_lr=2, b_wd=0, activation='softmax'))
+
+sgd = SGD(decay=0.004, lr_type='fixed', step=(0,60000,65000), step_lr=(0.001,0.0001,0.00001))
+topo = Cluster(workspace)
+m.compile(loss='categorical_crossentropy', optimizer=sgd, cluster=topo)
+m.fit(X_train, nb_epoch=100, with_test=True)
+result = m.evaluate(X_test, test_steps=10, test_freq=300)
+

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/examples/cluster.conf
----------------------------------------------------------------------
diff --git a/tool/python/examples/cluster.conf b/tool/python/examples/cluster.conf
new file mode 100644
index 0000000..16623d8
--- /dev/null
+++ b/tool/python/examples/cluster.conf
@@ -0,0 +1,6 @@
+cluster {
+  nworker_groups: 1
+  nserver_groups: 1
+  nworkers_per_group: 1
+  nworkers_per_procs: 1
+}

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/examples/mnist_ae.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/mnist_ae.py b/tool/python/examples/mnist_ae.py
new file mode 100755
index 0000000..0b7e590
--- /dev/null
+++ b/tool/python/examples/mnist_ae.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(__file__),'..')) 
+from singa.model import *
+from singa.datasets import mnist 
+
+# Sample parameter values for Autoencoder example
+rbmid = 4
+pvalues = {'batchsize' : 100, 'shape' : 784, 'std_value' : 255}
+X_train, X_test, workspace = mnist.load_data(
+            workspace = 'examples/rbm/autoencoder',
+            nb_rbm = rbmid+1,
+            checkpoint_steps = 6000,
+            **pvalues)
+
+m = Sequential('autoencoder', sys.argv)
+
+hid_dim = [1000, 500, 250, 30]
+m.add(Autoencoder(hid_dim, out_dim=784, activation='sigmoid', param_share=True))
+
+
+agd = AdaGrad(lr=0.01)
+topo = Cluster(workspace)
+m.compile(loss='mean_squared_error', optimizer=agd, cluster=topo)
+m.fit(X_train, alg='bp', nb_epoch=12200, with_test=True)
+result = m.evaluate(X_test, test_steps=100, test_freq=1000)
+

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/examples/mnist_mlp.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/mnist_mlp.py b/tool/python/examples/mnist_mlp.py
new file mode 100755
index 0000000..da5ccce
--- /dev/null
+++ b/tool/python/examples/mnist_mlp.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(__file__),'..')) 
+from singa.model import * 
+from singa.datasets import mnist 
+
+# Sample parameter values for Mnist MLP example
+pvalues = {'batchsize' : 64, 'shape' : 784, 'random_skip' : 5000,
+           'std_value' : 127.5, 'mean_value' : 127.5}
+X_train, X_test, workspace = mnist.load_data(**pvalues)
+
+m = Sequential('mlp', argv=sys.argv)
+
+''' Weight and Bias are initialized by
+    a uniform distribution with scale=0.05 by default
+'''
+m.add(Dense(2500, init='uniform', activation='tanh'))
+m.add(Dense(2000, init='uniform', activation='tanh'))
+m.add(Dense(1500, init='uniform', activation='tanh'))
+m.add(Dense(1000, init='uniform', activation='tanh'))
+m.add(Dense(500,  init='uniform', activation='tanh'))
+m.add(Dense(10, init='uniform', activation='softmax')) 
+
+sgd = SGD(lr=0.001, lr_type='step')
+topo = Cluster(workspace)
+m.compile(loss='categorical_crossentropy', optimizer=sgd, cluster=topo)
+
+m.fit(X_train, nb_epoch=100, with_test=True)
+result = m.evaluate(X_test, batch_size=100, test_steps=10)
+
+#e.g., display result
+#for k, v in sorted(result.items(), key=lambda x: x[0]):
+#  print k, v

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/examples/mnist_mlp_parameter.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/mnist_mlp_parameter.py b/tool/python/examples/mnist_mlp_parameter.py
new file mode 100755
index 0000000..24fc960
--- /dev/null
+++ b/tool/python/examples/mnist_mlp_parameter.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(__file__),'..')) 
+from singa.model import * 
+from singa.datasets import mnist 
+
+# Sample parameter values for Mnist MLP example
+pvalues = {'batchsize' : 64, 'shape' : 784,
+           'random_skip' : 5000,
+           'std_value' : 127.5, 'mean_value' : 127.5}
+X_train, X_test, workspace = mnist.load_data(**pvalues)
+
+m = Sequential('mlp', argv=sys.argv)
+
+par = Parameter(init='uniform', scale=0.05)
+m.add(Dense(2500, w_param=par, b_param=par, activation='tanh')) 
+m.add(Dense(2000, w_param=par, b_param=par, activation='tanh')) 
+m.add(Dense(1500, w_param=par, b_param=par, activation='tanh')) 
+m.add(Dense(1000, w_param=par, b_param=par, activation='tanh')) 
+m.add(Dense(500, w_param=par, b_param=par, activation='tanh')) 
+m.add(Dense(10, w_param=par, b_param=par, activation='softmax')) 
+
+sgd = SGD(lr=0.001, lr_type='step')
+topo = Cluster(workspace)
+m.compile(loss='categorical_crossentropy', optimizer=sgd, cluster=topo)
+
+m.fit(X_train, nb_epoch=100, with_test=True)
+result = m.evaluate(X_test, batch_size=100, test_steps=10)

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/examples/mnist_mlp_test.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/mnist_mlp_test.py b/tool/python/examples/mnist_mlp_test.py
new file mode 100755
index 0000000..67cf3b3
--- /dev/null
+++ b/tool/python/examples/mnist_mlp_test.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(__file__),'..')) 
+from singa.model import * 
+from singa.datasets import mnist 
+
+# Sample parameter values for Mnist MLP example
+pvalues = {'batchsize' : 64, 'shape' : 784,
+           'std_value' : 127.5, 'mean_value' : 127.5}
+X_train, X_test, workspace = mnist.load_data(**pvalues)
+
+m = Sequential('mlp', argv=sys.argv)
+
+m.add(Dense(2500, init='uniform', activation='tanh'))
+m.add(Dense(2000, init='uniform', activation='tanh'))
+m.add(Dense(1500, init='uniform', activation='tanh'))
+m.add(Dense(1000, init='uniform', activation='tanh'))
+m.add(Dense(500,  init='uniform', activation='tanh'))
+m.add(Dense(10, init='uniform', activation='softmax')) 
+
+sgd = SGD(lr=0.001, lr_type='step')
+topo = Cluster(workspace)
+m.compile(loss='categorical_crossentropy', optimizer=sgd, cluster=topo)
+
+''' For doing test only, users normally set a checkpoint path;
+    e.g., assume that a checkpoint exists from
+          m.fit(X_train, nb_epoch=100, checkpoint_freq=100)
+'''
+path = workspace+'/checkpoint/step100-worker0'
+result = m.evaluate(X_test, batch_size=100, test_steps=100, checkpoint_path=path)

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/examples/mnist_rbm1.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/mnist_rbm1.py b/tool/python/examples/mnist_rbm1.py
new file mode 100755
index 0000000..765be8b
--- /dev/null
+++ b/tool/python/examples/mnist_rbm1.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(__file__),'..')) 
+from singa.model import *
+from singa.datasets import mnist 
+
+rbmid = 1
+pvalues = {'batchsize' : 100, 'shape' : 784, 'std_value' : 255}
+X_train, X_test, workspace = mnist.load_data(
+            workspace = 'examples/rbm/rbm1',
+            nb_rbm = rbmid,
+            checkpoint_steps = 6000,
+            **pvalues)
+
+m = Energy('rbm'+str(rbmid), sys.argv)
+
+m.add(RBM(1000, w_std=0.1, b_wd=0)) 
+
+sgd = SGD(lr=0.1, decay=0.0002, momentum=0.8)
+topo = Cluster(workspace)
+m.compile(optimizer=sgd, cluster=topo)
+m.fit(X_train, alg='cd', nb_epoch=6000)
+#result = m.evaluate(X_test, test_steps=100, test_freq=500)
+

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/examples/mnist_rbm1_parameter.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/mnist_rbm1_parameter.py b/tool/python/examples/mnist_rbm1_parameter.py
new file mode 100755
index 0000000..54fe421
--- /dev/null
+++ b/tool/python/examples/mnist_rbm1_parameter.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(__file__),'..')) 
+from singa.model import *
+from singa.datasets import mnist 
+
+rbmid = 1
+pvalues = {'batchsize' : 100, 'shape' : 784, 'std_value' : 255}
+X_train, X_test, workspace = mnist.load_data(
+            workspace = 'examples/rbm/rbm1',
+            nb_rbm = rbmid,
+            checkpoint_steps = 6000,
+            **pvalues)
+
+m = Energy('rbm'+str(rbmid), sys.argv)
+
+parw = Parameter(init='gaussian', mean=0, std=0.1)
+parb = Parameter(wd=0, init='constant', value=0)
+m.add(RBM(1000, w_param=parw, b_param=parb)) 
+
+sgd = SGD(lr=0.1, decay=0.0002, momentum=0.8)
+topo = Cluster(workspace)
+m.compile(optimizer=sgd, cluster=topo)
+m.fit(X_train, alg='cd', nb_epoch=6000)
+#result = m.evaluate(X_test, test_steps=100, test_freq=500)
+

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/examples/mnist_rbm2.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/mnist_rbm2.py b/tool/python/examples/mnist_rbm2.py
new file mode 100755
index 0000000..f4d187f
--- /dev/null
+++ b/tool/python/examples/mnist_rbm2.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(__file__),'..')) 
+from singa.model import *
+from singa.datasets import mnist 
+
+rbmid = 2
+pvalues = {'batchsize' : 100, 'shape' : 784, 'std_value' : 255}
+X_train, X_test, workspace = mnist.load_data(
+            workspace = 'examples/rbm/rbm2',
+            nb_rbm = rbmid,
+            checkpoint_steps = 6000,
+            **pvalues)
+
+m = Energy('rbm'+str(rbmid), sys.argv)
+
+out_dim = [1000, 500]
+m.add(RBM(out_dim, w_std=0.1, b_wd=0)) 
+
+sgd = SGD(lr=0.1, decay=0.0002, momentum=0.8)
+topo = Cluster(workspace)
+m.compile(optimizer=sgd, cluster=topo)
+m.fit(X_train, alg='cd', nb_epoch=6000)
+#result = m.evaluate(X_test, test_steps=100, test_freq=500)
+

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/examples/mnist_rbm2_parameter.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/mnist_rbm2_parameter.py b/tool/python/examples/mnist_rbm2_parameter.py
new file mode 100755
index 0000000..9837836
--- /dev/null
+++ b/tool/python/examples/mnist_rbm2_parameter.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(__file__),'..')) 
+from singa.model import *
+from singa.datasets import mnist 
+
+rbmid = 2
+pvalues = {'batchsize' : 100, 'shape' : 784, 'std_value' : 255}
+X_train, X_test, workspace = mnist.load_data(
+            workspace = 'examples/rbm/rbm2',
+            nb_rbm = rbmid,
+            checkpoint_steps = 6000,
+            **pvalues)
+
+m = Energy('rbm'+str(rbmid), sys.argv)
+
+out_dim = [1000, 500]
+parw = Parameter(init='gaussian', mean=0, std=0.1)
+parb = Parameter(wd=0, init='constant', value=0)
+m.add(RBM(out_dim, w_param=parw, b_param=parb)) 
+
+
+sgd = SGD(lr=0.1, decay=0.0002, momentum=0.8)
+topo = Cluster(workspace)
+m.compile(optimizer=sgd, cluster=topo)
+m.fit(X_train, alg='cd', nb_epoch=6000)
+#result = m.evaluate(X_test, test_steps=100, test_freq=500)
+

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/examples/mnist_rbm3.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/mnist_rbm3.py b/tool/python/examples/mnist_rbm3.py
new file mode 100755
index 0000000..48bbe38
--- /dev/null
+++ b/tool/python/examples/mnist_rbm3.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(__file__),'..')) 
+from singa.model import *
+from singa.datasets import mnist 
+
+rbmid = 3
+pvalues = {'batchsize' : 100, 'shape' : 784, 'std_value' : 255}
+X_train, X_test, workspace = mnist.load_data(
+            workspace = 'examples/rbm/rbm3',
+            nb_rbm = rbmid,
+            checkpoint_steps = 6000,
+            **pvalues)
+
+m = Energy('rbm'+str(rbmid), sys.argv)
+
+out_dim = [1000, 500, 250]
+m.add(RBM(out_dim, w_std=0.1, b_wd=0)) 
+
+sgd = SGD(lr=0.1, decay=0.0002, momentum=0.8)
+topo = Cluster(workspace)
+m.compile(optimizer=sgd, cluster=topo)
+m.fit(X_train, alg='cd', nb_epoch=6000)
+#result = m.evaluate(X_test, test_steps=100, test_freq=500)
+

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/examples/mnist_rbm3_parameter.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/mnist_rbm3_parameter.py b/tool/python/examples/mnist_rbm3_parameter.py
new file mode 100755
index 0000000..6c9a378
--- /dev/null
+++ b/tool/python/examples/mnist_rbm3_parameter.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(__file__),'..')) 
+from singa.model import *
+from singa.datasets import mnist 
+
+rbmid = 3
+pvalues = {'batchsize' : 100, 'shape' : 784, 'std_value' : 255}
+X_train, X_test, workspace = mnist.load_data(
+            workspace = 'examples/rbm/rbm3',
+            nb_rbm = rbmid,
+            checkpoint_steps = 6000,
+            **pvalues)
+
+m = Energy('rbm'+str(rbmid), sys.argv)
+
+out_dim = [1000, 500, 250]
+parw = Parameter(init='gaussian', mean=0, std=0.1)
+parb = Parameter(wd=0, init='constant', value=0)
+m.add(RBM(out_dim, w_param=parw, b_param=parb)) 
+
+
+sgd = SGD(lr=0.1, decay=0.0002, momentum=0.8)
+topo = Cluster(workspace)
+m.compile(optimizer=sgd, cluster=topo)
+m.fit(X_train, alg='cd', nb_epoch=6000)
+#result = m.evaluate(X_test, test_steps=100, test_freq=500)
+

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/examples/mnist_rbm4.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/mnist_rbm4.py b/tool/python/examples/mnist_rbm4.py
new file mode 100755
index 0000000..f12b739
--- /dev/null
+++ b/tool/python/examples/mnist_rbm4.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(__file__),'..')) 
+from singa.model import *
+from singa.datasets import mnist 
+
+rbmid = 4
+pvalues = {'batchsize' : 100, 'shape' : 784, 'std_value' : 255}
+X_train, X_test, workspace = mnist.load_data(
+            workspace = 'examples/rbm/rbm'+str(rbmid),
+            nb_rbm = rbmid,
+            checkpoint_steps = 6000,
+            **pvalues)
+
+m = Energy('rbm'+str(rbmid), sys.argv)
+
+out_dim = [1000, 500, 250, 30]
+m.add(RBM(out_dim, sampling='gaussian', w_std=0.1, b_wd=0)) 
+
+sgd = SGD(lr=0.001, decay=0.0002, momentum=0.8)
+topo = Cluster(workspace)
+m.compile(optimizer=sgd, cluster=topo)
+m.fit(X_train, alg='cd', nb_epoch=6000)
+#result = m.evaluate(X_test, test_steps=100, test_freq=500)
+

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/examples/mnist_rbm4_parameter.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/mnist_rbm4_parameter.py b/tool/python/examples/mnist_rbm4_parameter.py
new file mode 100755
index 0000000..2a7be1d
--- /dev/null
+++ b/tool/python/examples/mnist_rbm4_parameter.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(__file__),'..')) 
+from singa.model import *
+from singa.datasets import mnist 
+
+rbmid = 4
+pvalues = {'batchsize' : 100, 'shape' : 784, 'std_value' : 255}
+X_train, X_test, workspace = mnist.load_data(
+            workspace = 'examples/rbm/rbm'+str(rbmid),
+            nb_rbm = rbmid,
+            checkpoint_steps = 6000,
+            **pvalues)
+
+m = Energy('rbm'+str(rbmid), sys.argv)
+
+out_dim = [1000, 500, 250, 30]
+parw = Parameter(init='gaussian', mean=0, std=0.1)
+parb = Parameter(wd=0, init='constant', value=0)
+m.add(RBM(out_dim, w_param=parw, b_param=parb)) 
+
+sgd = SGD(lr=0.1, decay=0.0002, momentum=0.8)
+topo = Cluster(workspace)
+m.compile(optimizer=sgd, cluster=topo)
+m.fit(X_train, alg='cd', nb_epoch=6000)
+#result = m.evaluate(X_test, test_steps=100, test_freq=500)
+

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/examples/rnnlm_usermodel.py
----------------------------------------------------------------------
diff --git a/tool/python/examples/rnnlm_usermodel.py b/tool/python/examples/rnnlm_usermodel.py
new file mode 100755
index 0000000..1b49321
--- /dev/null
+++ b/tool/python/examples/rnnlm_usermodel.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(__file__),'..')) 
+from singa.model import *
+from singa.datasets import rnnlm 
+
+vocab_size = 3720
+
+X_train, X_valid, workspace = rnnlm.load_data()
+
+m = Sequential('rnnlm', sys.argv)
+
+parw = Parameter(init='uniform', range=0.3)
+m.add(Embedding(in_dim=vocab_size, out_dim=15, w_param=parw))
+m.add(RNNLM(1, w_param=parw))
+
+sgd = SGD(lr_type='fixed', step=(0,48810,56945,65080,73215), step_lr=(0.1,0.05,0.025,0.0125,0.00625))
+topo = Cluster(workspace)
+m.compile(loss='user_loss_rnnlm', in_dim=vocab_size, nclass=100, optimizer=sgd, cluster=topo)
+
+m.fit(X_train, validate=X_valid, validate_steps=683, nb_epoch=81350, execpath='examples/rnnlm/rnnlm.bin')
+#result = m.evaluate(X_valid, validate_steps=683, validate_freq=8135, execpath='examples/rnnlm/rnnlm.bin')

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/singa.py
----------------------------------------------------------------------
diff --git a/tool/python/singa.py b/tool/python/singa.py
index c179192..6d7fbdf 100755
--- a/tool/python/singa.py
+++ b/tool/python/singa.py
@@ -40,4 +40,5 @@ if __name__ == '__main__':
     d = driver.Driver()
     d.InitLog(sys.argv[0])
     d.Init(sys.argv)
-    d.Train(False,b)
+#    d.Train(False,b)
+    d.Test(b)

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/singa/__init__.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/__init__.py b/tool/python/singa/__init__.py
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/singa/datasets/__init__.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/datasets/__init__.py b/tool/python/singa/datasets/__init__.py
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/singa/datasets/cifar10.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/datasets/cifar10.py b/tool/python/singa/datasets/cifar10.py
new file mode 100644
index 0000000..65bcd60
--- /dev/null
+++ b/tool/python/singa/datasets/cifar10.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+from singa.model import *
+
+def load_data(
+         workspace = None, 
+         backend = 'kvfile',
+         batchsize = 64,
+         random = 5000,
+         shape = (3, 32, 32),
+         std = 127.5,
+         mean = 127.5
+      ):
+
+  # using cifar10 dataset
+  data_dir = 'examples/cifar10'
+  path_train = data_dir + '/train_data.bin'
+  path_test  = data_dir + '/test_data.bin'
+  path_mean  = data_dir + '/image_mean.bin'
+  if workspace == None: workspace = data_dir
+
+  store = Store(path=path_train, mean_file=path_mean, backend=backend,
+              random_skip=random, batchsize=batchsize,
+              shape=shape) 
+
+  data_train = Data(load='recordinput', phase='train', conf=store)
+
+  store = Store(path=path_test, mean_file=path_mean, backend=backend,
+              batchsize=batchsize,
+              shape=shape) 
+
+  data_test = Data(load='recordinput', phase='test', conf=store)
+
+  return data_train, data_test, workspace
+

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/singa/datasets/mnist.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/datasets/mnist.py b/tool/python/singa/datasets/mnist.py
new file mode 100644
index 0000000..c8695ec
--- /dev/null
+++ b/tool/python/singa/datasets/mnist.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+from singa.model import * 
+
+def load_data(
+     workspace = None,
+     backend = 'kvfile',
+     nb_rbm = 0,  # the number of layers for RBM and Autoencoder 
+     checkpoint_steps = 0, 
+     **pvalues
+   ):
+
+  # using mnist dataset
+  data_dir = 'examples/mnist'
+  path_train = data_dir + '/train_data.bin'
+  path_test  = data_dir + '/test_data.bin'
+  if workspace == None: workspace = data_dir
+
+  # checkpoint path to load
+  checkpoint_list = None 
+  if checkpoint_steps > 0:
+    workerid = 0
+    checkpoint_list = [] 
+    for i in range(nb_rbm-1, 0, -1):
+      checkpoint_list.append('examples/rbm/rbm{0}/checkpoint/step{1}-worker{2}'.format(str(i),checkpoint_steps,workerid))
+
+  store = Store(path=path_train, backend=backend, **pvalues)
+  data_train = Data(load='recordinput', phase='train', conf=store, checkpoint=checkpoint_list)
+
+  store = Store(path=path_test, backend=backend, **pvalues)
+  data_test = Data(load='recordinput', phase='test', conf=store)
+
+  return data_train, data_test, workspace

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/singa/datasets/rnnlm.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/datasets/rnnlm.py b/tool/python/singa/datasets/rnnlm.py
new file mode 100644
index 0000000..ef8142a
--- /dev/null
+++ b/tool/python/singa/datasets/rnnlm.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+from singa.model import *
+
+def load_data(
+         workspace = 'examples/rnnlm',
+         backend = 'kvfile',
+         max_window = 10
+      ):
+
+  path_train = workspace + '/train_data.bin'
+  path_valid = workspace + '/valid_data.bin'
+  path_test  = workspace + '/test_data.bin'
+
+
+  data_train = Data(load='kData', phase='train', path=path_train, backend=backend, max_window=max_window)
+
+  data_valid = Data(load='kData', phase='val', path=path_valid, max_window=max_window)
+
+  return data_train, data_valid, workspace
+

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/singa/driver.i
----------------------------------------------------------------------
diff --git a/tool/python/singa/driver.i b/tool/python/singa/driver.i
index 56599b6..f756d57 100644
--- a/tool/python/singa/driver.i
+++ b/tool/python/singa/driver.i
@@ -37,6 +37,7 @@ public:
 void Train(bool resume, const std::string job_conf);
 void Init(int argc, char **argv);
 void InitLog(char* arg);
+void Test(const std::string job_conf);
 };
 }
 

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/singa/driver.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/driver.py b/tool/python/singa/driver.py
index c5f3b4f..c1aac7f 100644
--- a/tool/python/singa/driver.py
+++ b/tool/python/singa/driver.py
@@ -107,6 +107,7 @@ class Driver(_object):
     def Train(self, *args): return _driver.Driver_Train(self, *args)
     def Init(self, *args): return _driver.Driver_Init(self, *args)
     def InitLog(self, *args): return _driver.Driver_InitLog(self, *args)
+    def Test(self, *args): return _driver.Driver_Test(self, *args)
     def __init__(self): 
         this = _driver.new_Driver()
         try: self.this.append(this)

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/singa/driver_wrap.cxx
----------------------------------------------------------------------
diff --git a/tool/python/singa/driver_wrap.cxx b/tool/python/singa/driver_wrap.cxx
index 99756f1..e6e6de2 100644
--- a/tool/python/singa/driver_wrap.cxx
+++ b/tool/python/singa/driver_wrap.cxx
@@ -4579,6 +4579,38 @@ fail:
 }
 
 
+SWIGINTERN PyObject *_wrap_Driver_Test(PyObject *SWIGUNUSEDPARM(self), PyObject *args) {
+  PyObject *resultobj = 0;
+  singa::Driver *arg1 = (singa::Driver *) 0 ;
+  std::string arg2 ;
+  void *argp1 = 0 ;
+  int res1 = 0 ;
+  PyObject * obj0 = 0 ;
+  PyObject * obj1 = 0 ;
+  
+  if (!PyArg_ParseTuple(args,(char *)"OO:Driver_Test",&obj0,&obj1)) SWIG_fail;
+  res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_singa__Driver, 0 |  0 );
+  if (!SWIG_IsOK(res1)) {
+    SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "Driver_Test" "', argument " "1"" of type '" "singa::Driver *""'"); 
+  }
+  arg1 = reinterpret_cast< singa::Driver * >(argp1);
+  {
+    std::string *ptr = (std::string *)0;
+    int res = SWIG_AsPtr_std_string(obj1, &ptr);
+    if (!SWIG_IsOK(res) || !ptr) {
+      SWIG_exception_fail(SWIG_ArgError((ptr ? res : SWIG_TypeError)), "in method '" "Driver_Test" "', argument " "2"" of type '" "std::string const""'"); 
+    }
+    arg2 = *ptr;
+    if (SWIG_IsNewObj(res)) delete ptr;
+  }
+  (arg1)->Test(arg2);
+  resultobj = SWIG_Py_Void();
+  return resultobj;
+fail:
+  return NULL;
+}
+
+
 SWIGINTERN PyObject *_wrap_new_Driver(PyObject *SWIGUNUSEDPARM(self), PyObject *args) {
   PyObject *resultobj = 0;
   singa::Driver *result = 0 ;
@@ -4643,6 +4675,7 @@ static PyMethodDef SwigMethods[] = {
         { (char *)"Driver_Train", _wrap_Driver_Train, METH_VARARGS, NULL},
         { (char *)"Driver_Init", _wrap_Driver_Init, METH_VARARGS, NULL},
         { (char *)"Driver_InitLog", _wrap_Driver_InitLog, METH_VARARGS, NULL},
+        { (char *)"Driver_Test", _wrap_Driver_Test, METH_VARARGS, NULL},
         { (char *)"new_Driver", _wrap_new_Driver, METH_VARARGS, NULL},
         { (char *)"delete_Driver", _wrap_delete_Driver, METH_VARARGS, NULL},
         { (char *)"Driver_swigregister", Driver_swigregister, METH_VARARGS, 
NULL},

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/singa/initializations.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/initializations.py b/tool/python/singa/initializations.py
new file mode 100644
index 0000000..f3037cd
--- /dev/null
+++ b/tool/python/singa/initializations.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+def get(identifier, **kwargs):
+
+  field = {}
+
+  if identifier == 'none':
+    return
+  
+  if identifier == 'uniform':
+    scale = kwargs['scale'] if 'scale' in kwargs else 0.05 
+    names = ['low', 'high']
+    values = [-scale, scale]
+
+  elif identifier == 'constant':
+    names = ['value']
+    values = [0]
+
+  elif identifier == 'gaussian':
+    names = ['mean', 'std']
+    values = [0, 0.01]
+
+  elif identifier == 'conv2d':
+    names = ['stride', 'pad']
+    values = [1, 0]
+
+  elif identifier == 'lrn2d':
+    names = ['alpha', 'beta', 'knorm']
+    values = [1, 0.75, 1]
+
+  for i in range(len(names)):
+    field[names[i]] = kwargs[names[i]] if names[i] in kwargs else values[i]
+ 
+  return field

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/singa/layer.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/layer.py b/tool/python/singa/layer.py
new file mode 100644
index 0000000..1744d50
--- /dev/null
+++ b/tool/python/singa/layer.py
@@ -0,0 +1,300 @@
+#!/usr/bin/env python
+from parameter import *
+from utils.utility import * 
+from utils.message import * 
+from google.protobuf import text_format
+
+class Layer(object):
+  def __init__(self, **kwargs):
+    self.layer = Message('Layer', **kwargs).proto
+    # required
+    if not 'name' in kwargs:
+      setval(self.layer, name=generateName('layer', 1))
+
+    # srclayers are set in Model.build()
+    self.is_datalayer = False 
+
+class Data(Layer):
+  def __init__(self, load, phase='train', checkpoint=None,
+               conf=None, **kwargs):
+    assert load != None, 'data type should be specified'
+    if load == 'kData':
+      super(Data, self).__init__(name=generateName('data'), user_type=load)
+    else:
+      self.layer_type = enumLayerType(load)
+      super(Data, self).__init__(name=generateName('data'), type=self.layer_type)
+
+    # include/exclude
+    setval(self.layer, include=enumPhase(phase))
+    #setval(self.layer, exclude=kTest if phase=='train' else kTrain)
+
+    if conf == None:
+      if load == 'kData':
+        setval(self.layer.Extensions[data_conf], **kwargs)
+      else:
+        setval(self.layer.store_conf, **kwargs)
+    else:
+      setval(self.layer, store_conf=conf.proto)
+    self.is_datalayer = True
+
+    self.checkpoint = checkpoint # checkpoint for training data
+
+
+class Convolution2D(Layer):
+  def __init__(self, nb_filter=0, kernel=0, stride=1, pad=0,
+               init=None, w_param=None, b_param=None,
+               activation=None, **kwargs):
+    '''
+    required
+      nb_filter = (int)  // the number of filters
+      kernel    = (int)  // the size of filter
+    optional
+      stride    = (int)  // the size of stride
+      pad       = (int)  // the size of padding
+    '''
+    assert nb_filter > 0 and kernel > 0, 'should be set as positive int'
+    super(Convolution2D, self).__init__(name=generateName('conv',1), type=kCConvolution)
+    fields = {'num_filters' : nb_filter,
+              'kernel' : kernel,
+              'stride' : stride,
+              'pad' : pad}
+    setval(self.layer.convolution_conf, **fields)
+
+    # parameter w  
+    if w_param == None:
+      self.init = 'gaussian' if init==None else init 
+      w_param = Parameter(init=self.init) 
+    setParamField(w_param.param, 'w', True, **kwargs)
+    setval(self.layer, param=w_param.param)
+
+    # parameter b  
+    if b_param == None:
+      self.init = 'constant' if init==None else init 
+      b_param = Parameter(init=self.init) # default: constant
+    setParamField(b_param.param, 'b', True, **kwargs)
+    setval(self.layer, param=b_param.param)
+
+    # following layers: e.g., activation, dropout, etc.
+    if activation:
+      self.mask = Activation(activation=activation).layer
+
+class MaxPooling2D(Layer):
+  def __init__(self, pool_size=None, stride=1, ignore_border=True, **kwargs): 
+    '''
+    required
+      pool_size     = (int|tuple) // the size for pooling
+    optional
+      stride        = (int)       // the size of striding
+      ignore_border = (bool)      // flag for padding
+      **kwargs                    // fields for Layer class
+    '''
+    assert pool_size != None, 'pool_size is required'
+    if type(pool_size) == int:
+      pool_size = (pool_size, pool_size)
+    assert type(pool_size) == tuple and  \
+           pool_size[0] == pool_size[1], 'pool size should be square in Singa'
+    super(MaxPooling2D, self).__init__(name=generateName('pool'), type=kCPooling, **kwargs)
+    fields = {'pool' : PoolingProto().MAX,
+              'kernel' : pool_size[0],
+              'stride' : stride,
+              'pad' : 0 if ignore_border else 1}
+    setval(self.layer.pooling_conf, **fields)
+
+class AvgPooling2D(Layer):
+  def __init__(self, pool_size=None, stride=1, ignore_border=True, **kwargs): 
+    '''
+    required
+      pool_size     = (int|tuple) // size for pooling
+    optional
+      stride        = (int)       // size of striding
+      ignore_border = (bool)      // flag for padding
+      **kwargs                    // fields for Layer class
+    '''
+    assert pool_size != None, 'pool_size is required'
+    if type(pool_size) == int:
+      pool_size = (pool_size, pool_size)
+    assert type(pool_size) == tuple and  \
+           pool_size[0] == pool_size[1], 'pool size should be square in Singa'
+    super(AvgPooling2D, self).__init__(name=generateName('pool'), type=kCPooling, **kwargs)
+    self.layer.pooling_conf.pool = PoolingProto().AVG 
+    fields = {'pool' : PoolingProto().AVG,
+              'kernel' : pool_size[0],
+              'stride' : stride,
+              'pad' : 0 if ignore_border else 1}
+    setval(self.layer.pooling_conf, **fields)
+
+class LRN2D(Layer):
+  def __init__(self, size=0, **kwargs):
+    super(LRN2D, self).__init__(name=generateName('norm'), type=kLRN)
+    # required
+    assert size != 0, 'local size should be set'
+    self.layer.lrn_conf.local_size = size 
+    init_value = initializations.get('lrn2d', **kwargs)
+    setval(self.layer.lrn_conf, **init_value)
+
+
+class Activation(Layer):
+  def __init__(self, activation='stanh', topk=1):
+    self.name = activation 
+    if activation == 'tanh': activation = 'stanh' # <-- better way to set?
+    self.layer_type = enumLayerType(activation)  
+    super(Activation, self).__init__(name=generateName(self.name), type=self.layer_type)
+    if activation == 'softmaxloss':
+      self.layer.softmaxloss_conf.topk = topk
+
+class Dropout(Layer): 
+  def __init__(self, ratio=0.5):
+    self.name = 'dropout'
+    self.layer_type = kDropout
+    super(Dropout, self).__init__(name=generateName(self.name), type=self.layer_type)
+    self.layer.dropout_conf.dropout_ratio = ratio
+
+
+class RGB(Layer):
+  def __init__(self, meanfile=None, **kwargs):
+    assert meanfile != None, 'meanfile should be specified'
+    self.name = 'rgb'
+    self.layer_type = kRGBImage
+    super(RGB, self).__init__(name=generateName(self.name), type=self.layer_type)
+    self.layer.rgbimage_conf.meanfile = meanfile
+
+class Dense(Layer):
+  def __init__(self, output_dim=0, activation=None, 
+               init=None, w_param=None, b_param=None, input_dim=None,
+               **kwargs):
+    '''
+    required
+      output_dim = (int)
+    optional
+      activation = (string)
+      init       = (string) // 'uniform', 'gaussian', 'constant'
+      **kwargs
+        w_lr = (float) // learning rate for w
+        w_wd = (float) // weight decay for w
+        b_lr = (float) // learning rate for b
+        b_wd = (float) // weight decay for b
+    '''
+    # required
+    assert output_dim > 0, 'output_dim should be set'
+    super(Dense, self).__init__(type=kInnerProduct, **kwargs)
+    self.layer.innerproduct_conf.num_output = output_dim
+    if 'transpose' in kwargs:
+      self.layer.innerproduct_conf.transpose = kwargs['transpose']
+    
+    # parameter w (default: gaussian)  
+    if w_param == None:
+      self.init = 'gaussian' if init==None else init 
+      w_param = Parameter(init=self.init) 
+    setParamField(w_param.param, 'w', False, **kwargs)
+    setval(self.layer, param=w_param.param)
+
+    # parameter b (default: constant) 
+    if b_param == None:
+      self.init = 'constant' if init==None else init 
+      b_param = Parameter(init=self.init)
+    setParamField(b_param.param, 'b', False, **kwargs)
+    setval(self.layer, param=b_param.param)
+
+    # following layers: e.g., activation, dropout, etc.
+    if activation:
+      self.mask = Activation(activation=activation).layer
+
+
+''' Class to deal with multiple layers
+'''
+class Autoencoder(object):
+  def __init__(self, hid_dim=None, out_dim=0, activation=None, 
+               param_share=True, **kwargs):
+    # required
+    assert out_dim >  0, 'out_dim should be set'
+    self.out_dim = out_dim
+    assert hid_dim != None, 'hid_dim should be set'
+    self.hid_dim = [hid_dim] if type(hid_dim)==int else hid_dim 
+
+    self.layer_type = 'AutoEncoder' 
+    self.activation = activation
+    self.param_share = param_share
+
+class RBM(Layer):
+  def __init__(self, out_dim=None, w_param=None, b_param=None, sampling=None, **kwargs):
+    '''
+    Generate layers (like MLP) according to the number of elements in out_dim, and
+      on top of it, two layers RBMVis and RBMHid with bidirectional connection
+
+    required
+      out_dim  = (int) or (int list) // the number of hidden nodes
+    optional
+      sampling = (string)
+    '''
+    assert out_dim >  0, 'out_dim should be set'
+    self.out_dim = [out_dim] if type(out_dim)==int else out_dim 
+
+    self.name = kwargs['name'] if 'name' in kwargs else 'RBMVis' 
+    self.layer_type = kwargs['type'] if 'type' in kwargs else kRBMVis
+    super(RBM, self).__init__(name=generateName(self.name, withnumber=False), type=self.layer_type)
+    setval(self.layer.rbm_conf, hdim=self.out_dim[-1])
+    if self.layer_type == kRBMHid and sampling != None: 
+      if sampling == 'gaussian':
+        setval(self.layer.rbm_conf, gaussian=True)
+
+    # parameter w
+    if w_param == None:
+      w_param = Parameter(init='gaussian', **kwargs)
+      setParamField(w_param.param, 'w', withnumber=False, level=len(self.out_dim), **kwargs)
+    else:
+      if self.layer_type == kRBMHid:
+        del kwargs['name']
+      else:
+        setParamField(w_param.param, 'w', withnumber=False, level=len(self.out_dim), **kwargs)
+    setval(self.layer, param=w_param.param)
+
+    # parameter b
+    if b_param == None:
+      b_param = Parameter(init='constant', **kwargs)
+      setParamField(b_param.param, 'b', withnumber=False, level=len(self.out_dim), **kwargs)
+    else:
+      if self.layer_type == kRBMHid:
+        pass
+      else:
+        setParamField(b_param.param, 'b', withnumber=False, level=len(self.out_dim), **kwargs)
+    setval(self.layer, param=b_param.param)
+
+    if self.layer_type == kRBMVis: 
+      wname = w_param.param.name
+      parw = Parameter(name=wname+"_", init='none', share_from=wname)
+      bname = b_param.param.name
+      parb = Parameter(name=bname+"2", wd=0, init='constant')
+      self.bidirect = RBM(self.out_dim, name='RBMHid', type=kRBMHid,
+                      w_param=parw, b_param=parb, sampling=sampling).layer
+
+ 
+class Embedding(Layer):
+  def __init__(self, in_dim, out_dim, w_param=None, **kwargs):
+    super(Embedding, self).__init__(name=generateName('embedding',1), user_type='kEmbedding')
+    fields = { 'vocab_size': in_dim,
+               'word_dim': out_dim }
+    setval(self.layer.Extensions[embedding_conf], **fields)
+    if w_param == None:
+      w_param = Parameter(name=generateName('w'), init='uniform') # default: uniform
+    else:
+      setParamField(w_param.param, 'w', True, **kwargs)
+    setval(self.layer, param=w_param.param)
+    
+class RNNLM(Layer):
+  def __init__(self, dim, w_param=None, **kwargs):
+    super(RNNLM, self).__init__(name=generateName('hidden',1), user_type='kHidden')
+    if w_param == None:
+      w_param = Parameter(name=generateName('w'), init='uniform') # default: uniform
+    else:
+      setParamField(w_param.param, 'w', True, **kwargs)
+    setval(self.layer, param=w_param.param)
+
+class UserLossRNNLM(Layer):
+  def __init__(self, **kwargs):
+    super(UserLossRNNLM, self).__init__(name=generateName('loss',1), user_type='kLoss')
+    self.layer.Extensions[loss_conf].nclass = kwargs['nclass'] 
+    self.layer.Extensions[loss_conf].vocab_size = kwargs['vocab_size'] 
+    setval(self.layer, param=Parameter(name=generateName('w'), init='uniform', scale=0.3).param)
+    setval(self.layer, param=Parameter(name=generateName('w',1), init='uniform', scale=0.3).param)
+
+
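
- Note: for reference, a minimal sketch of how the layer classes above are used
  directly; the import paths and all hyperparameter values are illustrative
  assumptions, not part of this commit
  {code}
  # illustrative sketch; assumes tool/python is on sys.path
  from singa.layer import Dense, RBM
  from singa.parameter import Parameter

  # Dense: w defaults to 'gaussian', b to 'constant'; w_lr/w_wd/b_lr/b_wd are
  # forwarded to setParamField(), and 'activation' appends an Activation layer
  h1 = Dense(2500, activation='tanh', w_lr=0.1, w_wd=0.0002)

  # an explicit Parameter object overrides the default initialization
  w = Parameter(init='uniform', scale=0.05)
  out = Dense(10, w_param=w, activation='softmax')

  # RBM: an RBMVis layer plus a mirrored RBMHid (shared weights) attached as
  # 'bidirect'; Energy.add() in model.py expands the Dense stack beneath it
  rbm = RBM([1000, 500, 250], sampling='gaussian')
  {code}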

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/singa/model.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/model.py b/tool/python/singa/model.py
new file mode 100644
index 0000000..1aa62b0
--- /dev/null
+++ b/tool/python/singa/model.py
@@ -0,0 +1,455 @@
+#!/usr/bin/env python
+import sys, re, subprocess
+from layer import *
+from utils.utility import * 
+from utils.message import * 
+from google.protobuf import text_format
+
+class Model(object):
+
+  def __init__(self, name='my model', argv=[], label=False):
+    '''
+    optional
+      name  = (string) // name of model/job
+      label = (bool)   // whether a label layer exists (deprecated)
+    '''
+    self.jobconf = Message('Job', name=name).proto 
+    self.layers = []
+    self.label = label
+    self.argv = argv
+    self.result = None
+    self.last_checkpoint_path = None
+    
+  def exist_datalayer(self, phase):
+    for ly in self.layers:
+      if enumPhase(phase) in ly.layer.include:
+        return True
+    return False
+
+  def compile(self, optimizer=None, cluster=None, loss=None, topk=1, **kwargs):
+    '''
+    required
+      optimizer = (Updater) // updater settings, e.g., SGD
+      cluster   = (Cluster) // cluster settings
+    optional
+      loss      = (string)  // name of loss function type
+      topk      = (int)     // the number of results considered to compute accuracy
+    '''
+    assert optimizer != None, 'optimizer (Updater component) should be set'
+    assert cluster != None, 'cluster (Cluster component) should be set'  
+    setval(self.jobconf, updater=optimizer.proto)
+    setval(self.jobconf, cluster=cluster.proto)
+
+    # take care of loss function layer
+    if loss == None:
+      print 'loss layer is not set'
+    else:
+      if hasattr(self.layers[-1], 'mask'):
+        ly = self.layers[-1].mask
+      else:
+        ly = self.layers[-1].layer
+
+      # take care of the last layer
+      if ly.type == enumLayerType('softmax'):
+        # revise the last layer
+        if loss == 'categorical_crossentropy':
+          setval(ly, type=enumLayerType('softmaxloss'))
+          setval(ly.softmaxloss_conf, topk=topk) 
+        elif loss == 'mean_squared_error':
+          setval(ly, type=enumLayerType('euclideanloss'))
+      else:
+        # add new layer
+        if loss == 'categorical_crossentropy':
+          self.add(Activation('softmaxloss', topk=topk))
+        elif loss == 'mean_squared_error':
+          self.add(Activation('euclideanloss'))
+        elif loss == 'user_loss_rnnlm': # user-defined loss layer for rnnlm
+          self.add(UserLossRNNLM(nclass=kwargs['nclass'], vocab_size=kwargs['in_dim']))
+
+  def build(self):
+    '''
+    construct neuralnet proto
+    '''
+    net = NetProto() 
+    slyname = self.layers[0].layer.name
+    for i in range(len(self.layers)):
+      ly = net.layer.add()
+      ly.CopyFrom(self.layers[i].layer)
+      lastly = ly
+      if self.layers[i].is_datalayer == True:
+        continue
+      getattr(ly, 'srclayers').append(slyname)
+      slyname = ly.name
+      if hasattr(self.layers[i], 'mask'):
+        mly = net.layer.add()
+        mly.CopyFrom(self.layers[i].mask)
+        getattr(mly, 'srclayers').append(slyname)
+        slyname = mly.name
+        lastly = mly
+      if hasattr(self.layers[i], 'bidirect'):
+        bly = net.layer.add()
+        bly.CopyFrom(self.layers[i].bidirect)
+        getattr(bly, 'srclayers').append(slyname)
+
+    # deal with label layer (deprecated)
+    if self.label == True:
+      label_layer = Layer(name='label', type=kLabel)      
+      ly = net.layer.add()
+      ly.CopyFrom(label_layer.layer)
+      getattr(ly, 'srclayers').append(self.layers[0].layer.name)
+      getattr(lastly, 'srclayers').append(label_layer.layer.name)
+    else:
+      if lastly.name == 'RBMVis':
+        getattr(lastly, 'srclayers').append(bly.name)
+      else:
+        getattr(lastly, 'srclayers').append(self.layers[0].layer.name)
+
+    setval(self.jobconf, neuralnet=net)
+
+  def fit(self, data=None, alg='bp', nb_epoch=0,
+          with_test=False, execpath='', **fields):
+    '''
+    required
+      data        = (Data)   // Data class object for training data
+      alg         = (string) // algorithm, e.g., 'bp', 'cd'
+      nb_epoch    = (int)    // the number of training steps
+    optional
+      with_test   = (bool)   // flag if singa runs for test data
+      execpath    = (string) // path to user own singa (executable file)
+      **fields (KEY=VALUE)
+        batch_size       = (int)    // batch size for training data
+        train_steps      = (int)    // the number of steps for training, i.e., epoch
+        disp_freq        = (int)    // frequency to display training info
+        disp_after       = (int)    // display after this number of steps
+        validate_data    = (Data)   // validation data, specified in load_data()
+        validate_freq    = (int)    // frequency of validation
+        validate_steps   = (int)    // total number of steps for validation
+        validate_after   = (int)    // start validation after this number
+        checkpoint_path  = (string) // path to checkpoint file
+        checkpoint_freq  = (int)    // frequency for checkpoint
+        checkpoint_after = (int)    // start checkpointing after this number
+    '''
+    assert data != None, 'Training data should be set'
+    assert nb_epoch > 0, 'Training steps should be set'
+
+    if 'batch_size' in fields:  # if new value is set, replace the batch_size
+      setval(data.layer.store_conf, batchsize=fields['batch_size'])
+
+    # insert layer for training
+    if self.exist_datalayer('train') == False: 
+      self.layers.insert(0, data)
+    setval(self.jobconf, train_steps=nb_epoch)
+    setval(self.jobconf, disp_freq=nb_epoch/10)
+    if 'disp_freq' in fields:
+      setval(self.jobconf, disp_freq=fields['disp_freq'])
+
+    if 'validate_data' in fields:
+      self.layers.insert(1, fields['validate_data'])
+      setval(self.jobconf, validate_freq=nb_epoch/10)
+
+    setval(self.jobconf, **fields)
+
+    # loading checkpoint if it is set
+    if data.checkpoint != None:
+      setval(self.jobconf, checkpoint_path=data.checkpoint)
+
+    # save model parameter (i.e., checkpoint_path)
+    setval(self.jobconf, checkpoint_freq=nb_epoch)
+    self.last_checkpoint_path = '{0}/step{1}-worker0'.format(
+                     self.jobconf.cluster.workspace, nb_epoch) 
+    
+    # set Train_one_batch component, using backpropagation ('bp') by default
+    setval(self.jobconf, train_one_batch=Algorithm(type=enumAlgType(alg)).proto)
+
+    # start to run singa for training
+    if with_test == False: 
+      self.build()  # construct NeuralNet component
+      #self.display()
+      return SingaRun(jobproto=self.jobconf, argv=self.argv, execpath=execpath)
+    else:
+      # run singa in evaluate() with test data
+      pass
+
+
+  def evaluate(self, data=None, alg='bp',
+               checkpoint_path=None, execpath='', **fields):
+    '''
+    required
+      data = (Data)   // Data class object for testing data
+    optional
+      checkpoint_path = (list)   // checkpoint path, necessary only for testing
+      execpath        = (string) // path to user's own executable
+      **fields (KEY=VALUE)
+        batch_size   = (int)  // batch size for testing data
+        test_freq    = (int)  // frequency of testing
+        test_steps   = (int)  // total number of steps for testing 
+        test_after   = (int)  // start testing after this number of steps 
+    '''
+    assert data != None, 'Testing data should be set'
+    is_testonly = False
+
+    if 'batch_size' in fields:  # if new value is set, replace the batch_size
+      setval(data.layer.store_conf, batchsize=fields['batch_size'])
+
+    # insert layer for testing
+    if self.exist_datalayer('test') == False: 
+      self.layers.insert(0, data)
+
+    # loading checkpoint if singa runs only for testing
+    if self.exist_datalayer('train') == False: 
+      is_testonly = True
+      if checkpoint_path == None:
+        print 'checkpoint_path has not been specified'
+      else:
+        setval(self.jobconf, checkpoint_path=checkpoint_path)
+
+    steps = fields['test_steps'] if 'test_steps' in fields else 10
+    setval(self.jobconf, test_steps=steps)
+    setval(self.jobconf, **fields)
+    
+    # set Train_one_batch component, using backpropagation ('bp') by default
+    setval(self.jobconf, train_one_batch=Algorithm(type=enumAlgType(alg)).proto)
+
+    self.build()  # construct NeuralNet component
+
+    #--- generate job.conf file for debug purpose 
+    #filename = 'job.conf'
+    #with open(filename, 'w') as f:
+    #  f.write(text_format.MessageToString(self.jobconf.cluster))
+    #self.display()
+
+    #--- run singa --- 
+    return SingaRun(jobproto=self.jobconf, argv=self.argv, execpath=execpath, testmode=is_testonly)
+    #return SingaRun_script(filename=filename, execpath=execpath)
+    
+
+  def display(self):
+    print text_format.MessageToString(self.jobconf)
+
+
+class Energy(Model):
+  def __init__(self, name='my model', argv=[], label=False):
+    super(Energy, self).__init__(name=name, argv=argv, label=label)
+
+  def add(self, layer):
+    if hasattr(layer, 'layer_type'):
+      if layer.layer_type == kRBMVis:
+        dim = 0 
+        for i in range(1, len(layer.out_dim)):
+          parw = Parameter(name='w', init='none', level=i)
+          parb = Parameter(name='b', init='none', level=i)
+          dim = layer.out_dim[i-1]
+          self.layers.append(Dense(dim, w_param=parw, b_param=parb, activation='sigmoid'))
+        self.layers.append(layer)
+
+
+class Sequential(Model):
+  def __init__(self, name='my model', argv=[], label=False):
+    super(Sequential, self).__init__(name=name, argv=argv, label=label)
+
+  def add(self, layer):
+    if hasattr(layer, 'layer_type'):
+      if layer.layer_type == 'AutoEncoder':
+        if layer.param_share == True:
+          dim = 0 
+          # Encoding
+          for i in range(1, len(layer.hid_dim)+1):
+            parw = Parameter(name='w', init='none', level=i)
+            parb = Parameter(name='b', init='none', level=i)
+            dim = layer.hid_dim[i-1]
+            if i == len(layer.hid_dim): activation = None
+            else: activation = layer.activation
+            self.layers.append(Dense(dim, w_param=parw, b_param=parb, activation=activation))
+          # Decoding
+          for i in range(len(layer.hid_dim), 0, -1):
+            parw = Parameter(name=generateName('w',2), init='none')
+            parb = Parameter(name=generateName('b',2), init='none')
+            setval(parw.param, share_from='w'+str(i))
+            setval(parb.param, name='b'+str(i))
+            if i == 1: dim = layer.out_dim
+            else: dim = layer.hid_dim[i-2]
+            self.layers.append(Dense(dim, w_param=parw, b_param=parb, activation=layer.activation, transpose=True))
+        else:
+          # MLP
+          for i in range(1, len(layer.hid_dim)+2):
+            parw = Parameter(name='w', init='none', level=i)
+            parb = Parameter(name='b', init='none', level=i)
+            if i == len(layer.hid_dim)+1: dim = layer.out_dim
+            else: dim = layer.hid_dim[i-1]
+            self.layers.append(Dense(dim, w_param=parw, b_param=parb, activation=layer.activation))
+    else:
+      self.layers.append(layer)
+
+
+class Store(object):
+  def __init__(self, **kwargs):
+    '''
+    **kwargs
+        path       = (string)  // path to dataset
+        backend    = (string)  // 
+        batch_size = (int)     // batch size of dataset
+        shape      = (int)     // 
+
+    '''
+    self.proto = Message('Store', **kwargs).proto
+
+class Algorithm(object):
+  def __init__(self, type=enumAlgType('bp'), **kwargs):
+    alg = Message('Alg', alg=type, **kwargs).proto
+    if type == enumAlgType('cd'):
+      setval(alg.cd_conf, **kwargs)
+    self.proto = alg
+
+class Updater(object):
+  def __init__(self, upd_type, lr, lr_type,
+               decay, momentum,
+               step, step_lr, **fields):
+
+    upd = Message('Updater', type=upd_type, **fields).proto
+    setval(upd.learning_rate, base_lr=lr) 
+    if decay > 0:
+      setval(upd, weight_decay=decay) 
+    if momentum > 0:
+      setval(upd, momentum=momentum) 
+
+    if lr_type == None:
+      setval(upd.learning_rate, type=kFixed) 
+    elif lr_type == 'step':
+      cp = Message('Step', change_freq=60, gamma=0.997)
+      setval(upd.learning_rate, type=kStep, step_conf=cp.proto) 
+    elif lr_type == 'fixed':
+      cp = Message('FixedStep', step=step, step_lr=step_lr)
+      setval(upd.learning_rate, type=kFixedStep, fixedstep_conf=cp.proto) 
+    elif lr_type == 'linear':
+      cp = Message('Linear', change_freq=10, final_lr=0.1)
+      setval(upd.learning_rate, type=kLinear, linear_conf=cp.proto) 
+    self.proto = upd
+
+class SGD(Updater):
+  def __init__(self, lr=0.01, lr_type=None,
+               decay=0, momentum=0,
+               step=(0), step_lr=(0.01), **fields):
+    '''
+    required
+       lr      = (float)  // base learning rate
+    optional
+       lr_type = (string) // type of learning rate, 'Fixed' by default
+       decay    = (float) // weight decay
+       momentum = (float) // momentum
+       **fields (KEY=VALUE)
+
+    '''
+    assert lr
+    super(SGD, self).__init__(upd_type=kSGD,
+               lr=lr, lr_type=lr_type,
+               decay=decay, momentum=momentum,
+               step=step, step_lr=step_lr, **fields)
+
+class AdaGrad(Updater):
+  def __init__(self, lr=0.01, lr_type=None,
+               decay=0, momentum=0,
+               step=(0), step_lr=(0.01), **fields):
+    '''
+    required
+       lr      = (float)  // base learning rate
+    optional
+       lr_type = (string) // type of learning rate, 'Fixed' by default
+       decay    = (float) // weight decay
+       momentum = (float) // momentum
+       **fields (KEY=VALUE)
+
+    '''
+    assert lr
+    super(AdaGrad, self).__init__(upd_type=kAdaGrad,
+               lr=lr, lr_type=lr_type,
+               decay=decay, momentum=momentum,
+               step=step, step_lr=step_lr, **fields)
+
+
+class Cluster(object):
+  def __init__(self, workspace=None,
+               nworker_groups=1, nserver_groups=1,
+               nworkers_per_group=1, nservers_per_group=1,
+               nworkers_per_procs=1, nservers_per_procs=1,
+               **fields):
+    '''
+    required
+      workspace = (string) // workspace path
+    optional
+      nworker_groups     = (int)
+      nserver_groups     = (int)
+      nworkers_per_group = (int)
+      nservers_per_group = (int)
+      nworkers_per_procs = (int)
+      nservers_per_procs = (int)
+      **fields
+        server_worker_separate = (bool)
+    '''
+    assert workspace != None, 'need to set workspace'
+    self.proto = Message('Cluster', workspace=workspace).proto
+    # optional
+    self.proto.nworker_groups = nworker_groups 
+    self.proto.nserver_groups = nserver_groups 
+    self.proto.nworkers_per_group = nworkers_per_group 
+    self.proto.nservers_per_group = nservers_per_group 
+    self.proto.nworkers_per_procs = nworkers_per_procs 
+    self.proto.nservers_per_procs = nservers_per_procs 
+    # other fields
+    setval(self.proto, **fields)
+
+
+
+def StoreResults(lines):
+
+  resultDic = {} 
+  for line in lines:
+    line = re.findall(r'[\w|*.*]+', line)
+    if 'Train' in line:
+      step = line[line.index('step')+1]
+      if 'accuracy' in line:
+        resultDic.setdefault(step,{})['acc'] = line[line.index('accuracy')+1] 
+      if 'loss' in line:
+        resultDic.setdefault(step,{})['loss'] = line[line.index('loss')+1] 
+      if 'ppl' in line:
+        resultDic.setdefault(step,{})['ppl'] = line[line.index('ppl')+1] 
+      if 'Squared' in line:
+        resultDic.setdefault(step,{})['se'] = line[line.index('Squared')+2] 
+  return resultDic
+
+def SingaRun(jobproto='', argv=[], execpath='', testmode=False):
+
+  import singa.driver as driver
+  d = driver.Driver()
+  d.InitLog(argv[0]) 
+  d.Init(argv)
+  if testmode == True:
+    d.Test(jobproto.SerializeToString())
+  else:
+    d.Train(False, jobproto.SerializeToString())
+
+  logfile = '/tmp/singa-log/{0}.ERROR'.format(argv[0].split('/')[-1])
+  fin = open(logfile, 'r')
+  result = StoreResults(fin.readlines())
+ 
+  return result
+
+def SingaRun_script(filename='', execpath=''):
+  SINGAROOT = '../../../'
+  conf = 'examples/' + filename
+  if execpath=='':
+    cmd = SINGAROOT+'bin/singa-run.sh ' \
+        + '-conf %s ' % conf 
+  else:
+    cmd = SINGAROOT+'bin/singa-run.sh ' \
+        + '-conf %s ' % conf \
+        + '-exec %s ' % execpath 
+
+  procs = subprocess.Popen(cmd.strip().split(' '), stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
+
+  resultDic = {} 
+  outputlines = iter(procs.stdout.readline, '')
+  resultDic = StoreResults(outputlines)
+
+  #TODO better format to store the result??
+  return resultDic
+
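
- Note: a minimal end-to-end sketch of the Model workflow defined above; the
  mnist loader signature and all values here are illustrative assumptions
  {code}
  import sys
  from singa.model import Sequential, SGD, Cluster
  from singa.layer import Dense
  from singa.datasets import mnist   # assumed: loader returning Data objects

  X_train, X_test, workspace = mnist.load_data()   # assumed signature

  m = Sequential('mlp', sys.argv)
  m.add(Dense(2500, activation='tanh'))
  m.add(Dense(10, activation='softmax'))

  sgd = SGD(lr=0.001, lr_type='step')
  topo = Cluster(workspace)
  m.compile(loss='categorical_crossentropy', optimizer=sgd, cluster=topo)

  # with_test=True defers execution to evaluate(), which runs singa once over
  # both phases and returns results parsed from the log by StoreResults()
  m.fit(X_train, nb_epoch=100, with_test=True)
  result = m.evaluate(X_test, batch_size=100, test_steps=10)
  {code}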

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/singa/parameter.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/parameter.py b/tool/python/singa/parameter.py
new file mode 100644
index 0000000..3b33243
--- /dev/null
+++ b/tool/python/singa/parameter.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+import initializations
+from utils.utility import * 
+from utils.message import * 
+from google.protobuf import text_format
+
+class Parameter(object):
+
+  def __init__(self, **kwargs):
+    '''
+    optional
+      **kwargs
+        name  = (string) // parameter name
+        lr    = (float)  // learning rate
+        wd    = (float)  // weight decay
+        init  = (string) // initialization type {'constant','uniform','gaussian'}
+        value = (int)    // value for 'constant'
+        scale = (float)  // [low, high] for 'uniform', low=-scale, high=scale
+        low   = (float)  // low value   for 'uniform'
+        high  = (float)  // high value  for 'uniform' 
+        mean  = (float)  // mean for 'gaussian'
+        std   = (float)  // std  for 'gaussian'
+    '''
+    fields = {'lr_scale' : kwargs['lr'] if 'lr' in kwargs else 1,
+              'wd_scale' : kwargs['wd'] if 'wd' in kwargs else 1
+             }
+    self.param = Message('Param', **fields).proto
+
+    if not 'name' in kwargs:
+      setval(self.param, name=generateName('param', 1))
+    else:
+      pname = kwargs['name']
+      # parameter name for RBM
+      if 'level' in kwargs:
+        pname += str(kwargs['level'])
+        if pname[0] == 'b':
+          pname += '2'
+      setval(self.param, name=pname)
+
+    if 'share_from' in kwargs:
+      setval(self.param, share_from=kwargs['share_from'])
+
+    if 'init' in kwargs:
+      init_values = initializations.get(kwargs['init'], **kwargs)
+
+      if not kwargs['init'] == 'none':
+        pg = Message('ParamGen', type=enumInitMethod(kwargs['init']), **init_values)
+        del kwargs['init']
+        setval(self.param, init=pg.proto)
+    else: # default: uniform
+      pg = Message('ParamGen', type=enumInitMethod('uniform'))
+      setval(self.param, init=pg.proto)
+
+  def update(self, **fields):
+    setval(self.param, **fields) 
+    setval(self.param.init, **fields) 
+
+
+def setParamField(param, pname, changename=False, withnumber=True, **kwargs):
+  ''' param      = (ParamProto)
+      pname      = (string)     // 'w' for weight, or 'b' for bias
+      changename = (bool)       // update parameter name if True
+      withnumber = (bool)       // add layer number if True
+      **kwargs
+  '''
+  assert pname == 'w' or pname == 'b', 'pname should be w or b'
+
+  lr = param.lr_scale
+  wd = param.wd_scale
+  initkv = {}
+
+  if pname == 'w':
+    if 'w_lr' in kwargs:
+      lr = kwargs['w_lr'] 
+      del kwargs['w_lr']
+    if 'w_wd' in kwargs:
+      wd = kwargs['w_wd']
+      del kwargs['w_wd']
+    for k, v in kwargs.items():
+      if k.startswith('w_'): 
+        initkv[k[2:]] = v 
+
+  elif pname == 'b':
+    if 'b_lr' in kwargs:
+      lr = kwargs['b_lr']
+      del kwargs['b_lr']
+    if 'b_wd' in kwargs:
+      wd = kwargs['b_wd']
+      del kwargs['b_wd']
+    for k, v in kwargs.items():
+      if k.startswith('b_'): 
+        initkv[k[2:]] = v 
+
+  field = {'lr_scale' : lr, 'wd_scale' : wd}
+
+  # Set/update parameter fields
+  if param.name.startswith('param') or changename==True:
+    if 'level' in kwargs:  # parameter name for RBM
+      pname += str(kwargs['level'])
+    setval(param, name=generateName(pname, withnumber=withnumber), **field)
+  else:
+    setval(param, **field)
+
+  # Set/update parameter init fields
+  setval(param.init, **initkv)
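
- Note: a short sketch of how Parameter and setParamField() interact
  (illustrative; the names and values are assumptions)
  {code}
  from singa.parameter import Parameter, setParamField
  from google.protobuf import text_format

  # lr/wd map to lr_scale/wd_scale; 'init' selects a ParamGen (uniform default)
  p = Parameter(name='w1', init='gaussian', mean=0, std=0.02, lr=2, wd=1)

  # side-specific overrides: w_* keys apply to 'w', b_* keys to 'b'
  setParamField(p.param, 'w', changename=False, w_lr=0.1, w_wd=0.0002)
  print text_format.MessageToString(p.param)
  {code}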

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/singa/utils/__init__.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/utils/__init__.py b/tool/python/singa/utils/__init__.py
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/singa/utils/message.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/utils/message.py b/tool/python/singa/utils/message.py
new file mode 100644
index 0000000..251a377
--- /dev/null
+++ b/tool/python/singa/utils/message.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python 
+import sys, os 
+from utility import * 
+sys.path.append(os.path.join(os.path.dirname(__file__),'../../pb2')) 
+
+'''
+ - This script reads proto files in ../../pb2, generated by the protocol buffer compiler
+ - The Message class creates an object for a proto and sets the fields specified by kwargs
+ - enumXXX function returns enum values of name XXX 
+'''
+
+module_list=[]
+
+# import all modules in dir singa_root/tool/python/pb2, except common, singa and __init__
+for f in os.listdir(os.path.join(os.path.dirname(__file__),'../../pb2')):
+  if (f.endswith(".pyc")):
+    continue
+  if(f == "__init__.py" or f == "common_pb2.py" or f == "singa_pb2.py" ):
+    continue
+  module_name = f.split('.')[0]
+  module=__import__(module_name)  
+  module_list.append(module)
+  for func_name in dir(module):
+    if not func_name.startswith("__"):
+      globals()[func_name] = getattr(module,func_name)
+
+class Message(object):
+  def __init__(self,protoname,**kwargs):
+    for module in module_list:
+      if hasattr(module,protoname+"Proto"):
+        class_ = getattr(module,protoname+"Proto")
+        self.proto = class_()
+        return setval(self.proto,**kwargs)
+    raise Exception('invalid protoname')
+
+enumDict_=dict()
+
+# collect all enum types defined in the modules
+for module in module_list:
+  for enumtype in module.DESCRIPTOR.enum_types_by_name:
+    tempDict=enumDict_[enumtype]=dict()
+    for name in getattr(module,enumtype).DESCRIPTOR.values_by_name: 
+      tempDict[name[1:].lower()]=getattr(module,name)
+
+def make_function(enumtype):
+  def _function(key):
+    return enumDict_[enumtype][key]
+  return _function
+
+current_module = sys.modules[__name__]
+
+# define an enum<TypeName> accessor function for each enum type
+for module in module_list:
+  for enumtype in module.DESCRIPTOR.enum_types_by_name:
+    setattr(current_module,"enum"+enumtype,make_function(enumtype))
+
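
- Note: in effect, message.py turns every *_pb2 message and enum into a small
  factory; a sketch of the resulting interface (illustrative, not part of this
  commit)
  {code}
  from singa.utils.message import *

  # Message('Job', ...) locates JobProto among the imported pb2 modules
  job = Message('Job', name='mlp').proto

  # each enum type gains an accessor; keys are the enum value names with the
  # leading 'k' stripped and lowercased, e.g. kTrain -> 'train'
  ph = enumPhase('train')
  lt = enumLayerType('softmaxloss')
  {code}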

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/7d43e273/tool/python/singa/utils/utility.py
----------------------------------------------------------------------
diff --git a/tool/python/singa/utils/utility.py b/tool/python/singa/utils/utility.py
new file mode 100644
index 0000000..93d2f7f
--- /dev/null
+++ b/tool/python/singa/utils/utility.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+layerid = 0 
+paramid = 0 
+
+def generateName(label, op=0, withnumber=True):
+  global layerid, paramid
+  num = layerid
+  if label == 'layer':
+    if op ==1: layerid += 1
+    num = layerid
+  elif label == 'param':
+    if op ==1: paramid += 1
+    num = paramid
+  else:
+    if op ==1: layerid += 1
+    num = layerid
+    if op ==2:
+      num = layerid+1
+
+  if withnumber == False:
+    return '{0}'.format(label)
+
+  return '{0}{1}'.format(label, num)
+
+
+def setval(proto, **kwargs):
+  for k,v in kwargs.items():
+    #print 'kv: ', k, ', ', v
+    if hasattr(proto, k):
+      flabel = proto.DESCRIPTOR.fields_by_name[k].label
+      ftype  = proto.DESCRIPTOR.fields_by_name[k].type
+
+      fattr  = getattr(proto, k) 
+      if flabel == 3: # repeated field
+        if ftype == 11: # message type 
+          fattr = fattr.add()
+          fattr.MergeFrom(v)
+        else:
+          if type(v) == list or type(v) == tuple:
+            for i in range(len(v)):
+              fattr.append(v[i])
+          else:
+            fattr.append(v)
+      else:
+        if ftype == 11: # message type 
+          fattr = getattr(proto,k)
+          fattr.MergeFrom(v)
+        else:
+          setattr(proto, k, v)
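
- Note: the contract of generateName() and setval(), sketched below
  (illustrative; assumes tool/python is on sys.path)
  {code}
  from singa.utils.utility import generateName, setval
  from singa.utils.message import Message

  n1 = generateName('layer', 1)             # e.g. 'layer1'; op=1 bumps the counter
  nb = generateName('w', withnumber=False)  # 'w'

  job = Message('Job', name='demo').proto
  setval(job, train_steps=1000)             # scalar field -> setattr
  setval(job.cluster, workspace='/tmp/ws')  # scalar field of a nested message
  {code}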
