SINGA-21 Code review

review cluster.proto, model.proto
  -- reformat


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/2586e147
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/2586e147
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/2586e147

Branch: refs/heads/master
Commit: 2586e1474822848eab6c3996d687e7d5bffeb16b
Parents: b2d7332
Author: wang sheng <[email protected]>
Authored: Mon Jun 22 17:14:02 2015 +0800
Committer: wang wei <[email protected]>
Committed: Wed Jun 24 17:02:52 2015 +0800

----------------------------------------------------------------------
 src/proto/cluster.proto |  42 ++++++------
 src/proto/model.proto   | 158 ++++++++++++++++++++++---------------------
 2 files changed, 103 insertions(+), 97 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/2586e147/src/proto/cluster.proto
----------------------------------------------------------------------
diff --git a/src/proto/cluster.proto b/src/proto/cluster.proto
index 52cfd51..c2f941f 100644
--- a/src/proto/cluster.proto
+++ b/src/proto/cluster.proto
@@ -1,46 +1,46 @@
 package singa;
 
-message ClusterProto{
-  optional int32 nworker_groups=1 [default=1];
-  optional int32 nserver_groups=2 [default=1];
-  optional int32 nworkers_per_group=3 [default=1];
-  optional int32 nservers_per_group=4 [default=1];
-  optional int32 nworkers_per_procs=5 [default=1];
-  optional int32 nservers_per_procs=6 [default=1];
+message ClusterProto {
+  optional int32 nworker_groups = 1;
+  optional int32 nserver_groups = 2;
+  optional int32 nworkers_per_group = 3 [default = 1];
+  optional int32 nservers_per_group = 4 [default = 1];
+  optional int32 nworkers_per_procs = 5 [default = 1];
+  optional int32 nservers_per_procs = 6 [default = 1];
 
   // Used in standalone mode, one ip or hostname per line
  // For YARN or Mesos version, the processes are allocated dynamically,
   // hence no need to specify the hosts statically
-  optional string hostfile=10 [default=""];
+  optional string hostfile = 10 [default=""];
 
   // servers and workers in different processes?
-  optional bool server_worker_separate=11 [default=false];
+  optional bool server_worker_separate = 11 [default = false];
 
   // port number is used by ZeroMQ
-  optional int32 start_port=13 [default=6723];
+  optional int32 start_port = 13 [default = 6723];
   // local workspace, train/val/test shards, checkpoint files
-  required string workspace=14;
+  required string workspace = 14;
   // relative path to workspace. if not set, use the default dir of glog
-  optional string log_dir=15 [default="/tmp"];
+  optional string log_dir = 15 [default="/tmp"];
   // ip/hostname : port [, ip/hostname : port]
-  optional string zookeeper_host=16 [default="localhost:2181"];
+  optional string zookeeper_host = 16 [default = "localhost:2181"];
   // message size limit, default 1MB
-  // optional int32 largest_message=20 [default=1048576];
-  // optional float bandwidth=21 [default=100];//MB/s
+  // optional int32 largest_message = 20 [default = 1048576];
+  // optional float bandwidth = 21 [default = 100];  // MB/s
 
        //repeated ServerTopology server_group = 20;
 
-  optional int32 stub_timeout=30 [default=5000];
-  optional int32 worker_timeout=31 [default=5000];
-  optional int32 server_timeout=32 [default=5000];
+  optional int32 stub_timeout = 30 [default = 5000];
+  optional int32 worker_timeout = 31 [default = 5000];
+  optional int32 server_timeout = 32 [default = 5000];
 
   // conduct updates at server side; otherwise do it at worker side
-  optional bool server_update=40 [default=true];
+  optional bool server_update = 40 [default = true];
   // share memory space between worker groups in one procs
-  optional bool share_memory=41 [default=true];
+  optional bool share_memory = 41 [default = true];
 }
 
-message ServerTopology{
+message ServerTopology {
   // group id
        required int32 id = 1;
        optional int32 sync_interval = 2;

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/2586e147/src/proto/model.proto
----------------------------------------------------------------------
diff --git a/src/proto/model.proto b/src/proto/model.proto
index 8cb45a3..1e12087 100644
--- a/src/proto/model.proto
+++ b/src/proto/model.proto
@@ -1,14 +1,15 @@
 package singa;
 enum Phase {
   kTrain = 0;
-  kValidation=1;
+  kValidation = 1;
   kTest= 2;
   kPositive = 3;
   kNegative = 4;
 }
-enum ShareOption{
-  kValueOnly=0;
-  kWhole=1;
+
+enum ShareOption {
+  kValueOnly = 0;
+  kWhole = 1;
 };
 message ModelProto{
   required string name = 1;
@@ -29,9 +30,7 @@ message ModelProto{
   optional int32 checkpoint_after_steps = 15 [default = 0];
   // frequency of test
   optional int32 checkpoint_frequency = 16 [default = 0];
-  optional bool prefetch=18[default=true];
-
-
+  optional bool prefetch=18[default = true];
   // total num of steps for training
   required int32 train_steps = 20;
   // total num of steps for validation
@@ -44,7 +43,7 @@ message ModelProto{
   required UpdaterProto updater=31;
   // There are two basic algorithms for calculating gradients.
   // Different deep learning models use different algorithms.
-  enum GradCalcAlg{
+  enum GradCalcAlg {
     kBackPropagation = 1;
     kContrastiveDivergence = 2;
   }
@@ -54,9 +53,9 @@ message ModelProto{
   optional int32 warmup_steps=50 [default=0];
 }
 
-message NetProto{
-  repeated LayerProto layer=1;
-  optional PartitionType partition_type=3 [default=kNone];
+message NetProto {
+  repeated LayerProto layer = 1;
+  optional PartitionType partition_type = 3 [default = kNone];
 }
 
 message ParamProto {
@@ -67,15 +66,12 @@ message ParamProto {
   // in most situations, user do not need to config this,
   // the program will calculate it
   repeated int32 shape = 3;
-
  // split the parameter into multiple sub params for serialization and
   // transferring (Google Protobuf has size limit)
-  optional int32 split_threshold=4 [default=5000000];
+  optional int32 split_threshold = 4 [default = 5000000];
   // partition dimension, -1 for no partition
-  optional int32 partition_dim=5 [default =-1];
-
-  optional int32 owner=6;
-
+  optional int32 partition_dim = 5 [default = -1];
+  optional int32 owner = 6;
   enum InitMethod {
     kConstant = 0;
     // sample gaussian with std and mean
@@ -107,15 +103,15 @@ message ParamProto {
   optional float mean = 11 [default = 0];
   optional float std = 12 [default = 1];
   // multiplied on the global learning rate.
-  optional float learning_rate_multiplier =13 [default=1];
+  optional float learning_rate_multiplier = 13 [default = 1];
   // multiplied on the global weight decay.
-  optional float weight_decay_multiplier =14 [default=1];
+  optional float weight_decay_multiplier = 14 [default = 1];
 }
 
-message BlobProtos{
-  repeated BlobProto blobs=1;
-  repeated int32 ids=2;
-  repeated string names=3;
+message BlobProtos {
+  repeated BlobProto blobs = 1;
+  repeated int32 ids = 2;
+  repeated string names = 3;
 }
 
 enum PartitionType{
@@ -139,17 +135,15 @@ message LayerProto {
   optional string datablob=7;
   // can be pos/neg neuron value for CD, neuron value/grad for BP
   //repeated DAryProto ary = 10;
-  repeated string share_ary =11;
+  repeated string share_ary = 11;
   // parameters, e.g., weight matrix or bias vector
   repeated ParamProto param = 12;
   // names of parameters shared from other layers
-  repeated string share_param=13;
-
+  repeated string share_param = 13;
   // All layers are included in the net structure for training phase by default.
   // Layers, e.g., computing performance metrics for test phase, can be excluded
   // by this field which defines in which phase this layer should be excluded.
   repeated Phase exclude = 20;
-
   // hyper-parameters for layers
   optional ConvolutionProto convolution_param = 21;
   optional ConcateProto concate_param = 31;
@@ -159,13 +153,13 @@ message LayerProto {
   optional LRNProto lrn_param = 25;
   optional MnistProto mnist_param= 26;
   optional PoolingProto pooling_param = 27;
-  repeated LayerProto sublayers=35;
-  optional SliceProto slice_param = 32;
-  optional SplitProto split_param = 33;
   optional ReLUProto relu_param = 28;
-  optional RGBImage rgbimage_param=34;
   optional SoftmaxLossProto softmaxloss_param = 29;
-  optional TanhProto tanh_param=30;
+  optional TanhProto tanh_param = 30;
+  optional SliceProto slice_param = 32;
+  optional SplitProto split_param = 33;
+  optional RGBImage rgbimage_param = 34;
+  repeated LayerProto sublayers = 35;
 }
 
 message RGBImage {
@@ -177,10 +171,11 @@ message RGBImage {
 message SplitProto{
   required int32 num_splits=1;
 }
+
 // scaled tan: A*tan(B*x)
-message TanhProto{
-  optional float outer_scale=1 [default=1.0];
-  optional float inner_scale=2 [default=1.0];
+message TanhProto {
+  optional float outer_scale = 1 [default = 1.0];
+  optional float inner_scale = 2 [default = 1.0];
 }
 
 // Message that stores parameters used by SoftmaxLossProto
@@ -188,18 +183,22 @@ message SoftmaxLossProto {
   // accuracy is not computed by default, unless topk>0;
   // When computing accuracy, count as correct by comparing the true label to
   // the top k scoring classes.
-  optional int32 topk = 1 [default=1] ;
-  optional float scale=2 [default=1];
+  optional int32 topk = 1 [default = 1];
+  optional float scale= 2 [default = 1];
 }
+
 // Message that stores parameters used by ConvolutionLayer
 message ConvolutionProto {
   required uint32 num_filters = 1; // The number of outputs for the layer
   optional bool bias_term = 2 [default = true]; // whether to have bias terms
   // Pad, kernel size, and stride are all given as a single value for equal
   // dimensions in height and width or as Y, X pairs.
-  optional uint32 pad = 3 [default = 0]; // The padding size (equal in Y, X)
-  optional uint32 stride = 4 [default = 1]; // The stride (equal in Y, X)
-  required uint32 kernel= 5; // The kernel height/width
+  // The padding size (equal in Y, X)
+  optional uint32 pad = 3 [default = 0];
+  // The stride (equal in Y, X)
+  optional uint32 stride = 4 [default = 1];
+  // The kernel height/width
+  required uint32 kernel= 5;
 }
 
 message ConcateProto{
@@ -215,28 +214,30 @@ message DataProto {
   // Specify the batch size.
   required uint32 batchsize = 4;
   // skip [0,random_skip] records
-  optional uint32 random_skip=5 [default=0];
+  optional uint32 random_skip = 5 [default = 0];
 }
 
 message MnistProto {
   // elastic distortion
-  optional int32 kernel=1 [default=0];
-  optional float sigma=2 [default=0];
-  optional float alpha=3 [default=0];
+  optional int32 kernel = 1 [default = 0];
+  optional float sigma = 2 [default = 0];
+  optional float alpha = 3 [default = 0];
   // rotation or horizontal shearing
-  optional float beta=4 [default=0];
+  optional float beta = 4 [default = 0];
   // scaling
-  optional float gamma=5 [default=0];
+  optional float gamma = 5 [default = 0];
   // scale to this size as input for deformation
-  optional int32 resize=6 [default=0] ;
-  optional int32 elastic_freq=7 [default=0];
-  optional float norm_a=8 [default=1];
-  optional float norm_b=9 [default=0];
+  optional int32 resize = 6 [default = 0] ;
+  optional int32 elastic_freq = 7 [default = 0];
+  optional float norm_a = 8 [default = 1];
+  optional float norm_b = 9 [default = 0];
 }
+
 // Message that stores parameters used by DropoutLayer
 message DropoutProto {
   optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio
 }
+
 // Message that stores parameters used by InnerProductLayer
 message InnerProductProto {
   required uint32 num_output = 1; // The number of outputs for the layer
@@ -253,7 +254,7 @@ message LRNProto {
     WITHIN_CHANNEL = 1;
   }
   optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS];
-  optional float knorm =5 [default=1.0];
+  optional float knorm =5 [default = 1.0];
 }
 
 // Message that stores parameters used by PoolingLayer
@@ -262,18 +263,23 @@ message PoolingProto {
     MAX = 0;
     AVE = 1;
   }
-  optional PoolMethod pool = 1 [default = MAX]; // The pooling method
+  // The pooling method
+  optional PoolMethod pool = 1 [default = MAX];
   // Pad, kernel size, and stride are all given as a single value for equal
   // dimensions in height and width or as Y, X pairs.
-  required uint32 kernel= 2; // The kernel size (square)
-  optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X)
-  optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X)
+  // The kernel size (square)
+  required uint32 kernel= 2;
+  // The padding size (equal in Y, X)
+  optional uint32 pad = 4 [default = 0];
+  // The stride (equal in Y, X)
+  optional uint32 stride = 3 [default = 1];
 }
 
 message SliceProto{
   required int32 slice_dimension=1;
   required int32 slice_num=2;
 }
+
 // Message that stores parameters used by ReLULayer
 message ReLUProto {
   // Allow non-zero slope for negative inputs to speed up optimization
@@ -284,14 +290,12 @@ message ReLUProto {
   optional float negative_slope = 1 [default = 0];
 }
 
-
-
 message Record {
-  enum Type{
-    kSingleLabelImage=0;
+  enum Type {
+    kSingleLabelImage = 0;
   }
-  optional Type type=1 [default=kSingleLabelImage];
-  optional SingleLabelImageRecord image=2;
+  optional Type type = 1 [default = kSingleLabelImage];
+  optional SingleLabelImageRecord image = 2;
 }
 
 // to import caffe's lmdb dataset
@@ -307,16 +311,17 @@ message Datum {
   // If true data contains an encoded image that need to be decoded
   optional bool encoded = 7 [default = false];
 }
-message SingleLabelImageRecord{
-  repeated int32 shape=1;
-  optional int32 label=2;
-  optional bytes pixel=3;
-  repeated float data=4;
+
+message SingleLabelImageRecord {
+  repeated int32 shape = 1;
+  optional int32 label = 2;
+  optional bytes pixel = 3;
+  repeated float data = 4;
 }
 
 message UpdaterProto {
-  optional float momentum=4 [default=0];
-  optional float weight_decay=5 [default=0];
+  optional float momentum = 4 [default = 0];
+  optional float weight_decay = 5 [default = 0];
   // used in changing learning rate
   optional float gamma = 6 [default=1];
   optional float pow=7 [default=0];
@@ -327,21 +332,22 @@ message UpdaterProto {
   optional int32 learning_rate_change_frequency = 14 [default=0];
   enum ChangeProto {
     kFixed = 0;
-    kInverse_t= 1;
-    kInverse= 2;
+    kInverse_t = 1;
+    kInverse = 2;
     kExponential = 3;
     kLinear = 4;
     kStep = 5;
-    kFixedStep=6;
+    kFixedStep = 6;
   }
   optional ChangeProto learning_rate_change_method = 16 [default = kFixed];
-  optional int32 sync_frequency=17 [default=1];
+  optional int32 sync_frequency = 17 [default = 1];
   // warmup the parameters and then send to parameter servers.
-  optional float moving_rate=26 [default=0];
-  optional string param_type=27[default="Param"];
-  repeated int32 step=28;
-  repeated float step_lr=29;
+  optional float moving_rate = 26 [default = 0];
+  optional string param_type = 27 [default = "Param"];
+  repeated int32 step = 28;
+  repeated float step_lr = 29;
 }
+
 message BlobProto {
   optional int32 num = 1 [default = 0];
   optional int32 channels = 2 [default = 0];

Reply via email to