Modified: incubator/singa/site/trunk/docs/model_zoo/index.html
URL: 
http://svn.apache.org/viewvc/incubator/singa/site/trunk/docs/model_zoo/index.html?rev=1862313&r1=1862312&r2=1862313&view=diff
==============================================================================
--- incubator/singa/site/trunk/docs/model_zoo/index.html (original)
+++ incubator/singa/site/trunk/docs/model_zoo/index.html Sat Jun 29 14:42:24 
2019
@@ -104,6 +104,7 @@
 <li class="toctree-l1 current"><a class="reference internal" 
href="../index.html">Documentation</a><ul class="current">
 <li class="toctree-l2"><a class="reference internal" 
href="../installation.html">Installation</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="../software_stack.html">Software Stack</a></li>
+<li class="toctree-l2"><a class="reference internal" 
href="../benchmark.html">Benchmark for Distributed training</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="../device.html">Device</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="../tensor.html">Tensor</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="../layer.html">Layer</a></li>
@@ -121,13 +122,7 @@
 <li class="toctree-l2 current"><a class="current reference internal" 
href="#">Model Zoo</a><ul>
 <li class="toctree-l3"><a class="reference internal" 
href="cifar10/README.html">Train CNN over Cifar-10</a></li>
 <li class="toctree-l3"><a class="reference internal" 
href="char-rnn/README.html">Train Char-RNN over plain text</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="mnist/README.html">Train a RBM model against MNIST dataset</a></li>
 <li class="toctree-l3"><a class="reference internal" 
href="imagenet/alexnet/README.html">Train AlexNet over ImageNet</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="imagenet/densenet/README.html">name: DenseNet models on ImageNet
-SINGA version: 1.1.1
-SINGA commit:
-license: 
https://github.com/pytorch/vision/blob/master/torchvision/models/densenet.py</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="imagenet/densenet/README.html#image-classification-using-densenet">Image 
Classification using DenseNet</a></li>
 <li class="toctree-l3"><a class="reference internal" 
href="imagenet/googlenet/README.html">name: GoogleNet on ImageNet
 SINGA version: 1.0.1
 SINGA commit: 8c990f7da2de220e8a012c6a8ecc897dc7532744
@@ -135,24 +130,6 @@ parameter_url: https://s3-ap-southeast-1
 parameter_sha1: 0a88e8948b1abca3badfd8d090d6be03f8d7655d
 license: unrestricted 
https://github.com/BVLC/caffe/tree/master/models/bvlc_googlenet</a></li>
 <li class="toctree-l3"><a class="reference internal" 
href="imagenet/googlenet/README.html#image-classification-using-googlenet">Image
 Classification using GoogleNet</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="imagenet/inception/README.html">name: Inception V4 on ImageNet
-SINGA version: 1.1.1
-SINGA commit:
-parameter_url: 
https://s3-ap-southeast-1.amazonaws.com/dlfile/inception_v4.tar.gz
-parameter_sha1: 5fdd6f5d8af8fd10e7321d9b38bb87ef14e80d56
-license: https://github.com/tensorflow/models/tree/master/slim</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="imagenet/inception/README.html#image-classification-using-inception-v4">Image
 Classification using Inception V4</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="imagenet/resnet/README.html">name: Resnets on ImageNet
-SINGA version: 1.1
-SINGA commit: 45ec92d8ffc1fa1385a9307fdf07e21da939ee2f
-parameter_url: 
https://s3-ap-southeast-1.amazonaws.com/dlfile/resnet/resnet-18.tar.gz
-license: Apache V2, 
https://github.com/facebook/fb.resnet.torch/blob/master/LICENSE</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="imagenet/resnet/README.html#image-classification-using-residual-networks">Image
 Classification using Residual Networks</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="imagenet/vgg/README.html">name: VGG models on ImageNet
-SINGA version: 1.1.1
-SINGA commit:
-license: 
https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="imagenet/vgg/README.html#image-classification-using-vgg">Image 
Classification using VGG</a></li>
 </ul>
 </li>
 </ul>
@@ -257,10 +234,6 @@ license: https://github.com/pytorch/visi
 <li class="toctree-l2"><a class="reference internal" 
href="char-rnn/README.html#instructions">Instructions</a></li>
 </ul>
 </li>
-<li class="toctree-l1"><a class="reference internal" 
href="mnist/README.html">Train a RBM model against MNIST dataset</a><ul>
-<li class="toctree-l2"><a class="reference internal" 
href="mnist/README.html#running-instructions">Running instructions</a></li>
-</ul>
-</li>
 <li class="toctree-l1"><a class="reference internal" 
href="imagenet/alexnet/README.html">Train AlexNet over ImageNet</a><ul>
 <li class="toctree-l2"><a class="reference internal" 
href="imagenet/alexnet/README.html#instructions">Instructions</a><ul>
 <li class="toctree-l3"><a class="reference internal" 
href="imagenet/alexnet/README.html#compile-singa">Compile SINGA</a></li>
@@ -271,15 +244,6 @@ license: https://github.com/pytorch/visi
 </li>
 </ul>
 </li>
-<li class="toctree-l1"><a class="reference internal" 
href="imagenet/densenet/README.html">name: DenseNet models on ImageNet
-SINGA version: 1.1.1
-SINGA commit:
-license: 
https://github.com/pytorch/vision/blob/master/torchvision/models/densenet.py</a></li>
-<li class="toctree-l1"><a class="reference internal" 
href="imagenet/densenet/README.html#image-classification-using-densenet">Image 
Classification using DenseNet</a><ul>
-<li class="toctree-l2"><a class="reference internal" 
href="imagenet/densenet/README.html#instructions">Instructions</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="imagenet/densenet/README.html#details">Details</a></li>
-</ul>
-</li>
 <li class="toctree-l1"><a class="reference internal" 
href="imagenet/googlenet/README.html">name: GoogleNet on ImageNet
 SINGA version: 1.0.1
 SINGA commit: 8c990f7da2de220e8a012c6a8ecc897dc7532744
@@ -291,36 +255,6 @@ license: unrestricted https://github.com
 <li class="toctree-l2"><a class="reference internal" 
href="imagenet/googlenet/README.html#details">Details</a></li>
 </ul>
 </li>
-<li class="toctree-l1"><a class="reference internal" 
href="imagenet/inception/README.html">name: Inception V4 on ImageNet
-SINGA version: 1.1.1
-SINGA commit:
-parameter_url: 
https://s3-ap-southeast-1.amazonaws.com/dlfile/inception_v4.tar.gz
-parameter_sha1: 5fdd6f5d8af8fd10e7321d9b38bb87ef14e80d56
-license: https://github.com/tensorflow/models/tree/master/slim</a></li>
-<li class="toctree-l1"><a class="reference internal" 
href="imagenet/inception/README.html#image-classification-using-inception-v4">Image
 Classification using Inception V4</a><ul>
-<li class="toctree-l2"><a class="reference internal" 
href="imagenet/inception/README.html#instructions">Instructions</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="imagenet/inception/README.html#details">Details</a></li>
-</ul>
-</li>
-<li class="toctree-l1"><a class="reference internal" 
href="imagenet/resnet/README.html">name: Resnets on ImageNet
-SINGA version: 1.1
-SINGA commit: 45ec92d8ffc1fa1385a9307fdf07e21da939ee2f
-parameter_url: 
https://s3-ap-southeast-1.amazonaws.com/dlfile/resnet/resnet-18.tar.gz
-license: Apache V2, 
https://github.com/facebook/fb.resnet.torch/blob/master/LICENSE</a></li>
-<li class="toctree-l1"><a class="reference internal" 
href="imagenet/resnet/README.html#image-classification-using-residual-networks">Image
 Classification using Residual Networks</a><ul>
-<li class="toctree-l2"><a class="reference internal" 
href="imagenet/resnet/README.html#instructions">Instructions</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="imagenet/resnet/README.html#details">Details</a></li>
-</ul>
-</li>
-<li class="toctree-l1"><a class="reference internal" 
href="imagenet/vgg/README.html">name: VGG models on ImageNet
-SINGA version: 1.1.1
-SINGA commit:
-license: 
https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py</a></li>
-<li class="toctree-l1"><a class="reference internal" 
href="imagenet/vgg/README.html#image-classification-using-vgg">Image 
Classification using VGG</a><ul>
-<li class="toctree-l2"><a class="reference internal" 
href="imagenet/vgg/README.html#instructions">Instructions</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="imagenet/vgg/README.html#details">Details</a></li>
-</ul>
-</li>
 </ul>
 </div>
 </div>

Modified: incubator/singa/site/trunk/docs/model_zoo/mnist/README.html
URL: 
http://svn.apache.org/viewvc/incubator/singa/site/trunk/docs/model_zoo/mnist/README.html?rev=1862313&r1=1862312&r2=1862313&view=diff
==============================================================================
--- incubator/singa/site/trunk/docs/model_zoo/mnist/README.html (original)
+++ incubator/singa/site/trunk/docs/model_zoo/mnist/README.html Sat Jun 29 
14:42:24 2019
@@ -36,8 +36,6 @@
   <link rel="stylesheet" href="../../../_static/pygments.css" type="text/css" 
/>
     <link rel="index" title="Index" href="../../../genindex.html" />
     <link rel="search" title="Search" href="../../../search.html" />
-    <link rel="next" title="Train AlexNet over ImageNet" 
href="../imagenet/alexnet/README.html" />
-    <link rel="prev" title="Train Char-RNN over plain text" 
href="../char-rnn/README.html" />
     <link href="../../../_static/style.css" rel="stylesheet" type="text/css">
     <!--link href="../../../_static/fontawesome-all.min.css" rel="stylesheet" 
type="text/css"-->
        <link rel="stylesheet" 
href="https://use.fontawesome.com/releases/v5.0.13/css/all.css"; 
integrity="sha384-DNOHZ68U8hZfKXOrtjWvjxusGo9WQnrNx2sqG0tfsghAvtVlRW3tvkXWZh58N9jp"
 crossorigin="anonymous">
@@ -100,66 +98,8 @@
               
             
             
-              <ul class="current">
-<li class="toctree-l1 current"><a class="reference internal" 
href="../../index.html">Documentation</a><ul class="current">
-<li class="toctree-l2"><a class="reference internal" 
href="../../installation.html">Installation</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="../../software_stack.html">Software Stack</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="../../device.html">Device</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="../../tensor.html">Tensor</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="../../layer.html">Layer</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="../../net.html">FeedForward Net</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="../../initializer.html">Initializer</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="../../loss.html">Loss</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="../../metric.html">Metric</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="../../optimizer.html">Optimizer</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="../../autograd.html">Autograd in Singa</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="../../data.html">Data</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="../../image_tool.html">Image Tool</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="../../snapshot.html">Snapshot</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="../../converter.html">Caffe Converter</a></li>
-<li class="toctree-l2"><a class="reference internal" 
href="../../utils.html">Utils</a></li>
-<li class="toctree-l2 current"><a class="reference internal" 
href="../index.html">Model Zoo</a><ul class="current">
-<li class="toctree-l3"><a class="reference internal" 
href="../cifar10/README.html">Train CNN over Cifar-10</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="../char-rnn/README.html">Train Char-RNN over plain text</a></li>
-<li class="toctree-l3 current"><a class="current reference internal" 
href="#">Train a RBM model against MNIST dataset</a><ul>
-<li class="toctree-l4"><a class="reference internal" 
href="#running-instructions">Running instructions</a></li>
-</ul>
-</li>
-<li class="toctree-l3"><a class="reference internal" 
href="../imagenet/alexnet/README.html">Train AlexNet over ImageNet</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="../imagenet/densenet/README.html">name: DenseNet models on ImageNet
-SINGA version: 1.1.1
-SINGA commit:
-license: 
https://github.com/pytorch/vision/blob/master/torchvision/models/densenet.py</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="../imagenet/densenet/README.html#image-classification-using-densenet">Image
 Classification using DenseNet</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="../imagenet/googlenet/README.html">name: GoogleNet on ImageNet
-SINGA version: 1.0.1
-SINGA commit: 8c990f7da2de220e8a012c6a8ecc897dc7532744
-parameter_url: 
https://s3-ap-southeast-1.amazonaws.com/dlfile/bvlc_googlenet.tar.gz
-parameter_sha1: 0a88e8948b1abca3badfd8d090d6be03f8d7655d
-license: unrestricted 
https://github.com/BVLC/caffe/tree/master/models/bvlc_googlenet</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="../imagenet/googlenet/README.html#image-classification-using-googlenet">Image
 Classification using GoogleNet</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="../imagenet/inception/README.html">name: Inception V4 on ImageNet
-SINGA version: 1.1.1
-SINGA commit:
-parameter_url: 
https://s3-ap-southeast-1.amazonaws.com/dlfile/inception_v4.tar.gz
-parameter_sha1: 5fdd6f5d8af8fd10e7321d9b38bb87ef14e80d56
-license: https://github.com/tensorflow/models/tree/master/slim</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="../imagenet/inception/README.html#image-classification-using-inception-v4">Image
 Classification using Inception V4</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="../imagenet/resnet/README.html">name: Resnets on ImageNet
-SINGA version: 1.1
-SINGA commit: 45ec92d8ffc1fa1385a9307fdf07e21da939ee2f
-parameter_url: 
https://s3-ap-southeast-1.amazonaws.com/dlfile/resnet/resnet-18.tar.gz
-license: Apache V2, 
https://github.com/facebook/fb.resnet.torch/blob/master/LICENSE</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="../imagenet/resnet/README.html#image-classification-using-residual-networks">Image
 Classification using Residual Networks</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="../imagenet/vgg/README.html">name: VGG models on ImageNet
-SINGA version: 1.1.1
-SINGA commit:
-license: 
https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py</a></li>
-<li class="toctree-l3"><a class="reference internal" 
href="../imagenet/vgg/README.html#image-classification-using-vgg">Image 
Classification using VGG</a></li>
-</ul>
-</li>
-</ul>
-</li>
+              <ul>
+<li class="toctree-l1"><a class="reference internal" 
href="../../index.html">Documentation</a></li>
 <li class="toctree-l1"><a class="reference internal" 
href="../../../downloads.html">Download SINGA</a></li>
 <li class="toctree-l1"><a class="reference internal" 
href="../../../security.html">Security</a></li>
 </ul>
@@ -223,10 +163,6 @@ license: https://github.com/pytorch/visi
     
       <li><a href="../../../index.html">Docs</a> &raquo;</li>
         
-          <li><a href="../../index.html">Documentation</a> &raquo;</li>
-        
-          <li><a href="../index.html">Model Zoo</a> &raquo;</li>
-        
       <li>Train a RBM model against MNIST dataset</li>
     
     
@@ -244,24 +180,7 @@ license: https://github.com/pytorch/visi
           <div role="main" class="document" itemscope="itemscope" 
itemtype="http://schema.org/Article";>
            <div itemprop="articleBody">
             
-  <!--
-    Licensed to the Apache Software Foundation (ASF) under one
-    or more contributor license agreements.  See the NOTICE file
-    distributed with this work for additional information
-    regarding copyright ownership.  The ASF licenses this file
-    to you under the Apache License, Version 2.0 (the
-    "License"); you may not use this file except in compliance
-    with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing,
-    software distributed under the License is distributed on an
-    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-    KIND, either express or implied.  See the License for the
-    specific language governing permissions and limitations
-    under the License.
---><div class="section" id="train-a-rbm-model-against-mnist-dataset">
+  <div class="section" id="train-a-rbm-model-against-mnist-dataset">
 <h1>Train a RBM model against MNIST dataset<a class="headerlink" 
href="#train-a-rbm-model-against-mnist-dataset" title="Permalink to this 
headline">¶</a></h1>
<p>This example trains an RBM model using the
 MNIST dataset. The RBM model and its hyper-parameters are set following
@@ -269,9 +188,8 @@ MNIST dataset. The RBM model and its hyp
 <div class="section" id="running-instructions">
 <h2>Running instructions<a class="headerlink" href="#running-instructions" 
title="Permalink to this headline">¶</a></h2>
 <ol>
-<li><p class="first">Download the pre-processed <a class="reference external" 
href="https://github.com/mnielsen/neural-networks-and-deep-learning/raw/master/data/mnist.pkl.gz";>MNIST
 dataset</a></p>
-</li>
-<li><p class="first">Start the training</p>
+<li><p>Download the pre-processed <a class="reference external" 
href="https://github.com/mnielsen/neural-networks-and-deep-learning/raw/master/data/mnist.pkl.gz";>MNIST
 dataset</a></p></li>
+<li><p>Start the training</p>
 <div class="highlight-default notranslate"><div 
class="highlight"><pre><span></span> <span class="n">python</span> <span 
class="n">train</span><span class="o">.</span><span class="n">py</span> <span 
class="n">mnist</span><span class="o">.</span><span class="n">pkl</span><span 
class="o">.</span><span class="n">gz</span>
 </pre></div>
 </div>
@@ -291,15 +209,6 @@ the program with an additional argument<
           </div>
           <footer>
   
-    <div class="rst-footer-buttons" role="navigation" aria-label="footer 
navigation">
-      
-        <a href="../imagenet/alexnet/README.html" class="btn btn-neutral 
float-right" title="Train AlexNet over ImageNet" accesskey="n" rel="next">Next 
<span class="fa fa-arrow-circle-right"></span></a>
-      
-      
-        <a href="../char-rnn/README.html" class="btn btn-neutral float-left" 
title="Train Char-RNN over plain text" accesskey="p" rel="prev"><span class="fa 
fa-arrow-circle-left"></span> Previous</a>
-      
-    </div>
-  
 
   <hr/>
 

Modified: incubator/singa/site/trunk/docs/net.html
URL: 
http://svn.apache.org/viewvc/incubator/singa/site/trunk/docs/net.html?rev=1862313&r1=1862312&r2=1862313&view=diff
==============================================================================
--- incubator/singa/site/trunk/docs/net.html (original)
+++ incubator/singa/site/trunk/docs/net.html Sat Jun 29 14:42:24 2019
@@ -104,6 +104,7 @@
 <li class="toctree-l1 current"><a class="reference internal" 
href="index.html">Documentation</a><ul class="current">
 <li class="toctree-l2"><a class="reference internal" 
href="installation.html">Installation</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="software_stack.html">Software Stack</a></li>
+<li class="toctree-l2"><a class="reference internal" 
href="benchmark.html">Benchmark for Distributed training</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="device.html">Device</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="tensor.html">Tensor</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="layer.html">Layer</a></li>
@@ -203,8 +204,267 @@
           <div role="main" class="document" itemscope="itemscope" 
itemtype="http://schema.org/Article";>
            <div itemprop="articleBody">
             
-  <div class="section" id="feedforward-net">
-<h1>FeedForward Net<a class="headerlink" href="#feedforward-net" 
title="Permalink to this headline">¶</a></h1>
+  <div class="section" id="module-singa.net">
+<span id="feedforward-net"></span><h1>FeedForward Net<a class="headerlink" 
href="#module-singa.net" title="Permalink to this headline">¶</a></h1>
+<p>Neural net class for constructing nets from layers and providing access
+functions for net info, e.g., parameters.</p>
+<p>Example usages:</p>
+<div class="highlight-default notranslate"><div 
class="highlight"><pre><span></span><span class="kn">from</span> <span 
class="nn">singa</span> <span class="k">import</span> <span 
class="n">net</span> <span class="k">as</span> <span class="n">ffnet</span>
+<span class="kn">from</span> <span class="nn">singa</span> <span 
class="k">import</span> <span class="n">metric</span>
+<span class="kn">from</span> <span class="nn">singa</span> <span 
class="k">import</span> <span class="n">loss</span>
+<span class="kn">from</span> <span class="nn">singa</span> <span 
class="k">import</span> <span class="n">layer</span>
+<span class="kn">from</span> <span class="nn">singa</span> <span 
class="k">import</span> <span class="n">device</span>
+
+<span class="c1"># create net and add layers</span>
+<span class="n">net</span> <span class="o">=</span> <span 
class="n">ffnet</span><span class="o">.</span><span 
class="n">FeedForwardNet</span><span class="p">(</span><span 
class="n">loss</span><span class="o">.</span><span 
class="n">SoftmaxCrossEntropy</span><span class="p">(),</span> <span 
class="n">metric</span><span class="o">.</span><span 
class="n">Accuracy</span><span class="p">())</span>
+<span class="n">net</span><span class="o">.</span><span 
class="n">add</span><span class="p">(</span><span class="n">layer</span><span 
class="o">.</span><span class="n">Conv2D</span><span class="p">(</span><span 
class="s1">&#39;conv1&#39;</span><span class="p">,</span> <span 
class="mi">32</span><span class="p">,</span> <span class="mi">5</span><span 
class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span 
class="n">input_sample_shape</span><span class="o">=</span><span 
class="p">(</span><span class="mi">3</span><span class="p">,</span><span 
class="mi">32</span><span class="p">,</span><span class="mi">32</span><span 
class="p">,)))</span>
+<span class="n">net</span><span class="o">.</span><span 
class="n">add</span><span class="p">(</span><span class="n">layer</span><span 
class="o">.</span><span class="n">Activation</span><span 
class="p">(</span><span class="s1">&#39;relu1&#39;</span><span 
class="p">))</span>
+<span class="n">net</span><span class="o">.</span><span 
class="n">add</span><span class="p">(</span><span class="n">layer</span><span 
class="o">.</span><span class="n">MaxPooling2D</span><span 
class="p">(</span><span class="s1">&#39;pool1&#39;</span><span 
class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span 
class="mi">2</span><span class="p">))</span>
+<span class="n">net</span><span class="o">.</span><span 
class="n">add</span><span class="p">(</span><span class="n">layer</span><span 
class="o">.</span><span class="n">Flatten</span><span class="p">(</span><span 
class="s1">&#39;flat&#39;</span><span class="p">))</span>
+<span class="n">net</span><span class="o">.</span><span 
class="n">add</span><span class="p">(</span><span class="n">layer</span><span 
class="o">.</span><span class="n">Dense</span><span class="p">(</span><span 
class="s1">&#39;dense&#39;</span><span class="p">,</span> <span 
class="mi">10</span><span class="p">))</span>
+
+<span class="c1"># init parameters</span>
+<span class="k">for</span> <span class="n">p</span> <span class="ow">in</span> 
<span class="n">net</span><span class="o">.</span><span 
class="n">param_values</span><span class="p">():</span>
+    <span class="k">if</span> <span class="nb">len</span><span 
class="p">(</span><span class="n">p</span><span class="o">.</span><span 
class="n">shape</span><span class="p">)</span> <span class="o">==</span> <span 
class="mi">0</span><span class="p">:</span>
+        <span class="n">p</span><span class="o">.</span><span 
class="n">set_value</span><span class="p">(</span><span 
class="mi">0</span><span class="p">)</span>
+    <span class="k">else</span><span class="p">:</span>
+        <span class="n">p</span><span class="o">.</span><span 
class="n">gaussian</span><span class="p">(</span><span class="mi">0</span><span 
class="p">,</span> <span class="mf">0.01</span><span class="p">)</span>
+
+<span class="c1"># move net onto gpu</span>
+<span class="n">dev</span> <span class="o">=</span> <span 
class="n">device</span><span class="o">.</span><span 
class="n">create_cuda_gpu</span><span class="p">()</span>
+<span class="n">net</span><span class="o">.</span><span 
class="n">to_device</span><span class="p">(</span><span 
class="n">dev</span><span class="p">)</span>
+
+<span class="c1"># training (skipped)</span>
+
+<span class="c1"># do prediction after training</span>
+<span class="n">x</span> <span class="o">=</span> <span 
class="n">tensor</span><span class="o">.</span><span 
class="n">Tensor</span><span class="p">((</span><span class="mi">2</span><span 
class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span 
class="mi">32</span><span class="p">,</span> <span class="mi">32</span><span 
class="p">),</span> <span class="n">dev</span><span class="p">)</span>
+<span class="n">x</span><span class="o">.</span><span 
class="n">uniform</span><span class="p">(</span><span class="o">-</span><span 
class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span 
class="p">)</span>
+<span class="n">y</span> <span class="o">=</span> <span 
class="n">net</span><span class="o">.</span><span class="n">predict</span><span 
class="p">(</span><span class="n">x</span><span class="p">)</span>
+<span class="nb">print</span> <span class="n">tensor</span><span 
class="o">.</span><span class="n">to_numpy</span><span class="p">(</span><span 
class="n">y</span><span class="p">)</span>
+</pre></div>
+</div>
+<dl class="class">
+<dt id="singa.net.FeedForwardNet">
+<em class="property">class </em><code 
class="descclassname">singa.net.</code><code 
class="descname">FeedForwardNet</code><span 
class="sig-paren">(</span><em>loss=None</em>, <em>metric=None</em><span 
class="sig-paren">)</span><a class="headerlink" 
href="#singa.net.FeedForwardNet" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Bases: <code class="xref py py-class docutils literal 
notranslate"><span class="pre">object</span></code></p>
+<dl class="method">
+<dt id="singa.net.FeedForwardNet.to_device">
+<code class="descname">to_device</code><span 
class="sig-paren">(</span><em>dev</em><span class="sig-paren">)</span><a 
class="headerlink" href="#singa.net.FeedForwardNet.to_device" title="Permalink 
to this definition">¶</a></dt>
+<dd><p>Move the net onto the given device, including
+all parameters and intermediate data.</p>
+</dd></dl>
+
+<dl class="method">
+<dt id="singa.net.FeedForwardNet.add">
+<code class="descname">add</code><span class="sig-paren">(</span><em>lyr</em>, 
<em>src=None</em><span class="sig-paren">)</span><a class="headerlink" 
href="#singa.net.FeedForwardNet.add" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Append a layer into the layer list.</p>
+<p>This function will get the sample shape from the src layers to set up the
+newly added layer. For the first layer, it is set up outside. The calling
+function should ensure the correctness of the layer order. If src is
+None, the last layer is the src layer. If there are multiple src layers,
+the src is a list of the src layers.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>lyr</strong> (<a class="reference internal" 
href="layer.html#singa.layer.Layer" 
title="singa.layer.Layer"><em>Layer</em></a>) – the layer to be added</p></li>
+<li><p><strong>src</strong> (<a class="reference internal" 
href="layer.html#singa.layer.Layer" 
title="singa.layer.Layer"><em>Layer</em></a>) – the source layer of 
lyr</p></li>
+</ul>
+</dd>
+</dl>
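+<p>A minimal sketch (layer names are illustrative; it assumes the Split and
+Merge layers from singa.layer and that add() returns the added layer) of
+wiring a branched net via the src argument:</p>
+<div class="highlight-default notranslate"><div class="highlight"><pre><span></span># one src layer feeding two branches
+sp = net.add(layer.Split('split', 2))
+b1 = net.add(layer.Conv2D('conv_b1', 32, 3, 1), sp)
+b2 = net.add(layer.Conv2D('conv_b2', 32, 3, 1), sp)
+# multiple src layers are passed as a list
+net.add(layer.Merge('merge'), [b1, b2])
+</pre></div>
+</div>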
+</dd></dl>
+
+<dl class="method">
+<dt id="singa.net.FeedForwardNet.param_values">
+<code class="descname">param_values</code><span 
class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" 
href="#singa.net.FeedForwardNet.param_values" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Return a list of tensors for all parameters</p>
+</dd></dl>
+
+<dl class="method">
+<dt id="singa.net.FeedForwardNet.param_specs">
+<code class="descname">param_specs</code><span class="sig-paren">(</span><span 
class="sig-paren">)</span><a class="headerlink" 
href="#singa.net.FeedForwardNet.param_specs" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Return a list of ParamSpec for all parameters</p>
+</dd></dl>
+
+<dl class="method">
+<dt id="singa.net.FeedForwardNet.param_names">
+<code class="descname">param_names</code><span class="sig-paren">(</span><span 
class="sig-paren">)</span><a class="headerlink" 
href="#singa.net.FeedForwardNet.param_names" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Return a list for the names of all params</p>
+</dd></dl>
+
+<dl class="method">
+<dt id="singa.net.FeedForwardNet.train">
+<code class="descname">train</code><span class="sig-paren">(</span><em>x</em>, 
<em>y</em><span class="sig-paren">)</span><a class="headerlink" 
href="#singa.net.FeedForwardNet.train" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Run BP for one iteration.
+This method is deprecated. It is only kept for backward compatibility.
+The name of this method is confusing since it does not update parameters.
+Please use backprob() instead.
+The back propagation algorithm computes gradients but it does not train.</p>
+</dd></dl>
+
+<dl class="method">
+<dt id="singa.net.FeedForwardNet.backprob">
+<code class="descname">backprob</code><span 
class="sig-paren">(</span><em>x</em>, <em>y</em><span 
class="sig-paren">)</span><a class="headerlink" 
href="#singa.net.FeedForwardNet.backprob" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Run BP for one iteration.</p>
+<p>Currently this only supports nets with a single output layer and a single
+loss objective and metric.
+For multiple outputs (with multiple losses/metrics), please manually
+call forward, compute the loss/metric and call backward, as sketched below.
+backward() is also more memory efficient than this function.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>x</strong> – input data, a single input Tensor or a dict: 
layer name -&gt; Tensor</p></li>
+<li><p><strong>y</strong> – label data, a single input Tensor.</p></li>
+</ul>
+</dd>
+<dt class="field-even">Returns</dt>
+<dd class="field-even"><p>gradients of parameters and the loss and metric 
values.</p>
+</dd>
+</dl>
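+<p>A hedged sketch of the manual alternative for one iteration (it assumes a
+single-output net, data x and y, and an optimizer sgd prepared as in the
+examples on this page; the epoch and lr values are illustrative):</p>
+<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>from singa import loss
+
+softmax = loss.SoftmaxCrossEntropy()
+out = net.forward(True, x)            # forward in training mode
+lval = softmax.forward(True, out, y)  # loss values for this batch
+dy = softmax.backward()               # gradient w.r.t. the net output
+# net.backward(dy) generates (names, values, grads, layer_grads) per layer
+for (names, values, grads, _) in net.backward(dy):
+    for (n, v, g) in zip(names, values, grads):
+        sgd.apply_with_lr(1, 0.01, g, v, n)  # epoch 1, lr=0.01
+</pre></div>
+</div>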
+</dd></dl>
+
+<dl class="method">
+<dt id="singa.net.FeedForwardNet.evaluate">
+<code class="descname">evaluate</code><span 
class="sig-paren">(</span><em>x</em>, <em>y</em><span 
class="sig-paren">)</span><a class="headerlink" 
href="#singa.net.FeedForwardNet.evaluate" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Evaluate the loss and metric of the given data.</p>
+<p>Currently this only supports nets with a single output layer and a single
+loss objective and metric.
+TODO(wangwei) consider multiple loss objectives and metrics.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>x</strong> – input data, a single input Tensor or a dict: 
layer name -&gt; Tensor</p></li>
+<li><p><strong>y</strong> – label data, a single input Tensor.</p></li>
+</ul>
+</dd>
+</dl>
+</dd></dl>
+
+<dl class="method">
+<dt id="singa.net.FeedForwardNet.predict">
+<code class="descname">predict</code><span 
class="sig-paren">(</span><em>x</em><span class="sig-paren">)</span><a 
class="headerlink" href="#singa.net.FeedForwardNet.predict" title="Permalink to 
this definition">¶</a></dt>
+<dd><p>Forward the input data through each layer to get the values of the
+output layers.</p>
+<p>Currently this only supports nets with a single output layer.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><p><strong>x</strong> – input data, a single input 
Tensor or a dict: layer name -&gt; Tensor</p>
+</dd>
+<dt class="field-even">Returns</dt>
+<dd class="field-even"><p>a single output tensor as the prediction result.</p>
+</dd>
+</dl>
+</dd></dl>
+
+<dl class="method">
+<dt id="singa.net.FeedForwardNet.topo_sort">
+<code class="descname">topo_sort</code><span 
class="sig-paren">(</span><em>layers</em>, <em>src_of_layer</em><span 
class="sig-paren">)</span><a class="headerlink" 
href="#singa.net.FeedForwardNet.topo_sort" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Topological sort of layers.</p>
+<p>It tries to preserve the order of the input layers.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>layers</strong> – a list of layers; the layers from the 
output of the same
+layer (e.g., slice layer) should be added by users in the correct
+order; this function would not change their order.</p></li>
+<li><p><strong>src_of_layer</strong> – a dictionary: src layer name -&gt; a 
list of src layers</p></li>
+</ul>
+</dd>
+<dt class="field-even">Returns</dt>
+<dd class="field-even"><p>A list of ordered layer</p>
+</dd>
+</dl>
+</dd></dl>
+
+<dl class="method">
+<dt id="singa.net.FeedForwardNet.forward">
+<code class="descname">forward</code><span 
class="sig-paren">(</span><em>flag</em>, <em>x</em>, <em>output=[]</em>, 
<em>freeze=None</em><span class="sig-paren">)</span><a class="headerlink" 
href="#singa.net.FeedForwardNet.forward" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Forward the input(s) through every layer.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>flag</strong> – True for training; False for evaluation; 
could also be
+model_pb2.kTrain or model_pb2.kEval, or other values for future
+use.</p></li>
+<li><p><strong>x</strong> – a single SINGA tensor if there is a single 
input; otherwise, a
+dictionary: layer name-&gt; singa tensor, for each layer accepting
+input data. Do not associate a layer with an input tensor if it is
+connected from another layer. In that case, use a Dummy() layer
+to accept the input data and connect the dummy layer to this
+layer.</p></li>
+<li><p><strong>output</strong> (<em>list</em>) – a list of layer names whose 
output would be returned
+in addition to the default output.</p></li>
+<li><p><strong>freeze</strong> (<em>str</em>) – layer name, freeze all 
layers before this layer; flag
+is set to false for these layers.</p></li>
+</ul>
+</dd>
+<dt class="field-even">Returns</dt>
+<dd class="field-even"><p><dl class="simple">
+<dt>if there is only one output layer and output arg is empty, 
return</dt><dd><p>the result from the single output layer; otherwise, return a
+dictionary: layer name -&gt; output tensor(s)</p>
+</dd>
+</dl>
+</p>
+</dd>
+</dl>
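+<p>For instance, a short sketch (using the layer name 'conv1' from the module
+example above) of fetching an intermediate output during evaluation:</p>
+<div class="highlight-default notranslate"><div class="highlight"><pre><span></span># evaluation-mode forward; also return the output of 'conv1'
+ret = net.forward(False, x, output=['conv1'])
+feat = ret['conv1']  # ret maps layer names to their output tensor(s)
+</pre></div>
+</div>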
+</dd></dl>
+
+<dl class="method">
+<dt id="singa.net.FeedForwardNet.backward">
+<code class="descname">backward</code><span 
class="sig-paren">(</span><em>dy</em>, <em>output=[]</em>, 
<em>freeze=None</em><span class="sig-paren">)</span><a class="headerlink" 
href="#singa.net.FeedForwardNet.backward" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Run back-propagation after forward-propagation.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>dy</strong> – a single tensor if there is a single loss 
function; otherwise,
+a dictionary maps the name of the layer connecting to the loss
+function -&gt; gradient from the loss function. Do not associate a
+layer with a gradient tensor if it is connecting to another layer.
+In that case, connect this layer to a Dummy() layer and use the
+dummy layer to accept the gradient.</p></li>
+<li><p><strong>output</strong> (<em>list</em>) – a list of layer names whose 
output gradient would be
+returned in addition to the param gradient</p></li>
+<li><p><strong>freeze</strong> (<em>str</em>) – layer name, stop backward 
after this layer.</p></li>
+</ul>
+</dd>
+<dt class="field-even">Returns</dt>
+<dd class="field-even"><p>a geneartor iterator that generates
+(param_names, param_values, param_grads, layer_grads) after
+processing each layer h, where the first three lists are for h
+and the last item is a dictionary which maps
+layer name -&gt; its output gradient tensor(s). At the end of this
+function, the key set includes all layers in the output arg.</p>
+</dd>
+</dl>
+</dd></dl>
+
+<dl class="method">
+<dt id="singa.net.FeedForwardNet.save">
+<code class="descname">save</code><span class="sig-paren">(</span><em>f</em>, 
<em>buffer_size=10</em>, <em>use_pickle=False</em><span 
class="sig-paren">)</span><a class="headerlink" 
href="#singa.net.FeedForwardNet.save" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Save model parameters using io/snapshot.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>f</strong> – file name</p></li>
+<li><p><strong>buffer_size</strong> – size (MB) of the IO buffer; the default 
is 10MB. Please
+make sure it is larger than any single parameter object.</p></li>
+<li><p><strong>use_pickle</strong> (<em>Boolean</em>) – if true, it would 
use pickle for dumping;
+otherwise, it would use protobuf for serialization, which uses
+less space.</p></li>
+</ul>
+</dd>
+</dl>
+</dd></dl>
+
+<dl class="method">
+<dt id="singa.net.FeedForwardNet.load">
+<code class="descname">load</code><span class="sig-paren">(</span><em>f</em>, 
<em>buffer_size=10</em>, <em>use_pickle=False</em><span 
class="sig-paren">)</span><a class="headerlink" 
href="#singa.net.FeedForwardNet.load" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Load model parameters using io/snapshot.</p>
+<p>Please refer to the argument description in save().</p>
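+<p>A short usage sketch (the file name is illustrative; the snapshot
+implementation may append suffixes to it):</p>
+<div class="highlight-default notranslate"><div class="highlight"><pre><span></span># checkpoint with protobuf serialization (default) and a 20MB IO buffer
+net.save('model', buffer_size=20)
+# restore the parameters into a net with the same structure
+net.load('model', buffer_size=20)
+</pre></div>
+</div>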
+</dd></dl>
+
+</dd></dl>
+
 </div>
 
 

Modified: incubator/singa/site/trunk/docs/neural-net.html
URL: 
http://svn.apache.org/viewvc/incubator/singa/site/trunk/docs/neural-net.html?rev=1862313&r1=1862312&r2=1862313&view=diff
==============================================================================
--- incubator/singa/site/trunk/docs/neural-net.html (original)
+++ incubator/singa/site/trunk/docs/neural-net.html Sat Jun 29 14:42:24 2019
@@ -331,7 +331,7 @@ partitioned into two sub-layers.</p>
 <h3>Partitioning configuration<a class="headerlink" 
href="#partitioning-configuration" title="Permalink to this 
headline">¶</a></h3>
 <p>There are 4 partitioning schemes, whose configurations are given below,</p>
 <ol>
-<li><p class="first">Partitioning each singe layer into sub-layers on batch 
dimension (see
+<li><p>Partitioning each single layer into sub-layers on batch dimension (see
 below). It is enabled by configuring the partition dimension of the layer to
 0, e.g.,</p>
 <div class="highlight-default notranslate"><div 
class="highlight"><pre><span></span> <span class="c1"># with other fields 
omitted</span>
@@ -341,7 +341,7 @@ below). It is enabled by configuring the
 </pre></div>
 </div>
 </li>
-<li><p class="first">Partitioning each singe layer into sub-layers on feature 
dimension (see
+<li><p>Partitioning each single layer into sub-layers on feature dimension (see
 below).  It is enabled by configuring the partition dimension of the layer to
 1, e.g.,</p>
 <div class="highlight-default notranslate"><div 
class="highlight"><pre><span></span> <span class="c1"># with other fields 
omitted</span>
@@ -351,7 +351,7 @@ below).  It is enabled by configuring th
 </pre></div>
 </div>
 </li>
-<li><p class="first">Partitioning all layers into different subsets. It is 
enabled by
+<li><p>Partitioning all layers into different subsets. It is enabled by
 configuring the location ID of a layer, e.g.,</p>
 <div class="highlight-default notranslate"><div 
class="highlight"><pre><span></span> <span class="c1"># with other fields 
omitted</span>
  <span class="n">layer</span> <span class="p">{</span>
@@ -363,7 +363,7 @@ configuring the location ID of a layer,
 </pre></div>
 </div>
 </li>
-<li><p class="first">Hybrid partitioning of strategy 1, 2 and 3. The hybrid 
partitioning is
+<li><p>Hybrid partitioning of strategy 1, 2 and 3. The hybrid partitioning is
 useful for large models. An example application is to implement the
 <a class="reference external" href="http://arxiv.org/abs/1404.5997";>idea 
proposed by Alex</a>.
 Hybrid partitioning is configured like,</p>
@@ -394,16 +394,16 @@ are under test and will be released in l
 <h2>Parameter sharing<a class="headerlink" href="#parameter-sharing" 
title="Permalink to this headline">¶</a></h2>
 <p>Parameters can be shared in two cases,</p>
 <ul class="simple">
-<li>sharing parameters among layers via user configuration. For example, the
+<li><p>sharing parameters among layers via user configuration. For example, the
 visible layer and hidden layer of an RBM share the weight matrix, which is 
configured through
 the <code class="docutils literal notranslate"><span 
class="pre">share_from</span></code> field as shown in the above RBM 
configuration. The
-configurations must be the same (except name) for shared parameters.</li>
-<li>due to neural net partitioning, some <code class="docutils literal 
notranslate"><span class="pre">Param</span></code> objects are replicated into
+configurations must be the same (except name) for shared parameters.</p></li>
+<li><p>due to neural net partitioning, some <code class="docutils literal 
notranslate"><span class="pre">Param</span></code> objects are replicated into
 different workers, e.g., partitioning one layer on batch dimension. These
 workers share parameter values. SINGA controls this kind of parameter
-sharing automatically, users do not need to do any configuration.</li>
-<li>the <code class="docutils literal notranslate"><span 
class="pre">NeuralNet</span></code> for training and testing (and validation) 
share most layers
-, thus share <code class="docutils literal notranslate"><span 
class="pre">Param</span></code> values.</li>
+sharing automatically, users do not need to do any configuration.</p></li>
+<li><p>the <code class="docutils literal notranslate"><span 
class="pre">NeuralNet</span></code> for training and testing (and validation) 
share most layers,
+thus sharing <code class="docutils literal notranslate"><span 
class="pre">Param</span></code> values.</p></li>
 </ul>
 <p>If the shared <code class="docutils literal notranslate"><span 
class="pre">Param</span></code> instances resident in the same process (may in 
different
 threads), they use the same chunk of memory space for their values. But they

Modified: incubator/singa/site/trunk/docs/optimizer.html
URL: 
http://svn.apache.org/viewvc/incubator/singa/site/trunk/docs/optimizer.html?rev=1862313&r1=1862312&r2=1862313&view=diff
==============================================================================
--- incubator/singa/site/trunk/docs/optimizer.html (original)
+++ incubator/singa/site/trunk/docs/optimizer.html Sat Jun 29 14:42:24 2019
@@ -104,6 +104,7 @@
 <li class="toctree-l1 current"><a class="reference internal" 
href="index.html">Documentation</a><ul class="current">
 <li class="toctree-l2"><a class="reference internal" 
href="installation.html">Installation</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="software_stack.html">Software Stack</a></li>
+<li class="toctree-l2"><a class="reference internal" 
href="benchmark.html">Benchmark for Distributed training</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="device.html">Device</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="tensor.html">Tensor</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="layer.html">Layer</a></li>
@@ -203,8 +204,403 @@
           <div role="main" class="document" itemscope="itemscope" 
itemtype="http://schema.org/Article";>
            <div itemprop="articleBody">
             
-  <div class="section" id="optimizer">
-<h1>Optimizer<a class="headerlink" href="#optimizer" title="Permalink to this 
headline">¶</a></h1>
+  <div class="section" id="module-singa.optimizer">
+<span id="optimizer"></span><h1>Optimizer<a class="headerlink" 
href="#module-singa.optimizer" title="Permalink to this headline">¶</a></h1>
+<p>This module includes a set of optimizers for updating model parameters.</p>
+<p>Example usage:</p>
+<div class="highlight-default notranslate"><div 
class="highlight"><pre><span></span><span class="kn">from</span> <span 
class="nn">singa</span> <span class="k">import</span> <span 
class="n">optimizer</span>
+<span class="kn">from</span> <span class="nn">singa</span> <span 
class="k">import</span> <span class="n">tensor</span>
+
+<span class="n">sgd</span> <span class="o">=</span> <span 
class="n">optimizer</span><span class="o">.</span><span 
class="n">SGD</span><span class="p">(</span><span class="n">lr</span><span 
class="o">=</span><span class="mf">0.01</span><span class="p">,</span> <span 
class="n">momentum</span><span class="o">=</span><span 
class="mf">0.9</span><span class="p">,</span> <span 
class="n">weight_decay</span><span class="o">=</span><span 
class="mf">1e-4</span><span class="p">)</span>
+<span class="n">p</span> <span class="o">=</span> <span 
class="n">tensor</span><span class="o">.</span><span 
class="n">Tensor</span><span class="p">((</span><span class="mi">3</span><span 
class="p">,</span><span class="mi">5</span><span class="p">))</span>
+<span class="n">p</span><span class="o">.</span><span 
class="n">uniform</span><span class="p">(</span><span class="o">-</span><span 
class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span 
class="p">)</span>
+<span class="n">g</span> <span class="o">=</span> <span 
class="n">tensor</span><span class="o">.</span><span 
class="n">Tensor</span><span class="p">((</span><span class="mi">3</span><span 
class="p">,</span><span class="mi">5</span><span class="p">))</span>
+<span class="n">g</span><span class="o">.</span><span 
class="n">gaussian</span><span class="p">(</span><span class="mi">0</span><span 
class="p">,</span> <span class="mf">0.01</span><span class="p">)</span>
+
+<span class="n">sgd</span><span class="o">.</span><span 
class="n">apply</span><span class="p">(</span><span class="mi">1</span><span 
class="p">,</span> <span class="n">g</span><span class="p">,</span> <span 
class="n">p</span><span class="p">,</span> <span 
class="s1">&#39;param&#39;</span><span class="p">)</span>  <span class="c1"># 
use the global lr=0.01 for epoch 1</span>
+<span class="n">sgd</span><span class="o">.</span><span 
class="n">apply_with_lr</span><span class="p">(</span><span 
class="mi">2</span><span class="p">,</span> <span class="mf">0.03</span><span 
class="p">,</span> <span class="n">g</span><span class="p">,</span> <span 
class="n">p</span><span class="p">,</span> <span 
class="s1">&#39;param&#39;</span><span class="p">)</span>  <span class="c1"># 
use lr=0.03 for epoch 2</span>
+</pre></div>
+</div>
+<dl class="class">
+<dt id="singa.optimizer.Optimizer">
+<em class="property">class </em><code 
class="descclassname">singa.optimizer.</code><code 
class="descname">Optimizer</code><span 
class="sig-paren">(</span><em>lr=None</em>, <em>momentum=None</em>, 
<em>weight_decay=None</em>, <em>regularizer=None</em>, 
<em>constraint=None</em><span class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.Optimizer" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Bases: <code class="xref py py-class docutils literal 
notranslate"><span class="pre">object</span></code></p>
+<p>The base python optimizer class.</p>
+<p>Typically, an optimizer is used as follows:</p>
+<ol class="arabic simple">
+<li><p>construct the optimizer</p></li>
+<li><p>(optional) register each parameter with its specs.</p></li>
+<li><p>use the optimizer to update parameter values given parameter gradients
+and other optional info</p></li>
+</ol>
+<p>The subclasses should override the apply_with_lr function to do the real
+parameter update.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>lr</strong> (<em>float</em>) – a constant value for the 
learning rate</p></li>
+<li><p><strong>momentum</strong> (<em>float</em>) – a constant value for the 
momentum value</p></li>
+<li><p><strong>weight_decay</strong> (<em>float</em>) – the coefficient for 
+the L2 regularizer, which is
+mutually exclusive with ‘regularizer’.</p></li>
+<li><p><strong>regularizer</strong> – an instance of Regularizer or 
RegularizerConf; If set,
+regularization would be applied in apply_with_lr().
+Users can also do regularization outside.</p></li>
+<li><p><strong>constraint</strong> – an instance of Constraint or 
ConstraintConf; If set,
+constraint would be applied inside apply_with_lr(). Users can
+also apply constraint outside.</p></li>
+</ul>
+</dd>
+</dl>
+<dl class="method">
+<dt id="singa.optimizer.Optimizer.register">
+<code class="descname">register</code><span 
class="sig-paren">(</span><em>name</em>, <em>specs</em><span 
class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.Optimizer.register" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Register the param specs, including creating regularizer and
+constraint per param object. Param-specific regularizers and constraints
+have higher priority than the global ones. If all parameters share the
+same setting for learning rate, regularizer and constraint, then there
+is no need to call this function.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>name</strong> (<em>str</em>) – parameter name</p></li>
+<li><p><strong>specs</strong> (<em>ParamSpec</em>) – protobuf obj, including 
regularizer and
+constraint, multipliers for learning rate and weight decay.</p></li>
+</ul>
+</dd>
+</dl>
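+<p>A hedged sketch of registering a param-specific spec, reusing the sgd
+optimizer from the module example above (the field values are illustrative;
+lr_mult and decay_mult are assumed to follow the ParamSpec protobuf):</p>
+<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>from singa.proto import model_pb2
+
+spec = model_pb2.ParamSpec()
+spec.name = 'conv1/weight'
+spec.lr_mult = 2.0    # scale the global learning rate for this param
+spec.decay_mult = 0.0 # disable weight decay for this param
+sgd.register('conv1/weight', spec)
+</pre></div>
+</div>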
+</dd></dl>
+
+<dl class="method">
+<dt id="singa.optimizer.Optimizer.apply_regularizer_constraint">
+<code class="descname">apply_regularizer_constraint</code><span 
class="sig-paren">(</span><em>epoch</em>, <em>value</em>, <em>grad</em>, 
<em>name=None</em>, <em>step=-1</em><span class="sig-paren">)</span><a 
class="headerlink" 
href="#singa.optimizer.Optimizer.apply_regularizer_constraint" title="Permalink 
to this definition">¶</a></dt>
+<dd><p>Apply regularization and constraint if available.</p>
+<p>If both a global regularizer (constraint) and a param-specific
+regularizer (constraint) are set, the param-specific one is used.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>epoch</strong> (<em>int</em>) – training epoch ID</p></li>
+<li><p><strong>value</strong> (<a class="reference internal" 
href="tensor.html#singa.tensor.Tensor" 
title="singa.tensor.Tensor"><em>Tensor</em></a>) – parameter value 
Tensor</p></li>
+<li><p><strong>grad</strong> (<a class="reference internal" 
href="tensor.html#singa.tensor.Tensor" 
title="singa.tensor.Tensor"><em>Tensor</em></a>) – parameter gradient 
Tensor</p></li>
+<li><p><strong>name</strong> (<em>string</em>) – to get parameter specific 
regularizer or constraint</p></li>
+<li><p><strong>step</strong> (<em>int</em>) – iteration ID within one 
epoch</p></li>
+</ul>
+</dd>
+<dt class="field-even">Returns</dt>
+<dd class="field-even"><p>the updated gradient Tensor</p>
+</dd>
+</dl>
+</dd></dl>
+
+<dl class="method">
+<dt id="singa.optimizer.Optimizer.apply_with_lr">
+<code class="descname">apply_with_lr</code><span 
class="sig-paren">(</span><em>epoch</em>, <em>lr</em>, <em>grad</em>, 
<em>value</em>, <em>name=None</em>, <em>step=-1</em><span 
class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.Optimizer.apply_with_lr" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Update the parameters with the given learning rate if the grad is not
+empty.</p>
+<p>The subclass optimizer must override this function.
+This function does nothing if the grad is empty.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>epoch</strong> (<em>int</em>) – training epoch ID</p></li>
+<li><p><strong>lr</strong> (<em>float</em>) – learning rate</p></li>
+<li><p><strong>grad</strong> (<a class="reference internal" 
href="tensor.html#singa.tensor.Tensor" 
title="singa.tensor.Tensor"><em>Tensor</em></a>) – parameter gradient</p></li>
+<li><p><strong>value</strong> (<em>Tensor</em>) – parameter value</p></li>
+<li><p><strong>name</strong> (<em>string</em>) – parameter name to index 
parameter-specific
+updating rules (including regularizer and constraint)</p></li>
+<li><p><strong>step</strong> (<em>int</em>) – iteration ID within one 
epoch</p></li>
+</ul>
+</dd>
+<dt class="field-even">Returns</dt>
+<dd class="field-even"><p>updated parameter value</p>
+</dd>
+</dl>
+</dd></dl>
+
+<dl class="method">
+<dt id="singa.optimizer.Optimizer.apply">
+<code class="descname">apply</code><span 
class="sig-paren">(</span><em>epoch</em>, <em>grad</em>, <em>value</em>, 
<em>name=None</em>, <em>step=-1</em><span class="sig-paren">)</span><a 
class="headerlink" href="#singa.optimizer.Optimizer.apply" title="Permalink to 
this definition">¶</a></dt>
+<dd><p>Update the parameters, assuming the learning rate generator is set.</p>
+<p>The subclass optimizer does not need to override this function.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>epoch</strong> (<em>int</em>) – training epoch ID</p></li>
+<li><p><strong>grad</strong> (<a class="reference internal" 
href="tensor.html#singa.tensor.Tensor" 
title="singa.tensor.Tensor"><em>Tensor</em></a>) – parameter gradient</p></li>
+<li><p><strong>value</strong> (<em>Tensor</em>) – parameter value</p></li>
+<li><p><strong>name</strong> (<em>string</em>) – parameter name to retrieve 
parameter-specific
+updating rules (including regularizer and constraint)</p></li>
+<li><p><strong>step</strong> (<em>int</em>) – training iteration ID within 
one epoch</p></li>
+</ul>
+</dd>
+<dt class="field-even">Returns</dt>
+<dd class="field-even"><p>updated parameter value</p>
+</dd>
+</dl>
+</dd></dl>
+
+</dd></dl>
+
+<dl class="class">
+<dt id="singa.optimizer.SGD">
+<em class="property">class </em><code 
class="descclassname">singa.optimizer.</code><code 
class="descname">SGD</code><span class="sig-paren">(</span><em>lr=None</em>, 
<em>momentum=None</em>, <em>weight_decay=None</em>, <em>regularizer=None</em>, 
<em>constraint=None</em><span class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.SGD" title="Permalink to this definition">¶</a></dt>
+<dd><p>Bases: <a class="reference internal" href="#singa.optimizer.Optimizer" 
title="singa.optimizer.Optimizer"><code class="xref py py-class docutils 
literal notranslate"><span 
class="pre">singa.optimizer.Optimizer</span></code></a></p>
+<p>The vanilla Stochastic Gradient Descent algorithm with momentum.</p>
+<p>See the base Optimizer for all arguments.</p>
+<dl class="method">
+<dt id="singa.optimizer.SGD.apply_with_lr">
+<code class="descname">apply_with_lr</code><span 
class="sig-paren">(</span><em>epoch</em>, <em>lr</em>, <em>grad</em>, 
<em>value</em>, <em>name</em>, <em>step=-1</em><span 
class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.SGD.apply_with_lr" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Update the parameters with the given learning rate if the grad is not
+empty.</p>
+<p>The subclass optimizer must override this function.
+This function does nothing if the grad is empty.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>epoch</strong> (<em>int</em>) – training epoch ID</p></li>
+<li><p><strong>lr</strong> (<em>float</em>) – learning rate</p></li>
+<li><p><strong>grad</strong> (<a class="reference internal" 
href="tensor.html#singa.tensor.Tensor" 
title="singa.tensor.Tensor"><em>Tensor</em></a>) – parameter gradient</p></li>
+<li><p><strong>value</strong> (<em>Tensor</em>) – parameter value</p></li>
+<li><p><strong>name</strong> (<em>string</em>) – parameter name to index 
parameter-specific
+update rules (including regularizer and constraint)</p></li>
+<li><p><strong>step</strong> (<em>int</em>) – iteration ID within one 
epoch</p></li>
+</ul>
+</dd>
+<dt class="field-even">Returns</dt>
+<dd class="field-even"><p>updated parameter value</p>
+</dd>
+</dl>
+</dd></dl>
+
+</dd></dl>
+
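+<p>In a training loop the optimizer is applied once per parameter per iteration; a sketch with made-up parameter names, a fixed learning rate, and constant tensors standing in for real values and gradients:</p>
+<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>import numpy as np
+from singa import optimizer, tensor
+
+opt = optimizer.SGD(momentum=0.9, weight_decay=5e-4)
+names = ['conv1/weight', 'conv1/bias']  # made-up parameter names
+params = [tensor.from_numpy(np.zeros((4, 4), dtype=np.float32)) for _ in names]
+grads = [tensor.from_numpy(np.full((4, 4), 0.1, dtype=np.float32)) for _ in names]
+
+for epoch in range(2):
+    for step in range(3):
+        # in real training the grads come from back-propagation
+        for name, p, g in zip(names, params, grads):
+            opt.apply_with_lr(epoch, 0.01, g, p, name, step)
+</pre></div>
+</div>
+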
+<dl class="class">
+<dt id="singa.optimizer.Nesterov">
+<em class="property">class </em><code 
class="descclassname">singa.optimizer.</code><code 
class="descname">Nesterov</code><span 
class="sig-paren">(</span><em>lr=None</em>, <em>momentum=0.9</em>, 
<em>weight_decay=None</em>, <em>regularizer=None</em>, 
<em>constraint=None</em><span class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.Nesterov" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Bases: <a class="reference internal" href="#singa.optimizer.Optimizer" 
title="singa.optimizer.Optimizer"><code class="xref py py-class docutils 
literal notranslate"><span 
class="pre">singa.optimizer.Optimizer</span></code></a></p>
+<p>The SGD with Nesterov momentum.</p>
+<p>See the base Optimizer for all arguments.</p>
+<dl class="method">
+<dt id="singa.optimizer.Nesterov.apply_with_lr">
+<code class="descname">apply_with_lr</code><span 
class="sig-paren">(</span><em>epoch</em>, <em>lr</em>, <em>grad</em>, 
<em>value</em>, <em>name</em>, <em>step=-1</em><span 
class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.Nesterov.apply_with_lr" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Updates the parameters with the given learning rate if the gradient is
+not empty.</p>
+<p>The subclass optimizer must override this function.
+This function does nothing if the gradient is empty.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>epoch</strong> (<em>int</em>) – training epoch ID</p></li>
+<li><p><strong>lr</strong> (<em>float</em>) – learning rate</p></li>
+<li><p><strong>grad</strong> (<a class="reference internal" 
href="tensor.html#singa.tensor.Tensor" 
title="singa.tensor.Tensor"><em>Tensor</em></a>) – parameter gradient</p></li>
+<li><p><strong>value</strong> (<em>Tensor</em>) – parameter value</p></li>
+<li><p><strong>name</strong> (<em>string</em>) – parameter name to index 
parameter-specific
+update rules (including regularizer and constraint)</p></li>
+<li><p><strong>step</strong> (<em>int</em>) – iteration ID within one 
epoch</p></li>
+</ul>
+</dd>
+<dt class="field-even">Returns</dt>
+<dd class="field-even"><p>updated parameter value</p>
+</dd>
+</dl>
+</dd></dl>
+
+</dd></dl>
+
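+<p>Nesterov is a drop-in replacement for SGD with the same calling convention; a sketch reusing the illustrative tensors <em>p</em> and <em>g</em> from the first sketch:</p>
+<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>opt = optimizer.Nesterov(lr=0.01, momentum=0.9)
+p = opt.apply_with_lr(0, 0.01, g, p, 'w', 0)
+</pre></div>
+</div>
+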
+<dl class="class">
+<dt id="singa.optimizer.RMSProp">
+<em class="property">class </em><code 
class="descclassname">singa.optimizer.</code><code 
class="descname">RMSProp</code><span 
class="sig-paren">(</span><em>rho=0.9</em>, <em>epsilon=1e-08</em>, 
<em>lr=None</em>, <em>weight_decay=None</em>, <em>regularizer=None</em>, 
<em>constraint=None</em><span class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.RMSProp" title="Permalink to this definition">¶</a></dt>
+<dd><p>Bases: <a class="reference internal" href="#singa.optimizer.Optimizer" 
title="singa.optimizer.Optimizer"><code class="xref py py-class docutils 
literal notranslate"><span 
class="pre">singa.optimizer.Optimizer</span></code></a></p>
+<p>RMSProp optimizer.</p>
+<p>See the base Optimizer for all constructor args.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>rho</strong> (<em>float</em>) – decay rate of the running 
average of squared gradients, within [0, 1]</p></li>
+<li><p><strong>epsilon</strong> (<em>float</em>) – small value for 
preventing numerical errors</p></li>
+</ul>
+</dd>
+</dl>
+<dl class="method">
+<dt id="singa.optimizer.RMSProp.apply_with_lr">
+<code class="descname">apply_with_lr</code><span 
class="sig-paren">(</span><em>epoch</em>, <em>lr</em>, <em>grad</em>, 
<em>value</em>, <em>name</em>, <em>step=-1</em><span 
class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.RMSProp.apply_with_lr" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Updates the parameters with the given learning rate if the gradient is
+not empty.</p>
+<p>The subclass optimizer must override this function.
+This function does nothing if the gradient is empty.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>epoch</strong> (<em>int</em>) – training epoch ID</p></li>
+<li><p><strong>lr</strong> (<em>float</em>) – learning rate</p></li>
+<li><p><strong>grad</strong> (<a class="reference internal" 
href="tensor.html#singa.tensor.Tensor" 
title="singa.tensor.Tensor"><em>Tensor</em></a>) – parameter gradient</p></li>
+<li><p><strong>value</strong> (<em>Tensor</em>) – parameter value</p></li>
+<li><p><strong>name</strong> (<em>string</em>) – parameter name to index 
parameter-specific
+update rules (including regularizer and constraint)</p></li>
+<li><p><strong>step</strong> (<em>int</em>) – iteration ID within one 
epoch</p></li>
+</ul>
+</dd>
+<dt class="field-even">Returns</dt>
+<dd class="field-even"><p>updated parameter value</p>
+</dd>
+</dl>
+</dd></dl>
+
+</dd></dl>
+
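+<p>A construction sketch (values illustrative, tensors reused from the first sketch): <em>rho</em> sets how fast the running average of squared gradients decays, and <em>epsilon</em> guards the division:</p>
+<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>opt = optimizer.RMSProp(rho=0.9, epsilon=1e-8, lr=0.001)
+p = opt.apply_with_lr(0, 0.001, g, p, 'w', 0)
+</pre></div>
+</div>
+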
+<dl class="class">
+<dt id="singa.optimizer.AdaGrad">
+<em class="property">class </em><code 
class="descclassname">singa.optimizer.</code><code 
class="descname">AdaGrad</code><span 
class="sig-paren">(</span><em>epsilon=1e-08</em>, <em>lr=None</em>, 
<em>weight_decay=None</em>, <em>lr_gen=None</em>, <em>regularizer=None</em>, 
<em>constraint=None</em><span class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.AdaGrad" title="Permalink to this definition">¶</a></dt>
+<dd><p>Bases: <a class="reference internal" href="#singa.optimizer.Optimizer" 
title="singa.optimizer.Optimizer"><code class="xref py py-class docutils 
literal notranslate"><span 
class="pre">singa.optimizer.Optimizer</span></code></a></p>
+<p>AdaGrad optimizer.</p>
+<p>See the base Optimizer for all constructor args.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><p><strong>epsilon</strong> (<em>float</em>) – small 
number for preventing numeric error.</p>
+</dd>
+</dl>
+<dl class="method">
+<dt id="singa.optimizer.AdaGrad.apply_with_lr">
+<code class="descname">apply_with_lr</code><span 
class="sig-paren">(</span><em>epoch</em>, <em>lr</em>, <em>grad</em>, 
<em>value</em>, <em>name</em>, <em>step=-1</em><span 
class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.AdaGrad.apply_with_lr" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Updates the parameters with the given learning rate if the gradient is
+not empty.</p>
+<p>The subclass optimizer must override this function.
+This function does nothing if the gradient is empty.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>epoch</strong> (<em>int</em>) – training epoch ID</p></li>
+<li><p><strong>lr</strong> (<em>float</em>) – learning rate</p></li>
+<li><p><strong>grad</strong> (<a class="reference internal" 
href="tensor.html#singa.tensor.Tensor" 
title="singa.tensor.Tensor"><em>Tensor</em></a>) – parameter gradient</p></li>
+<li><p><strong>value</strong> (<em>Tensor</em>) – parameter value</p></li>
+<li><p><strong>name</strong> (<em>string</em>) – parameter name to index 
parameter-specific
+update rules (including regularizer and constraint)</p></li>
+<li><p><strong>step</strong> (<em>int</em>) – iteration ID within one 
epoch</p></li>
+</ul>
+</dd>
+<dt class="field-even">Returns</dt>
+<dd class="field-even"><p>updated parameter value</p>
+</dd>
+</dl>
+</dd></dl>
+
+</dd></dl>
+
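+<p>A construction sketch (values illustrative, tensors reused from the first sketch); AdaGrad accumulates the squared gradients per parameter, so it takes no decay rate:</p>
+<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>opt = optimizer.AdaGrad(epsilon=1e-8, lr=0.01)
+p = opt.apply_with_lr(0, 0.01, g, p, 'w', 0)
+</pre></div>
+</div>
+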
+<dl class="class">
+<dt id="singa.optimizer.Adam">
+<em class="property">class </em><code 
class="descclassname">singa.optimizer.</code><code 
class="descname">Adam</code><span 
class="sig-paren">(</span><em>beta_1=0.9</em>, <em>beta_2=0.999</em>, 
<em>epsilon=1e-08</em>, <em>lr=None</em>, <em>weight_decay=None</em>, 
<em>regularizer=None</em>, <em>constraint=None</em><span 
class="sig-paren">)</span><a class="headerlink" href="#singa.optimizer.Adam" 
title="Permalink to this definition">¶</a></dt>
+<dd><p>Bases: <a class="reference internal" href="#singa.optimizer.Optimizer" 
title="singa.optimizer.Optimizer"><code class="xref py py-class docutils 
literal notranslate"><span 
class="pre">singa.optimizer.Optimizer</span></code></a></p>
+<p>Adam optimizer.</p>
+<p>See the base Optimizer for all constructor args.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>beta_1</strong> (<em>float</em>) – decay coefficient of the 
momentum (first moment)</p></li>
+<li><p><strong>beta_2</strong> (<em>float</em>) – decay coefficient of the 
aggregated squared gradient (second moment)</p></li>
+<li><p><strong>epsilon</strong> (<em>float</em>) – small value for 
preventing numerical errors</p></li>
+</ul>
+</dd>
+</dl>
+<dl class="method">
+<dt id="singa.optimizer.Adam.apply_with_lr">
+<code class="descname">apply_with_lr</code><span 
class="sig-paren">(</span><em>epoch</em>, <em>lr</em>, <em>grad</em>, 
<em>value</em>, <em>name</em>, <em>step</em><span class="sig-paren">)</span><a 
class="headerlink" href="#singa.optimizer.Adam.apply_with_lr" title="Permalink 
to this definition">¶</a></dt>
+<dd><p>Update one parameter object.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><p><strong>step</strong> (<em>int</em>) – the 
accumulated training iterations, not the iteration ID</p>
+</dd>
+</dl>
+</dd></dl>
+
+</dd></dl>
+
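+<p>Since the <em>step</em> argument of Adam is the accumulated iteration count rather than the in-epoch ID, a sketch keeps a global counter (values illustrative, tensors reused from the first sketch):</p>
+<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>opt = optimizer.Adam(lr=0.001)
+global_step = 0
+for epoch in range(2):
+    for batch in range(3):
+        # Adam uses the total number of updates so far, not the in-epoch ID
+        p = opt.apply_with_lr(epoch, 0.001, g, p, 'w', global_step)
+        global_step += 1
+</pre></div>
+</div>
+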
+<dl class="class">
+<dt id="singa.optimizer.Regularizer">
+<em class="property">class </em><code 
class="descclassname">singa.optimizer.</code><code 
class="descname">Regularizer</code><a class="headerlink" 
href="#singa.optimizer.Regularizer" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Bases: <code class="xref py py-class docutils literal 
notranslate"><span class="pre">object</span></code></p>
+<p>Base Python regularizer for parameter gradients.</p>
+<dl class="method">
+<dt id="singa.optimizer.Regularizer.apply">
+<code class="descname">apply</code><span 
class="sig-paren">(</span><em>epoch</em>, <em>value</em>, <em>grad</em>, 
<em>step=-1</em><span class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.Regularizer.apply" title="Permalink to this 
definition">¶</a></dt>
+<dd></dd></dl>
+
+</dd></dl>
+
+<dl class="class">
+<dt id="singa.optimizer.CppRegularizer">
+<em class="property">class </em><code 
class="descclassname">singa.optimizer.</code><code 
class="descname">CppRegularizer</code><span 
class="sig-paren">(</span><em>conf</em><span class="sig-paren">)</span><a 
class="headerlink" href="#singa.optimizer.CppRegularizer" title="Permalink to 
this definition">¶</a></dt>
+<dd><p>Bases: <a class="reference internal" 
href="#singa.optimizer.Regularizer" title="singa.optimizer.Regularizer"><code 
class="xref py py-class docutils literal notranslate"><span 
class="pre">singa.optimizer.Regularizer</span></code></a></p>
+<p>Wrapper for a regularizer implemented using C++.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><p><strong>conf</strong> (<em>RegularizerConf</em>) – 
protobuf message for the configuration.</p>
+</dd>
+</dl>
+<dl class="method">
+<dt id="singa.optimizer.CppRegularizer.apply">
+<code class="descname">apply</code><span 
class="sig-paren">(</span><em>epoch</em>, <em>value</em>, <em>grad</em>, 
<em>step=-1</em><span class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.CppRegularizer.apply" title="Permalink to this 
definition">¶</a></dt>
+<dd></dd></dl>
+
+</dd></dl>
+
+<dl class="class">
+<dt id="singa.optimizer.L2Regularizer">
+<em class="property">class </em><code 
class="descclassname">singa.optimizer.</code><code 
class="descname">L2Regularizer</code><span 
class="sig-paren">(</span><em>coefficient</em><span 
class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.L2Regularizer" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Bases: <a class="reference internal" 
href="#singa.optimizer.Regularizer" title="singa.optimizer.Regularizer"><code 
class="xref py py-class docutils literal notranslate"><span 
class="pre">singa.optimizer.Regularizer</span></code></a></p>
+<p>L2 regularization.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><p><strong>coefficient</strong> (<em>float</em>) – 
regularization coefficient.</p>
+</dd>
+</dl>
+<dl class="method">
+<dt id="singa.optimizer.L2Regularizer.apply">
+<code class="descname">apply</code><span 
class="sig-paren">(</span><em>epoch</em>, <em>value</em>, <em>grad</em>, 
<em>step=-1</em><span class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.L2Regularizer.apply" title="Permalink to this 
definition">¶</a></dt>
+<dd></dd></dl>
+
+</dd></dl>
+
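+<p>A sketch of attaching L2 regularization to an optimizer; passing <em>weight_decay</em> to the constructor presumably builds the same regularizer (an assumption based on the constructor arguments above):</p>
+<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>from singa import optimizer
+
+reg = optimizer.L2Regularizer(5e-4)
+opt = optimizer.SGD(momentum=0.9, regularizer=reg)
+# assumed equivalent shorthand:
+opt = optimizer.SGD(momentum=0.9, weight_decay=5e-4)
+</pre></div>
+</div>
+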
+<dl class="class">
+<dt id="singa.optimizer.Constraint">
+<em class="property">class </em><code 
class="descclassname">singa.optimizer.</code><code 
class="descname">Constraint</code><a class="headerlink" 
href="#singa.optimizer.Constraint" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Bases: <code class="xref py py-class docutils literal 
notranslate"><span class="pre">object</span></code></p>
+<p>Base Python constraint class for parameter gradients.</p>
+<dl class="method">
+<dt id="singa.optimizer.Constraint.apply">
+<code class="descname">apply</code><span 
class="sig-paren">(</span><em>epoch</em>, <em>value</em>, <em>grad</em>, 
<em>step=-1</em><span class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.Constraint.apply" title="Permalink to this 
definition">¶</a></dt>
+<dd></dd></dl>
+
+</dd></dl>
+
+<dl class="class">
+<dt id="singa.optimizer.CppConstraint">
+<em class="property">class </em><code 
class="descclassname">singa.optimizer.</code><code 
class="descname">CppConstraint</code><span 
class="sig-paren">(</span><em>conf</em><span class="sig-paren">)</span><a 
class="headerlink" href="#singa.optimizer.CppConstraint" title="Permalink to 
this definition">¶</a></dt>
+<dd><p>Bases: <a class="reference internal" href="#singa.optimizer.Constraint" 
title="singa.optimizer.Constraint"><code class="xref py py-class docutils 
literal notranslate"><span 
class="pre">singa.optimizer.Constraint</span></code></a></p>
+<p>Wrapper for constraints implemented using C++.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><p><strong>conf</strong> (<em>ConstraintConf</em>) – 
protobuf message for the configuration.</p>
+</dd>
+</dl>
+<dl class="method">
+<dt id="singa.optimizer.CppConstraint.apply">
+<code class="descname">apply</code><span 
class="sig-paren">(</span><em>epoch</em>, <em>value</em>, <em>grad</em>, 
<em>step=-1</em><span class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.CppConstraint.apply" title="Permalink to this 
definition">¶</a></dt>
+<dd></dd></dl>
+
+</dd></dl>
+
+<dl class="class">
+<dt id="singa.optimizer.L2Constraint">
+<em class="property">class </em><code 
class="descclassname">singa.optimizer.</code><code 
class="descname">L2Constraint</code><span 
class="sig-paren">(</span><em>threshold=None</em><span 
class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.L2Constraint" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Bases: <a class="reference internal" href="#singa.optimizer.Constraint" 
title="singa.optimizer.Constraint"><code class="xref py py-class docutils 
literal notranslate"><span 
class="pre">singa.optimizer.Constraint</span></code></a></p>
+<p>Rescales the gradient so that its L2 norm is &lt;= a given threshold.</p>
+<dl class="method">
+<dt id="singa.optimizer.L2Constraint.apply">
+<code class="descname">apply</code><span 
class="sig-paren">(</span><em>epoch</em>, <em>value</em>, <em>grad</em>, 
<em>step=-1</em><span class="sig-paren">)</span><a class="headerlink" 
href="#singa.optimizer.L2Constraint.apply" title="Permalink to this 
definition">¶</a></dt>
+<dd></dd></dl>
+
+</dd></dl>
+
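+<p>A sketch of gradient-norm clipping via the constraint hook (threshold illustrative, tensors reused from the first sketch); the constraint rescales the gradient during each update:</p>
+<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>con = optimizer.L2Constraint(threshold=5.0)
+opt = optimizer.SGD(lr=0.01, constraint=con)
+# gradients with L2 norm above 5.0 are rescaled before the update
+p = opt.apply_with_lr(0, 0.01, g, p, 'w', 0)
+</pre></div>
+</div>
+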
 </div>
 
 

Modified: incubator/singa/site/trunk/docs/snapshot.html
URL: 
http://svn.apache.org/viewvc/incubator/singa/site/trunk/docs/snapshot.html?rev=1862313&r1=1862312&r2=1862313&view=diff
==============================================================================
--- incubator/singa/site/trunk/docs/snapshot.html (original)
+++ incubator/singa/site/trunk/docs/snapshot.html Sat Jun 29 14:42:24 2019
@@ -104,6 +104,7 @@
 <li class="toctree-l1 current"><a class="reference internal" 
href="index.html">Documentation</a><ul class="current">
 <li class="toctree-l2"><a class="reference internal" 
href="installation.html">Installation</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="software_stack.html">Software Stack</a></li>
+<li class="toctree-l2"><a class="reference internal" 
href="benchmark.html">Benchmark for Distributed training</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="device.html">Device</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="tensor.html">Tensor</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="layer.html">Layer</a></li>
@@ -203,8 +204,51 @@
           <div role="main" class="document" itemscope="itemscope" 
itemtype="http://schema.org/Article";>
            <div itemprop="articleBody">
             
-  <div class="section" id="snapshot">
-<h1>Snapshot<a class="headerlink" href="#snapshot" title="Permalink to this 
headline">¶</a></h1>
+  <div class="section" id="module-singa.snapshot">
+<span id="snapshot"></span><h1>Snapshot<a class="headerlink" 
href="#module-singa.snapshot" title="Permalink to this headline">¶</a></h1>
+<p>This module wraps the io::Snapshot class and its methods.</p>
+<p>Example usages:</p>
+<div class="highlight-default notranslate"><div 
class="highlight"><pre><span></span><span class="kn">from</span> <span 
class="nn">singa</span> <span class="k">import</span> <span 
class="n">snapshot</span>
+
+<span class="n">sn1</span> <span class="o">=</span> <span 
class="n">snapshot</span><span class="o">.</span><span 
class="n">Snapshot</span><span class="p">(</span><span 
class="s1">&#39;param&#39;</span><span class="p">,</span> <span 
class="kc">False</span><span class="p">)</span>
+<span class="n">params</span> <span class="o">=</span> <span 
class="n">sn1</span><span class="o">.</span><span class="n">read</span><span 
class="p">()</span>  <span class="c1"># read all params as a dictionary</span>
+
+<span class="n">sn2</span> <span class="o">=</span> <span 
class="n">snapshot</span><span class="o">.</span><span 
class="n">Snapshot</span><span class="p">(</span><span 
class="s1">&#39;param_new&#39;</span><span class="p">,</span> <span 
class="kc">False</span><span class="p">)</span>
+<span class="k">for</span> <span class="n">k</span><span class="p">,</span> 
<span class="n">v</span> <span class="ow">in</span> <span 
class="n">params</span><span class="o">.</span><span 
class="n">iteritems</span><span class="p">():</span>
+    <span class="n">sn2</span><span class="o">.</span><span 
class="n">write</span><span class="p">(</span><span class="n">k</span><span 
class="p">,</span> <span class="n">v</span><span class="p">)</span>
+</pre></div>
+</div>
+<dl class="class">
+<dt id="singa.snapshot.Snapshot">
+<em class="property">class </em><code 
class="descclassname">singa.snapshot.</code><code 
class="descname">Snapshot</code><span class="sig-paren">(</span><em>f</em>, 
<em>mode</em>, <em>buffer_size=10</em><span class="sig-paren">)</span><a 
class="headerlink" href="#singa.snapshot.Snapshot" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Class and member functions for singa::Snapshot.</p>
+<dl class="method">
+<dt id="singa.snapshot.Snapshot.read">
+<code class="descname">read</code><span class="sig-paren">(</span><span 
class="sig-paren">)</span><a class="headerlink" 
href="#singa.snapshot.Snapshot.read" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Calls the read method to load all (param_name, param_val) pairs.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Returns</dt>
+<dd class="field-odd"><p>a dict of (parameter name, parameter Tensor)</p>
+</dd>
+</dl>
+</dd></dl>
+
+<dl class="method">
+<dt id="singa.snapshot.Snapshot.write">
+<code class="descname">write</code><span 
class="sig-paren">(</span><em>param_name</em>, <em>param_val</em><span 
class="sig-paren">)</span><a class="headerlink" 
href="#singa.snapshot.Snapshot.write" title="Permalink to this 
definition">¶</a></dt>
+<dd><p>Calls the write method to write one parameter.</p>
+<dl class="field-list simple">
+<dt class="field-odd">Parameters</dt>
+<dd class="field-odd"><ul class="simple">
+<li><p><strong>param_name</strong> (<em>string</em>) – name of the 
parameter</p></li>
+<li><p><strong>param_val</strong> (<a class="reference internal" 
href="tensor.html#singa.tensor.Tensor" 
title="singa.tensor.Tensor"><em>Tensor</em></a>) – value tensor of the 
parameter</p></li>
+</ul>
+</dd>
+</dl>
+</dd></dl>
+
+</dd></dl>
+
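+<p>A short sketch of inspecting an existing checkpoint (the file prefix is illustrative; as in the example above, the second constructor argument selects the mode):</p>
+<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>from singa import snapshot, tensor
+
+sn = snapshot.Snapshot('param', False)
+for name, val in sn.read().items():
+    print(name, tensor.to_numpy(val).shape)
+</pre></div>
+</div>
+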
 </div>
 
 

Modified: incubator/singa/site/trunk/docs/software_stack.html
URL: 
http://svn.apache.org/viewvc/incubator/singa/site/trunk/docs/software_stack.html?rev=1862313&r1=1862312&r2=1862313&view=diff
==============================================================================
--- incubator/singa/site/trunk/docs/software_stack.html (original)
+++ incubator/singa/site/trunk/docs/software_stack.html Sat Jun 29 14:42:24 2019
@@ -36,7 +36,7 @@
   <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
     <link rel="index" title="Index" href="../genindex.html" />
     <link rel="search" title="Search" href="../search.html" />
-    <link rel="next" title="Device" href="device.html" />
+    <link rel="next" title="Benchmark for Distributed training" 
href="benchmark.html" />
     <link rel="prev" title="Installation" href="installation.html" />
     <link href="../_static/style.css" rel="stylesheet" type="text/css">
     <!--link href="../_static/fontawesome-all.min.css" rel="stylesheet" 
type="text/css"-->
@@ -109,6 +109,7 @@
 <li class="toctree-l3"><a class="reference internal" href="#io">IO</a></li>
 </ul>
 </li>
+<li class="toctree-l2"><a class="reference internal" 
href="benchmark.html">Benchmark for Distributed training</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="device.html">Device</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="tensor.html">Tensor</a></li>
 <li class="toctree-l2"><a class="reference internal" 
href="layer.html">Layer</a></li>
@@ -249,19 +250,19 @@ tensor operations on its execution units
 <p>Depending on the hardware and the programming language, SINGA has 
implemented
 the following specific device classes:</p>
 <ul class="simple">
-<li><strong>CudaGPU</strong> represents an Nvidia GPU card. The execution 
units are the CUDA streams.</li>
-<li><strong>CppCPU</strong> represents a normal CPU. The execution units are 
the CPU threads.</li>
-<li><strong>OpenclGPU</strong> represents normal GPU card from both Nvidia and 
AMD.
+<li><p><strong>CudaGPU</strong> represents an Nvidia GPU card. The execution 
units are the CUDA streams.</p></li>
+<li><p><strong>CppCPU</strong> represents a normal CPU. The execution units 
are the CPU threads.</p></li>
+<li><p><strong>OpenclGPU</strong> represents a normal GPU card from either 
Nvidia or AMD.
 The execution units are the CommandQueues. Given that OpenCL is compatible with
 many hardware devices, e.g. FPGA and ARM, the OpenclGPU has the potential to be
-extended for other devices.</li>
+extended for other devices.</p></li>
 </ul>
 <p>Different types of devices use different programming languages to write the 
kernel
 functions for tensor operations,</p>
 <ul class="simple">
-<li>CppMath (tensor_math_cpp.h) implements the tensor operations using Cpp for 
CppCPU</li>
-<li>CudaMath (tensor_math_cuda.h) implements the tensor operations using CUDA 
for CudaGPU</li>
-<li>OpenclMath (tensor_math_opencl.h) implements the tensor operations using 
OpenCL for OpenclGPU</li>
+<li><p>CppMath (tensor_math_cpp.h) implements the tensor operations using Cpp 
for CppCPU</p></li>
+<li><p>CudaMath (tensor_math_cuda.h) implements the tensor operations using 
CUDA for CudaGPU</p></li>
+<li><p>OpenclMath (tensor_math_opencl.h) implements the tensor operations 
using OpenCL for OpenclGPU</p></li>
 </ul>
 <p>In addition, different types of data, such as float32 and float16, could be 
supported by adding
 the corresponding tensor functions.</p>
@@ -285,31 +286,31 @@ would be described in the <a class="refe
 <p>On top of the Tensor and Device abstractions, SINGA provides some higher 
level
 classes for machine learning modules.</p>
 <ul class="simple">
-<li><a class="reference external" href="layer.html">Layer</a> and its 
subclasses are specific for neural networks. Every layer provides
+<li><p><a class="reference external" href="layer.html">Layer</a> and its 
subclasses are specific for neural networks. Every layer provides
 functions for forward propagating features and backward propagating gradients 
w.r.t the training loss functions.
They wrap the complex layer operations so that users can easily create neural 
nets
-by connecting a set of layers.</li>
-<li><a class="reference external" href="initializer.html">Initializer</a> and 
its subclasses provide variant methods of initializing
-model parameters (stored in Tensor instances), following Uniform, Gaussian, 
etc.</li>
-<li><a class="reference external" href="loss.html">Loss</a> and its subclasses 
defines the training objective loss functions.
+by connecting a set of layers.</p></li>
+<li><p><a class="reference external" href="initializer.html">Initializer</a> 
and its subclasses provide variant methods of initializing
+model parameters (stored in Tensor instances), following Uniform, Gaussian, 
etc.</p></li>
+<li><p><a class="reference external" href="loss.html">Loss</a> and its 
subclasses defines the training objective loss functions.
 Both functions of computing the loss values and computing the gradient of the 
prediction w.r.t the
-objective loss are implemented. Example loss functions include squared error 
and cross entropy.</li>
-<li><a class="reference external" href="metric.html">Metric</a> and its 
subclasses provide the function to measure the
-performance of the model, e.g., the accuracy.</li>
-<li><a class="reference external" href="optimizer.html">Optimizer</a> and its 
subclasses implement the methods for updating
-model parameter values using parameter gradients, including SGD, AdaGrad, 
RMSProp etc.</li>
+objective loss are implemented. Example loss functions include squared error 
and cross entropy.</p></li>
+<li><p><a class="reference external" href="metric.html">Metric</a> and its 
subclasses provide the function to measure the
+performance of the model, e.g., the accuracy.</p></li>
+<li><p><a class="reference external" href="optimizer.html">Optimizer</a> and 
its subclasses implement the methods for updating
+model parameter values using parameter gradients, including SGD, AdaGrad, 
RMSProp etc.</p></li>
 </ul>
 </div>
 <div class="section" id="io">
 <h2>IO<a class="headerlink" href="#io" title="Permalink to this 
headline">¶</a></h2>
 <p>The IO module consists of classes for data loading, data preprocessing and 
message passing.</p>
 <ul class="simple">
-<li>Reader and its subclasses load string records from disk files</li>
-<li>Writer and its subclasses write string records to disk files</li>
-<li>Encoder and its subclasses encode Tensor instances into string records</li>
-<li>Decoder and its subclasses decodes string records into Tensor 
instances</li>
-<li>Endpoint represents a communication endpoint which provides functions for 
passing messages to each other.</li>
-<li>Message represents communication messages between Endpoint instances. It 
carries both meta data and payload.</li>
+<li><p>Reader and its subclasses load string records from disk files</p></li>
+<li><p>Writer and its subclasses write string records to disk files</p></li>
+<li><p>Encoder and its subclasses encode Tensor instances into string 
records</p></li>
+<li><p>Decoder and its subclasses decode string records into Tensor 
instances</p></li>
+<li><p>Endpoint represents a communication endpoint which provides functions 
for passing messages to other endpoints.</p></li>
+<li><p>Message represents communication messages between Endpoint instances. 
It carries both metadata and payload.</p></li>
 </ul>
 </div>
 </div>
@@ -322,7 +323,7 @@ model parameter values using parameter g
   
     <div class="rst-footer-buttons" role="navigation" aria-label="footer 
navigation">
       
-        <a href="device.html" class="btn btn-neutral float-right" 
title="Device" accesskey="n" rel="next">Next <span class="fa 
fa-arrow-circle-right"></span></a>
+        <a href="benchmark.html" class="btn btn-neutral float-right" 
title="Benchmark for Distributed training" accesskey="n" rel="next">Next <span 
class="fa fa-arrow-circle-right"></span></a>
       
       
         <a href="installation.html" class="btn btn-neutral float-left" 
title="Installation" accesskey="p" rel="prev"><span class="fa 
fa-arrow-circle-left"></span> Previous</a>

