SINGA-348 Support autograd MLP Example

1. Update the python singa Tensor to include more fields for autograd.
2. Add the Operation class, which provides the forward and backward
functions for each operation, e.g. matmul, relu and softmax.
3. Add functions to do the dependency check and run backward propagation
automatically, using the Operations recorded during the (manual) forward
pass; a sketch of this pattern follows the list.
4. Add MLP.py to test the autograd functions with an MLP example.
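
As a rough, numpy-only sketch of items 2 and 3 (not the committed API):
the class name Operation matches this commit, but Matmul, the __call__
protocol and every signature below are illustrative assumptions; see
examples/MLP.py in the diff for the real usage.

    import numpy as np

    class Operation(object):
        # records its inputs during forward so backward can reuse them
        def __call__(self, *inputs):
            self.inputs = inputs
            return self.forward(*inputs)

        def forward(self, *xs):
            raise NotImplementedError

        def backward(self, dy):
            raise NotImplementedError

    class Matmul(Operation):
        def forward(self, x, w):
            return np.matmul(x, w)

        def backward(self, dy):
            x, w = self.inputs
            # gradients w.r.t. x and w respectively
            return np.matmul(dy, w.T), np.matmul(x.T, dy)

    # forward: apply the ops manually, keeping a record of each one
    x = np.random.randn(4, 2).astype(np.float32)
    w = np.random.randn(2, 3).astype(np.float32)
    op = Matmul()
    y = op(x, w)

    # backward: replay the recorded ops in reverse order; the engine in
    # this commit also does a dependency check over the recorded graph
    dx, dw = op.backward(np.ones_like(y))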


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/e09dff45
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/e09dff45
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/e09dff45

Branch: refs/heads/master
Commit: e09dff45ba21c4d9b042bc4cfe04aa7eb50fd4e9
Parents: 163452e
Author: xuewanqi <36396136+xuewa...@users.noreply.github.com>
Authored: Mon Mar 19 12:29:14 2018 +0800
Committer: Wang Wei <dcs...@nus.edu.sg>
Committed: Thu Apr 12 16:59:47 2018 +0800

----------------------------------------------------------------------
 examples/MLP.py | 108 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 108 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/e09dff45/examples/MLP.py
----------------------------------------------------------------------
diff --git a/examples/MLP.py b/examples/MLP.py
new file mode 100644
index 0000000..54ae1ad
--- /dev/null
+++ b/examples/MLP.py
@@ -0,0 +1,108 @@
+# train a small MLP on toy 2-d data using the new autograd Operations
+from singa import tensor
+from singa import engine
+from singa import singa_wrap as singa
+import numpy as np
+
+def print_singa_tensor(x):
+    # copy the values out of a singa tensor and print them as numpy
+    np_array = x.GetFloatValue(int(x.Size()))
+    print(np_array.reshape(x.shape()))
+
+if __name__ == '__main__':
+
+    # prepare numpy training data
+    # generate the boundary
+    f = lambda x: (5 * x + 1)
+    bd_x = np.linspace(-1., 1, 200)
+    bd_y = f(bd_x)
+    # generate the training data
+    x = np.random.uniform(-1, 1, 400)
+    y = f(x) + 2 * np.random.randn(len(x))
+    # convert training data to 2d space
+    label = np.asarray([5 * a + 1 > b for (a, b) in zip(x, y)])
+    data = np.array([[a, b] for (a, b) in zip(x, y)], dtype=np.float32)
+
+    def to_categorical(y, num_classes=None):
+        """Converts a class vector (integers) to binary class matrix.
+
+        E.g. for use with categorical_crossentropy.
+
+        # Arguments
+            y: class vector to be converted into a matrix
+                (integers from 0 to num_classes).
+            num_classes: total number of classes.
+
+        # Returns
+            A binary matrix representation of the input.
+        """
+        y = np.array(y, dtype='int')
+        input_shape = y.shape
+        if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
+            input_shape = tuple(input_shape[:-1])
+        y = y.ravel()
+        if not num_classes:
+            num_classes = np.max(y) + 1
+        n = y.shape[0]
+        categorical = np.zeros((n, num_classes))
+        categorical[np.arange(n), y] = 1
+        output_shape = input_shape + (num_classes,)
+        categorical = np.reshape(categorical, output_shape)
+        return categorical
+
+    label = to_categorical(label, 2).astype(np.float32)
+    print('train_data_shape: %s, train_label_shape: %s'
+          % (data.shape, label.shape))
+
+    # send numpy data to singa_tensor
+    tr_data = singa.Tensor((400, 2))
+    tr_data.CopyFloatDataFromHostPtr(data.flatten())
+
+    tr_label = singa.Tensor((400, 2))
+    tr_label.CopyFloatDataFromHostPtr(label.flatten())
+
+    w_0 = singa.Tensor((2, 3))
+    singa.Gaussian(0.0, 0.1, w_0)
+    b_0 = singa.Tensor((1, 3))
+    b_0.SetFloatValue(0.0)
+
+    w_1 = singa.Tensor((3, 2))
+    singa.Gaussian(0.0, 0.1, w_1)
+    b_1 = singa.Tensor((1, 2))
+    b_1.SetFloatValue(0.0)
+
+
+    # initialize autograd Tensors from the singa tensors
+    inputs = tensor.Tensor(data=tr_data, requires_grad=False, grad_outlet=False)
+    target = tensor.Tensor(data=tr_label, requires_grad=False, grad_outlet=False)
+
+    weight_0 = tensor.Tensor(data=w_0, requires_grad=True, grad_outlet=True)
+    bias_0 = tensor.Tensor(data=b_0, requires_grad=True, grad_outlet=True)
+
+    weight_1 = tensor.Tensor(data=w_1, requires_grad=True, grad_outlet=True)
+    bias_1 = tensor.Tensor(data=b_1, requires_grad=True, grad_outlet=True)
+
+    def update(lr, param, grad):  # param: Tensor, grad: singa_tensor
+        # vanilla SGD step: param = param - lr * grad
+        grad *= float(lr)
+        assert param.singa_tensor.shape() == grad.shape()
+        param.singa_tensor = singa.__sub__(param.singa_tensor, grad)
+
+    lr = 0.05
+    for i in range(1001):
+        # forward pass: a 2-3-2 MLP with relu and softmax
+        outputs = tensor.dot(inputs, weight_0)
+        outputs = tensor.add_bias(bias_0, outputs)
+        outputs = tensor.relu(outputs)
+        outputs = tensor.dot(outputs, weight_1)
+        outputs = tensor.add_bias(bias_1, outputs)
+        outputs = tensor.softmax(outputs)
+
+        loss = tensor.cross_entropy(outputs, target)
+
+        # backward pass: seed the gradient with 1.0, then apply SGD
+        grads = 1.0
+        in_grads = engine.gradients(loss, grads)
+
+        for param in in_grads:
+            update(lr, param, in_grads[param])
+
+        if i % 100 == 0:
+            print('training loss = %f'
+                  % float(tensor.To_numpy(loss.singa_tensor)))
\ No newline at end of file
