This is an automated email from the ASF dual-hosted git repository.

wangwei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-singa.git


The following commit(s) were added to refs/heads/master by this push:
     new a43f293  SINGA-474 sum operator
     new c26e769  Merge pull request #505 from ShichengChen/sum
a43f293 is described below

commit a43f293d8b5f064e3e087cf1b504f052f0223777
Author: ShichengChen <[email protected]>
AuthorDate: Wed Aug 14 22:02:59 2019 +0800

    SINGA-474 sum operator
---
 python/singa/autograd.py      | 20 ++++++++++++++++++++
 test/python/test_operation.py | 44 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 64 insertions(+)

diff --git a/python/singa/autograd.py b/python/singa/autograd.py
index 15d1318..9d4a6cc 100644
--- a/python/singa/autograd.py
+++ b/python/singa/autograd.py
@@ -516,6 +516,26 @@ class SoftMax(Operation):
 def softmax(x, axis=0):
     return SoftMax(axis)(x)[0]
 
+class Sum(Operation):
+    def __init__(self):
+        super(Sum, self).__init__()
+
+    def forward(self, *l):
+        assert len(l) > 0
+        if training:
+            self.l = len(l)  # remember the operand count for backward
+        x = singa.Tensor(list(l[0].shape()), l[0].device())
+        x.SetFloatValue(0.0)
+        for i in range(len(l)):
+            x += l[i]
+        return x
+
+    def backward(self, dy):
+        return [dy] * self.l  # d(sum)/dx_i = 1, so each input receives dy
+
+
+def sum(*l):
+    return Sum()(*l)[0]
 
 class CrossEntropy(Operation):
     def __init__(self):
diff --git a/test/python/test_operation.py b/test/python/test_operation.py
index cfb0c4c..a8c6697 100755
--- a/test/python/test_operation.py
+++ b/test/python/test_operation.py
@@ -97,6 +97,50 @@ class TestPythonOperation(unittest.TestCase):
         y_without_bias = conv_without_bias_0(gpu_input_tensor)
         self.check_shape(y_without_bias.shape, (2, 1, 2, 2))
 
+    def test_sum_cpu(self):
+        x = np.array([0.1, -1.0, 0.4, 4.0, -0.9, 9.0]).reshape(3, 2).astype(np.float32)
+        x1 = np.array([0.1, 1.0, 0.4, 4.0, 0.9, 9.0]).reshape(3, 2).astype(np.float32)
+        y = x + x1
+        dy = np.ones((3, 2), dtype=np.float32)
+        grad0 = dy
+        grad1 = dy
+        x = tensor.from_numpy(x)
+        x1 = tensor.from_numpy(x1)
+        dy = tensor.from_numpy(dy)
+        x.to_device(cpu_dev)
+        x1.to_device(cpu_dev)
+        dy.to_device(cpu_dev)
+
+        result = autograd.sum(x, x1)
+        dx0, dx1 = result.creator.backward(dy.data)
+
+
+        np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5)
+        np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), grad0, decimal=5)
+        np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), grad1, decimal=5)
+
+    def test_sum_gpu(self):
+        x = np.array([0.1, -1.0, 0.4, 4.0, -0.9, 9.0]).reshape(3, 2).astype(np.float32)
+        x1 = np.array([0.1, 1.0, 0.4, 4.0, 0.9, 9.0]).reshape(3, 2).astype(np.float32)
+        y = x + x1
+        dy = np.ones((3, 2), dtype=np.float32)
+        grad0 = dy
+        grad1 = dy
+        x = tensor.from_numpy(x)
+        x1 = tensor.from_numpy(x1)
+        dy = tensor.from_numpy(dy)
+        x.to_device(gpu_dev)
+        x1.to_device(gpu_dev)
+        dy.to_device(gpu_dev)
+
+        result = autograd.sum(x, x1)
+        dx0, dx1 = result.creator.backward(dy.data)
+
+
+        np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5)
+        np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx0)), grad0, decimal=5)
+        np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), grad1, decimal=5)
+
     def test_conv2d_cpu(self):
         # (in_channels, out_channels, kernel_size)
         conv_1 = autograd.Conv2d(3, 1, 2)

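For readers skimming the patch, here is a minimal usage sketch of the new
operator. Assumptions not part of this commit: a standard SINGA Python
install exposing the tensor, autograd, and device modules, a default CPU
device from device.get_default_device(), and illustrative array values.

    import numpy as np
    from singa import autograd, device, tensor

    dev = device.get_default_device()

    # two operands of the same shape; sum() accepts any positive number of them
    a = tensor.from_numpy(np.ones((3, 2), dtype=np.float32))
    b = tensor.from_numpy(np.full((3, 2), 2.0, dtype=np.float32))
    a.to_device(dev)
    b.to_device(dev)

    autograd.training = True     # lets forward() record the operand count
    out = autograd.sum(a, b)     # element-wise sum, shape (3, 2)
    print(tensor.to_numpy(out))  # every entry is 3.0

    # each input receives the upstream gradient unchanged
    dy = tensor.from_numpy(np.ones((3, 2), dtype=np.float32))
    dy.to_device(dev)
    da, db = out.creator.backward(dy.data)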