This is an automated email from the ASF dual-hosted git repository.

wangwei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-singa.git


The following commit(s) were added to refs/heads/master by this push:
     new a018b2d  SINGA-474 fix origin conflict
     new ed8ff36  Merge pull request #516 from ShichengChen/operatorConflict
a018b2d is described below

commit a018b2d84f4407bddc10e486f86cfe611c445aee
Author: ShichengChen <[email protected]>
AuthorDate: Thu Aug 15 11:55:09 2019 +0800

    SINGA-474 fix origin conflict
---
 python/singa/autograd.py      |  5 +----
 test/python/test_operation.py | 28 ++++++++++++++++++++++++++--
 2 files changed, 27 insertions(+), 6 deletions(-)

diff --git a/python/singa/autograd.py b/python/singa/autograd.py
index 64f7342..a480aab 100644
--- a/python/singa/autograd.py
+++ b/python/singa/autograd.py
@@ -460,8 +460,7 @@ def clip(x,min,max):
 class Identity(Operation):
     def __init__(self):
         super(Identity, self).__init__()
-            x(CTensor): equal to input tensor
-        """
+    def forward(self,x):
         return x
 
     def backward(self, dy):
@@ -2186,13 +2185,11 @@ class Log(Operation):
         if training:
             self.input = x
         return singa.Log(x)
-​
     def backward(self, dy):
         dx = singa.PowFloat(self.input,-1)
         dx = singa.__mul__(dy, dx)
         return dx
 
-​
 def log(x):
     return Log()(x)[0]
 
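For reference, the Log.backward restored above computes dx = dy * x**-1, i.e. d/dx log(x) = 1/x. A minimal numpy sketch of the same gradient; log_backward and the names here are illustrative, not part of the patch:

    import numpy as np

    def log_backward(x, dy):
        # mirrors singa.PowFloat(self.input, -1) followed by singa.__mul__(dy, dx)
        return dy * np.power(x, -1.0)

    X = np.array([0.1, 1.0, 0.4, 1.4, 0.9, 2.0], dtype=np.float32).reshape(3, 2)
    DY = np.ones((3, 2), dtype=np.float32)
    assert np.allclose(log_backward(X, DY), 1.0 / X)
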
diff --git a/test/python/test_operation.py b/test/python/test_operation.py
index 7f3eebc..8111e11 100755
--- a/test/python/test_operation.py
+++ b/test/python/test_operation.py
@@ -459,7 +459,7 @@ class TestPythonOperation(unittest.TestCase):
 
         np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT)
         self.check_shape(dx.shape(), (3, 2))
-       
+
     def test_Cos_cpu(self):
         X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32)
         XT = np.cos(X)
@@ -1221,12 +1221,20 @@ class TestPythonOperation(unittest.TestCase):
         dy.to_device(gpu_dev)
 
         result = autograd.transpose(x,(1,2,0))
+        dx = result.creator.backward(dy.data)
+        np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5)
+        np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), grad, decimal=5)
 
 
     def test_Sign_cpu(self):
         X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32)
         XT = np.sign(X)
+        DY = np.ones((3, 2), dtype = np.float32)
 
+        x = tensor.from_numpy(X)
+        dy = tensor.from_numpy(DY)
+        x.to_device(gpu_dev)
+        dy.to_device(gpu_dev)
         result = autograd.sign(x)
         dx = result.creator.backward(dy.data)
         DX = np.multiply(DY,0)
@@ -1238,16 +1246,27 @@ class TestPythonOperation(unittest.TestCase):
     def test_Sign_gpu(self):
         X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32)
         XT = np.sign(X)
-        
+        DY = np.ones((3, 2), dtype = np.float32)
+
+        x = tensor.from_numpy(X)
+        dy = tensor.from_numpy(DY)
+        x.to_device(gpu_dev)
+        dy.to_device(gpu_dev)
         result = autograd.sign(x)
         dx = result.creator.backward(dy.data)
         DX = np.multiply(DY,0)
+        np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5)
+        np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), DX, decimal=5)
 
     def test_Log_cpu(self):
         X = np.array([0.1,1.0,0.4,1.4,0.9,2.0]).reshape(3,2).astype(np.float32)
         XT = np.log(X)
         DY = np.ones((3, 2), dtype = np.float32)
 
+        x = tensor.from_numpy(X)
+        dy = tensor.from_numpy(DY)
+        x.to_device(gpu_dev)
+        dy.to_device(gpu_dev)
         result = autograd.log(x)
         dx = result.creator.backward(dy.data)
         #dx = 1/x
@@ -1260,7 +1279,12 @@ class TestPythonOperation(unittest.TestCase):
     def test_Log_gpu(self):
         X = np.array([0.1,1.0,0.4,1.4,0.9,2.0]).reshape(3,2).astype(np.float32)
         XT = np.log(X)
+        DY = np.ones((3, 2), dtype = np.float32)
 
+        x = tensor.from_numpy(X)
+        dy = tensor.from_numpy(DY)
+        x.to_device(gpu_dev)
+        dy.to_device(gpu_dev)
         result = autograd.log(x)
         dx = result.creator.backward(dy.data)
         #dx = 1/x

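The restored test bodies above all follow the same forward/backward pattern used throughout test_operation.py: wrap numpy arrays with tensor.from_numpy, move them to the device, run the autograd op, call result.creator.backward(dy.data), and compare against a numpy reference. A self-contained sketch of that pattern; check_unary_op and the explicit device creation are illustrative assumptions, while the real tests inline these steps and share a gpu_dev from the test module:

    import numpy as np
    from singa import autograd, device, tensor

    gpu_dev = device.create_cuda_gpu()  # assumes a CUDA build, as the *_gpu tests do

    def check_unary_op(op, X, expect_y, expect_dx):
        # forward/backward check pattern restored by this commit
        x = tensor.from_numpy(X)
        dy = tensor.from_numpy(np.ones(X.shape, dtype=np.float32))
        x.to_device(gpu_dev)
        dy.to_device(gpu_dev)
        result = op(x)
        dx = result.creator.backward(dy.data)
        np.testing.assert_array_almost_equal(tensor.to_numpy(result), expect_y, decimal=5)
        np.testing.assert_array_almost_equal(
            tensor.to_numpy(tensor.from_raw_tensor(dx)), expect_dx, decimal=5)

    X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5]).reshape(3, 2).astype(np.float32)
    check_unary_op(autograd.sign, X, np.sign(X), np.zeros_like(X))  # sign has zero gradient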