Posted to commits@singa.apache.org by wa...@apache.org on 2019/06/27 08:42:32 UTC
[incubator-singa] branch master updated: fixed bugs for python test operation abs, exp, leakyrelu ref. SINGA-463
This is an automated email from the ASF dual-hosted git repository.
wangwei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-singa.git
The following commit(s) were added to refs/heads/master by this push:
new 73478fb fixed bugs for python test operation abs, exp, leakyrelu ref. SINGA-463
new c818afa Merge pull request #471 from dcslin/SINGA-463
73478fb is described below
commit 73478fb9cad3290128790a8fa9b39453e301c54e
Author: slin004 <13...@users.noreply.github.com>
AuthorDate: Thu Jun 27 15:05:45 2019 +0800
fixed bugs for python test operation abs, exp, leakyrelu ref. SINGA-463
---
python/singa/autograd.py | 5 ++---
test/python/test_operation.py | 29 +++++++++++++----------------
2 files changed, 15 insertions(+), 19 deletions(-)
diff --git a/python/singa/autograd.py b/python/singa/autograd.py
index c9b43ce..e6453a2 100644
--- a/python/singa/autograd.py
+++ b/python/singa/autograd.py
@@ -26,7 +26,6 @@ import math
 from .tensor import Tensor
 from . import singa_wrap as singa
-# from .tensor import einsum

 CTensor = singa.Tensor

 training = False
@@ -1605,8 +1604,8 @@ class LeakyRelu(Operation):
         # TODO(wangwei) check the correctness
         dx1 = singa.GTFloat(self.input, 0.0)
         dx2 = singa.LTFloat(self.input, 0.0)
-        dx2 = singa.MultFloat(x1, self.a)
-        dx = singa.__add__(x1, x2)
+        dx2 = singa.MultFloat(dx2, self.a)
+        dx = singa.__add__(dx1, dx2)
         return singa.__mul__(dy, dx)
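The bug here was a pair of typos: the gradient masks are computed into dx1 and dx2, but the old lines multiplied and added the undefined names x1 and x2. As a sanity check, here is what the corrected backward computes, written as a minimal numpy sketch (illustrative only, not SINGA code; the function name is mine):

    import numpy as np

    def leakyrelu_backward(dy, x, a=0.01):
        # mirrors GTFloat/LTFloat/MultFloat/__add__: mask is 1 where x > 0, a where x < 0
        dx1 = (x > 0).astype(np.float32)
        dx2 = (x < 0).astype(np.float32) * a
        return dy * (dx1 + dx2)

    x = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5], dtype=np.float32)
    print(leakyrelu_backward(np.ones_like(x), x))  # [1. 0.01 1. 0.01 0.01 1.]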
diff --git a/test/python/test_operation.py b/test/python/test_operation.py
index 4d49287..7205ed0 100755
--- a/test/python/test_operation.py
+++ b/test/python/test_operation.py
@@ -278,7 +278,7 @@ class TestPythonOperation(unittest.TestCase):
         loss= autograd.mse_loss(x,t)
         dx=loss.creator.backward()[0]

-        loss_np=tensor.to_numpy(loss)
+        loss_np=tensor.to_numpy(loss)[0]
         self.assertAlmostEqual(loss_np, 0.0366666, places=4)
         self.check_shape(dx.shape(), (3, 2))
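The added [0] matters because tensor.to_numpy returns a numpy array even when the loss is a scalar, while assertAlmostEqual expects a plain number. A numpy analogue, assuming the scalar tensor converts to a one-element array:

    import numpy as np

    loss_np = np.array([0.0366666], dtype=np.float32)  # stand-in for tensor.to_numpy(loss)
    print(loss_np[0])  # a plain float32 that assertAlmostEqual can round and compare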
@@ -289,25 +289,23 @@ class TestPythonOperation(unittest.TestCase):
         x.to_device(gpu_dev)
         result=autograd.abs(x)
-        Err=XT-result
-        dx=result.creator.backward()[0]
+        dx=result.creator.backward(x.data)
-        for ii in Err.flatten():
-            self.assertAlmostEquals(ii,0., places=3)
+        np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT)
         self.check_shape(dx.shape(), (3, 2))

     def test_Exp(self):
         X=np.array([0.8,-1.2,3.3,-3.6,-0.5,0.5]).reshape(3,2).astype(np.float32)
-        XT=np.array([2.2255409,0.22313017,27.112638,0.02732372,0.60653067,1.6487212]).reshape(3,2).astype(np.float32)
+        XT=np.exp(X)
         x=tensor.from_numpy(X)
         x.to_device(gpu_dev)
         result=autograd.exp(x)
-        Err=XT-result
-        dx=result.creator.backward()[0]
+        print("exp")
+        print(result)
+        dx=result.creator.backward(x.data)
-        for ii in Err.flatten():
-            self.assertAlmostEquals(ii,0., places=3)
+        np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT, decimal=5)
         self.check_shape(dx.shape(), (3, 2))

     def test_LeakyRelu(self):
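Two changes are common to the abs and exp tests: backward now receives an explicit upstream gradient (x.data serves as a stand-in dy; the old zero-argument call presumably no longer matched the backward(dy) signature), and the per-element assertAlmostEquals loop becomes one vectorized numpy assertion. The two print calls look like leftover debugging. In test_Exp, deriving XT with np.exp(X) also fixes a transcription error in the old table: its second entry is exp(-1.5), not exp(-1.2). An illustrative check:

    import numpy as np

    X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5], dtype=np.float32).reshape(3, 2)
    XT = np.exp(X)  # derive the expectation instead of hand-copying constants
    print(XT[0, 1])                  # ~0.30119422 = exp(-1.2)
    print(np.exp(np.float32(-1.5)))  # ~0.22313017, the value the old table mistakenly listed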
@@ -316,14 +314,13 @@ class TestPythonOperation(unittest.TestCase):
         x=tensor.from_numpy(X)
         x.to_device(gpu_dev)
-        result=autograd.LeakyRelu(x)
-        Err=XT-result
-        dx=result.creator.backward()[0]
+        result=autograd.leakyrelu(x)
-        for ii in Err.flatten():
-            self.assertAlmostEquals(ii,0., places=3)
+        dx=result.creator.backward(x.data)
+
+        np.testing.assert_array_almost_equal(tensor.to_numpy(result), XT)
         self.check_shape(dx.shape(), (3, 2))
-
+
if __name__ == '__main__':
unittest.main()
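On the test_LeakyRelu fix: my reading of the diff is that autograd.LeakyRelu names the Operation class (whose constructor takes the negative slope rather than an input tensor), so the lowercase functional wrapper leakyrelu is the correct call. The expected forward values are easy to reproduce in numpy; an illustrative sketch, assuming the default slope 0.01 and reusing the X array from the neighboring tests (the test's own X is outside the diff context):

    import numpy as np

    def leakyrelu_np(x, a=0.01):
        # positive entries pass through; negative entries are scaled by a
        return np.where(x > 0, x, a * x)

    X = np.array([0.8, -1.2, 3.3, -3.6, -0.5, 0.5], dtype=np.float32).reshape(3, 2)
    print(leakyrelu_np(X))  # [[ 0.8 -0.012] [ 3.3 -0.036] [-0.005 0.5]]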