Posted to commits@singa.apache.org by wa...@apache.org on 2019/08/15 01:35:51 UTC

[incubator-singa] branch master updated: SINGA-474 greater operator

This is an automated email from the ASF dual-hosted git repository.

wangwei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-singa.git


The following commit(s) were added to refs/heads/master by this push:
     new 153c623  SINGA-474 greater operator
     new 76ca649  Merge pull request #481 from ShichengChen/greater
153c623 is described below

commit 153c6235e1f289794794caded5604140faba13d1
Author: ShichengChen <c3...@gmail.com>
AuthorDate: Wed Aug 14 21:34:18 2019 +0800

    SINGA-474 greater operator
---
 python/singa/autograd.py      | 29 +++++++++++++++++++++++++++++
 test/python/test_operation.py | 24 ++++++++++++++++++++++++
 2 files changed, 53 insertions(+)

diff --git a/python/singa/autograd.py b/python/singa/autograd.py
index 15d1318..cc581fb 100644
--- a/python/singa/autograd.py
+++ b/python/singa/autograd.py
@@ -393,7 +393,36 @@ class Matmul(Operation):
 def matmul(x, w):
     return Matmul()(x, w)[0]
 
+class Greater(Operation):
+    def __init__(self):
+        super(Greater, self).__init__()
+
+    def forward(self, x, y):
+        """Do forward propagation.
+        Store the result of [x > y] if gradients are required.
+        Args:
+            x (CTensor): matrix
+            y (CTensor): matrix
+        Returns:
+            a CTensor for the result
+        """
+        cur = singa.GTFloat(singa.__sub__(x, y), 0)
+        if training:
+            self.cache = cur
+        return cur
+
+    def backward(self, dy):
+        """
+        Args:
+            dy (CTensor): the gradient dL / dy, where L is the loss
+        Returns:
+            a tuple (dx0, dx1)
+        """
+        assert False, 'no backward function for greater'
+        return None
 
+def greater(x, y):
+    return Greater()(x, y)[0]
 class AddBias(Operation):
     """
     Add Bias to each row / column of the Tensor, depending on the axis arg.
diff --git a/test/python/test_operation.py b/test/python/test_operation.py
index cfb0c4c..1aaefea 100755
--- a/test/python/test_operation.py
+++ b/test/python/test_operation.py
@@ -74,6 +74,30 @@ class TestPythonOperation(unittest.TestCase):
                                               _tuple_to_string(expect))
                          )
 
+
+    def test_Greater_cpu(self):
+        x0 = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32)
+        x1 = np.array([0, -0.3, 0, 0.1, 0, 0.9]).reshape(3, 2).astype(np.float32)
+        y = np.greater(x0, x1)
+        x0 = tensor.from_numpy(x0)
+        x1 = tensor.from_numpy(x1)
+        x0.to_device(cpu_dev)
+        x1.to_device(cpu_dev)
+
+        result = autograd.greater(x0, x1)
+
+        np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5)
+    def test_Greater_gpu(self):
+        x0 = np.array([-0.9, -0.3, -0.1, 0.1, 0.5, 0.9]).reshape(3, 2).astype(np.float32)
+        x1 = np.array([0, -0.3, 0, 0.1, 0, 0.9]).reshape(3, 2).astype(np.float32)
+        y = np.greater(x0, x1)
+        x0 = tensor.from_numpy(x0)
+        x1 = tensor.from_numpy(x1)
+        x0.to_device(gpu_dev)
+        x1.to_device(gpu_dev)
+        result = autograd.greater(x0, x1)
+        np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5)
+
     def test_conv2d_gpu(self):
         # (in_channels, out_channels, kernel_size)
         conv_0 = autograd.Conv2d(3, 1, 2)
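
For reference, a minimal usage sketch of the new operator outside the test harness, mirroring the CPU test above. The imports follow the standard SINGA Python package layout; device.get_default_device() is assumed to return the default host (CPU) device, and the input values are illustrative only.

import numpy as np

from singa import autograd, device, tensor

# Two host matrices to compare element-wise.
a = np.array([[1.0, -2.0], [0.5, 0.0]], dtype=np.float32)
b = np.array([[0.0, -2.0], [1.5, -1.0]], dtype=np.float32)

# Wrap them as SINGA tensors and place them on the default (CPU) device,
# the same way the test above uses tensor.from_numpy and to_device.
dev = device.get_default_device()
x = tensor.from_numpy(a)
y = tensor.from_numpy(b)
x.to_device(dev)
y.to_device(dev)

# Element-wise greater-than: 1.0 where a > b, 0.0 elsewhere,
# matching np.greater(a, b) in the tests.
out = autograd.greater(x, y)
print(tensor.to_numpy(out))
# Expected output:
# [[1. 0.]
#  [0. 1.]]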