Posted to commits@singa.apache.org by wa...@apache.org on 2019/08/16 14:39:01 UTC

[incubator-singa] branch master updated: SINGA-474 hardSigmoid operator

This is an automated email from the ASF dual-hosted git repository.

wangwei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-singa.git


The following commit(s) were added to refs/heads/master by this push:
     new 034bc19  SINGA-474 hardSigmoid operator
     new 8187aeb  Merge pull request #519 from ShichengChen/operator4
034bc19 is described below

commit 034bc19c29d372c96e84e53e404c0a06f364d71d
Author: ShichengChen <c3...@gmail.com>
AuthorDate: Thu Aug 15 16:38:30 2019 +0800

    SINGA-474 hardSigmoid operator
---
 python/singa/autograd.py      | 35 +++++++++++++++++++++++++++++++++++
 test/python/test_operation.py | 22 ++++++++++++++++++++++
 2 files changed, 57 insertions(+)
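
For reference, the new operator matches the ONNX HardSigmoid definition, y = max(0, min(1, alpha * x + gamma)), whose gradient is alpha wherever alpha * x + gamma lies strictly inside (0, 1) and zero elsewhere. A minimal NumPy sketch of the same computation (the helper names below are illustrative, not part of the patch):

    import numpy as np

    def hard_sigmoid(x, alpha=0.2, gamma=0.5):
        # forward: y = max(0, min(1, alpha * x + gamma))
        return np.clip(alpha * x + gamma, 0.0, 1.0)

    def hard_sigmoid_grad(x, dy, alpha=0.2, gamma=0.5):
        # backward: gradient flows (scaled by alpha) only where the
        # affine term stays strictly inside the linear region (0, 1)
        s = alpha * x + gamma
        return ((s > 0) & (s < 1)) * alpha * dy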

diff --git a/python/singa/autograd.py b/python/singa/autograd.py
index 04189f6..2285f7e 100644
--- a/python/singa/autograd.py
+++ b/python/singa/autograd.py
@@ -2221,6 +2221,41 @@ def log(x):
     return Log()(x)[0]
 
 
+class HardSigmoid(Operation):
+    def __init__(self, alpha=0.2, gamma=0.5):
+        super(HardSigmoid, self).__init__()
+        self.alpha = alpha
+        self.gamma = gamma
+
+    def forward(self, x):
+        """Do forward propagation.
+        y = max(0, min(1, alpha * x + gamma))
+        Args:
+            x (CTensor): matrix
+        Returns:
+            a CTensor for the result
+        """
+        x = singa.AddFloat(singa.MultFloat(x, self.alpha), self.gamma)
+        if training:
+            self.cache = x
+
+        x = singa.ReLU(x)
+        mask1 = singa.LTFloat(x, 1.0)
+        mask2 = singa.GEFloat(x, 1.0)
+
+        # x is non-negative after ReLU; the masks clamp values >= 1 to 1
+        return singa.__add__(singa.__mul__(x, mask1), mask2)
+
+    def backward(self, dy):
+        # gradient is alpha where 0 < alpha * x + gamma < 1, else 0
+        mask0 = singa.GTFloat(self.cache, 0.0)
+        mask1 = singa.LTFloat(self.cache, 1.0)
+        mask = singa.__mul__(mask0, mask1)
+        return singa.__mul__(singa.MultFloat(mask, self.alpha), dy)
+
+
+def hardsigmoid(x, alpha=0.2, gamma=0.5):
+    return HardSigmoid(alpha, gamma)(x)[0]
 class Squeeze(Operation):
     def __init__(self,axis=[]):
         super(Squeeze, self).__init__()
diff --git a/test/python/test_operation.py b/test/python/test_operation.py
index 5cb0021..4eed1f5 100755
--- a/test/python/test_operation.py
+++ b/test/python/test_operation.py
@@ -1587,5 +1587,27 @@ class TestPythonOperation(unittest.TestCase):
         np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx1)), DX1, decimal=5)
 
 
+    def test_HardSigmoid(self):
+        def test_helper(gpu=False):
+            # y = max(0, min(1, alpha * x + gamma))
+            a = 0.2
+            g = 0.5
+            X = np.random.randn(3, 2).astype(np.float32)
+            DY = np.random.randn(3, 2).astype(np.float32)
+            y = np.clip(X * a + g, 0, 1)
+            # gradient is alpha where 0 < alpha * x + gamma < 1, else 0
+            grad = ((X * a + g > 0) & (X * a + g < 1)) * a * DY
+            x = tensor.from_numpy(X)
+            dy = tensor.from_numpy(DY)
+            if gpu:
+                x.to_device(gpu_dev)
+                dy.to_device(gpu_dev)
+            result = autograd.hardsigmoid(x, a, g)
+            dx = result.creator.backward(dy.data)
+            np.testing.assert_array_almost_equal(tensor.to_numpy(result), y, decimal=5)
+            np.testing.assert_array_almost_equal(tensor.to_numpy(tensor.from_raw_tensor(dx)), grad, decimal=5)
+
+        test_helper(False)
+        test_helper(True)
 if __name__ == '__main__':
     unittest.main()
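
With the patch applied, the operator is reachable through the Python autograd API. A usage sketch (assuming a SINGA build that includes this commit; tensor shapes and values are arbitrary):

    import numpy as np
    from singa import autograd, tensor

    autograd.training = True  # let forward() cache inputs for backward()
    x = tensor.from_numpy(np.random.randn(3, 2).astype(np.float32))
    y = autograd.hardsigmoid(x, 0.2, 0.5)  # y = clip(0.2 * x + 0.5, 0, 1)
    print(tensor.to_numpy(y))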