Posted to commits@singa.apache.org by wa...@apache.org on 2020/07/08 03:04:28 UTC

[singa] branch dev updated: fix test operation Tensor class non-hashable error

This is an automated email from the ASF dual-hosted git repository.

wangwei pushed a commit to branch dev
in repository https://gitbox.apache.org/repos/asf/singa.git


The following commit(s) were added to refs/heads/dev by this push:
     new 6b9c573  fix test operation Tensor class non-hashable error
     new b58384c  Merge pull request #753 from dcslin/hf-test-operaitons-hashable
6b9c573 is described below

commit 6b9c573a732d5a77ac7e90af8cf9ff9d02612c9f
Author: dcslin <13...@users.noreply.github.com>
AuthorDate: Mon Jun 29 07:27:11 2020 +0000

    fix test operation Tensor class non-hashable error
---
 python/singa/autograd.py      | 6 +++++-
 test/python/test_operation.py | 4 ++--
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/python/singa/autograd.py b/python/singa/autograd.py
index 3ba09c9..f391f24 100644
--- a/python/singa/autograd.py
+++ b/python/singa/autograd.py
@@ -117,7 +117,11 @@ def gradients(y, dy=None):
     """
     grads = {}  # mapping: x->dx if x.stores_grad
     for p, dp in backward(y, dy):
-        grads[p] = dp
+        # TODO: this fn is only a helper for test cases for now.
+        #   1. could implement __hash__ or
+        #   2. make grad an attribute of the tensor class
+        #      p.grad = dp
+        grads[id(p)] = dp
     return grads
 
 
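A note on the autograd.py change above: grads[p] = dp requires Tensor objects to be usable as dict keys, and the commit works around that by keying on id(p), i.e. object identity. The sketch below is illustrative only and uses a hypothetical FakeTensor stand-in rather than singa's Tensor; it shows one common way a tensor-like class becomes unhashable in Python 3 (defining __eq__ without __hash__) and why the id()-keyed mapping still works.

    # Illustrative sketch, not singa code: FakeTensor is a hypothetical stand-in.
    class FakeTensor:
        def __init__(self, data):
            self.data = data

        def __eq__(self, other):
            # defining __eq__ without __hash__ sets __hash__ to None in Python 3,
            # so instances can no longer be used as dict keys
            return self.data == other.data

    p = FakeTensor([1.0, 2.0])
    dp = FakeTensor([0.1, 0.2])

    grads = {}
    try:
        grads[p] = dp            # raises TypeError: unhashable type: 'FakeTensor'
    except TypeError as err:
        print("direct Tensor key fails:", err)

    grads[id(p)] = dp            # keying by object identity always works
    assert grads[id(p)] is dp
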
diff --git a/test/python/test_operation.py b/test/python/test_operation.py
index f7f0b0b..83d9edf 100755
--- a/test/python/test_operation.py
+++ b/test/python/test_operation.py
@@ -451,7 +451,7 @@ class TestPythonOperation(unittest.TestCase):
 
         params = rnn.get_params()
         for key, param in params.items():
-            auto_grad = tensor.to_numpy(auto_grads[param])
+            auto_grad = tensor.to_numpy(auto_grads[id(param)])
 
             self.gradients_check(valinna_rnn_forward, param, auto_grad, dev=dev)
 
@@ -483,7 +483,7 @@ class TestPythonOperation(unittest.TestCase):
 
         params = rnn.get_params()
         for key, param in params.items():
-            auto_grad = tensor.to_numpy(auto_grads[param])
+            auto_grad = tensor.to_numpy(auto_grads[id(param)])
 
             self.gradients_check(lstm_forward, param, auto_grad, dev=dev)
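
The TODO in the autograd.py hunk lists two alternatives to the id() workaround used by these tests. Below is a minimal sketch of option 1, using a hypothetical HashableTensor class (not singa's actual Tensor): an identity-based __hash__ with a matching __eq__ would let gradient dicts be keyed by the tensor itself again, so the tests could index auto_grads[param] directly. Option 2 from the TODO (p.grad = dp) would instead attach the gradient to the tensor and avoid the dict lookup altogether.

    # Illustrative sketch of TODO option 1; HashableTensor is hypothetical,
    # not singa's Tensor class.
    class HashableTensor:
        def __init__(self, data):
            self.data = data

        def __eq__(self, other):
            return self is other     # identity equality, consistent with __hash__

        def __hash__(self):
            return id(self)          # identity hash, same value id(param) gives

    param = HashableTensor([1.0])
    auto_grads = {param: "dparam"}   # direct Tensor keys would work again
    assert auto_grads[param] == "dparam"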