Posted to commits@singa.apache.org by wa...@apache.org on 2020/01/18 05:14:38 UTC

[singa] branch master updated: fix: avoid referencing CudnnXXHandle, which is undefined in CPU-only builds

This is an automated email from the ASF dual-hosted git repository.

wangwei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/singa.git


The following commit(s) were added to refs/heads/master by this push:
     new 970f266  fix: avoid referencing CudnnXXHandle, which is undefined in CPU-only builds
     new 4a19465  Merge pull request #574 from dcslin/hf-cudnn-handle-not-in-cpu
970f266 is described below

commit 970f2666e8f36122b4b58c745c91349f3bda8a65
Author: dcslin <13...@users.noreply.github.com>
AuthorDate: Sat Jan 18 03:41:02 2020 +0000

    fix: avoid referencing CudnnXXHandle, which is undefined in CPU-only builds
---
 python/singa/autograd.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
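
The patch below replaces isinstance checks against the Cudnn*Handle classes with a check on the singa.USE_CUDA flag. On a CPU-only build the swig-generated module never exports CudnnConvHandle or CudnnPoolingHandle, so merely evaluating singa.CudnnConvHandle raises AttributeError, even though the CPU branch would have been taken anyway. A minimal sketch of the change, assuming the swig module is importable as aliased below (the wrapper function name is hypothetical; USE_CUDA and the kernel functions are the ones the patch uses):

    from singa import singa_wrap as singa  # swig module; import path assumed

    def conv2d_forward(x, W, b, handle):
        # Old check: evaluating singa.CudnnConvHandle raises AttributeError
        # on CPU-only builds, where swig never generates the class:
        #
        #     if isinstance(handle, singa.CudnnConvHandle):
        #
        # New check: USE_CUDA is a build flag exported in every build, so
        # it can be read safely with or without CUDA support compiled in.
        if singa.USE_CUDA:
            return singa.GpuConvForward(x, W, b, handle)
        return singa.CpuConvForward(x, W, b, handle)
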

diff --git a/python/singa/autograd.py b/python/singa/autograd.py
index 37cb431..8e245c6 100644
--- a/python/singa/autograd.py
+++ b/python/singa/autograd.py
@@ -1199,7 +1199,7 @@ class _Conv2d(Operation):
             b = CTensor((self.handle.num_filters,), x.device())
             b.SetFloatValue(0.0)
 
-        if isinstance(self.handle, singa.CudnnConvHandle):
+        if singa.USE_CUDA:
             return singa.GpuConvForward(x, W, b, self.handle)
         else:
             return singa.CpuConvForward(x, W, b, self.handle)
@@ -1209,7 +1209,7 @@ class _Conv2d(Operation):
             self, "inputs"
         ), "Please set training as True before do BP. "
         
-        if isinstance(self.handle, singa.CudnnConvHandle):
+        if singa.USE_CUDA:
             dx = singa.GpuConvBackwardx(
                 dy, self.inputs[1], self.inputs[0], self.handle
             )
@@ -1572,7 +1572,7 @@ class _Pooling2d(Operation):
         self.handle = handle
 
     def forward(self, x):
-        if isinstance(self.handle, singa.CudnnPoolingHandle):
+        if singa.USE_CUDA:
             y = singa.GpuPoolingForward(self.handle, x)
         else:
             y = singa.CpuPoolingForward(self.handle, x)
@@ -1583,7 +1583,7 @@ class _Pooling2d(Operation):
         return y
 
     def backward(self, dy):
-        if isinstance(self.handle, singa.CudnnPoolingHandle):
+        if singa.USE_CUDA:
             dx = singa.GpuPoolingBackward(
                 self.handle, dy, self.cache[0], self.cache[1]
             )
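
The same one-line guard is applied at all four dispatch sites: conv forward/backward and pooling forward/backward. A defensive variant that keeps the per-handle type check would also avoid the AttributeError; a sketch, assuming only that the Cudnn handle classes are absent from CPU-only builds (this is not what the commit does):

    # Resolve the class lazily; fall back to the CPU kernel whenever the
    # Cudnn handle class was not generated into the module.
    cudnn_cls = getattr(singa, "CudnnConvHandle", None)
    if cudnn_cls is not None and isinstance(self.handle, cudnn_cls):
        return singa.GpuConvForward(x, W, b, self.handle)
    return singa.CpuConvForward(x, W, b, self.handle)

The committed USE_CUDA check is the simpler of the two; it assumes that a CUDA-enabled build always constructs the Cudnn handles, so the build flag can stand in for the handle type.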