You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@singa.apache.org by wa...@apache.org on 2016/12/02 05:13:17 UTC

[14/17] incubator-singa git commit: SINGA-268 Add IPython notebooks to the documentation

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4d7a8eeb/python/singa/layer.py
----------------------------------------------------------------------
diff --git a/python/singa/layer.py b/python/singa/layer.py
index f0024c4..5d087af 100644
--- a/python/singa/layer.py
+++ b/python/singa/layer.py
@@ -21,7 +21,6 @@ Example usages::
     from singa import layer
     from singa import tensor
     from singa import device
-    from singa.model_pb2 import kTrain
 
     layer.engine = 'cudnn'  # to use cudnn layers
     dev = device.create_cuda_gpu()
@@ -31,7 +30,7 @@ Example usages::
     conv.to_device(dev)  # move the layer data onto a CudaGPU device
     x = tensor.Tensor((3, 32, 32), dev)
     x.uniform(-1, 1)
-    y = conv.foward(kTrain, x)
+    y = conv.forward(True, x)
 
     dy = tensor.Tensor()
     dy.reset_like(y)

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4d7a8eeb/python/singa/tensor.py
----------------------------------------------------------------------
diff --git a/python/singa/tensor.py b/python/singa/tensor.py
index 6e59223..57ce563 100644
--- a/python/singa/tensor.py
+++ b/python/singa/tensor.py
@@ -17,31 +17,31 @@
 # =============================================================================
 """
 Example usage::
-
     import numpy as np
     from singa import tensor
     from singa import device
 
-    # create a tensor with shape (2,3), default CppCPU device and float32
-    x = tensor.Tensor((2,3))
+    # create a tensor with shape (2,3), default CppCPU device and float32
+    x = tensor.Tensor((2, 3))
     x.set_value(0.4)
 
-    # create a tensor from a numpy array
-    y = tensor.from_numpy((3,3), dtype=np.float32)
-    y.uniform(-1, 1)
+    # create a tensor from a numpy array
+    npy = np.zeros((3, 3), dtype=np.float32)
+    y = tensor.from_numpy(npy)
+
+    y.uniform(-1, 1)  # sample values from the uniform distribution
 
-    z = mult(x, y)  # gemm -> z of shape (2, 3)
+    z = tensor.mult(x, y)  # gemm -> z of shape (2, 3)
 
-    x += z # element-wise addition
+    x += z  # element-wise addition
 
-    dev = device.create_cuda_gpu()
+    dev = device.get_default_device()
     x.to_device(dev)  # move the data to a gpu device
 
-    r = relu(x)
+    r = tensor.relu(x)
 
     r.to_host()  # move the data back to host cpu
-    s = r.to_numpy()  # tensor -> numpy array, r must be on cpu
-
+    s = tensor.to_numpy(r)  # tensor -> numpy array, r must be on cpu
 
 There are two sets of tensor functions,