Posted to commits@systemml.apache.org by du...@apache.org on 2017/04/22 01:19:01 UTC

incubator-systemml git commit: [MINOR] Code style updates for `nn`

Repository: incubator-systemml
Updated Branches:
  refs/heads/master 129f0f6b0 -> 63e28a37b


[MINOR] Code style updates for `nn`

This commit updates the `nn` test suite to take advantage of the fact
that DML no longer requires a dummy variable assignment when calling a
function that does not return a value.
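
For example, a grad-check call in `run_tests.dml` that previously needed a
throwaway assignment,

    tmp = grad_check::relu()

can now be written as a direct call:

    grad_check::relu()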


Project: http://git-wip-us.apache.org/repos/asf/incubator-systemml/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-systemml/commit/63e28a37
Tree: http://git-wip-us.apache.org/repos/asf/incubator-systemml/tree/63e28a37
Diff: http://git-wip-us.apache.org/repos/asf/incubator-systemml/diff/63e28a37

Branch: refs/heads/master
Commit: 63e28a37bde5399ed27c58cf09b990290e74e8e4
Parents: 129f0f6
Author: Mike Dusenberry <mw...@us.ibm.com>
Authored: Fri Apr 21 18:17:41 2017 -0700
Committer: Mike Dusenberry <mw...@us.ibm.com>
Committed: Fri Apr 21 18:17:41 2017 -0700

----------------------------------------------------------------------
 .../staging/SystemML-NN/nn/test/run_tests.dml   | 66 ++++++++++----------
 scripts/staging/SystemML-NN/nn/test/test.dml    | 20 +++---
 2 files changed, 43 insertions(+), 43 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/63e28a37/scripts/staging/SystemML-NN/nn/test/run_tests.dml
----------------------------------------------------------------------
diff --git a/scripts/staging/SystemML-NN/nn/test/run_tests.dml b/scripts/staging/SystemML-NN/nn/test/run_tests.dml
index ee4da68..d8173a9 100644
--- a/scripts/staging/SystemML-NN/nn/test/run_tests.dml
+++ b/scripts/staging/SystemML-NN/nn/test/run_tests.dml
@@ -30,37 +30,37 @@ print("Starting grad checks.")
 print("---")
 
 # Loss & loss-related functions
-tmp = grad_check::cross_entropy_loss()
-tmp = grad_check::l1_loss()
-tmp = grad_check::l1_reg()
-tmp = grad_check::l2_loss()
-tmp = grad_check::l2_reg()
-tmp = grad_check::log_loss()
+grad_check::cross_entropy_loss()
+grad_check::l1_loss()
+grad_check::l1_reg()
+grad_check::l2_loss()
+grad_check::l2_reg()
+grad_check::log_loss()
 print("")
 
 # Core layers
-tmp = grad_check::affine()
-tmp = grad_check::batch_norm1d()
-tmp = grad_check::batch_norm2d()
-tmp = grad_check::conv2d()
-tmp = grad_check::conv2d_builtin()
-tmp = grad_check::conv2d_simple()
-tmp = grad_check::dropout()
-tmp = grad_check::lstm()
-tmp = grad_check::max_pool2d()
-tmp = grad_check::max_pool2d_builtin()
-tmp = grad_check::max_pool2d_simple()
-tmp = grad_check::relu()
-tmp = grad_check::rnn()
-tmp = grad_check::scale_shift1d()
-tmp = grad_check::scale_shift2d()
-tmp = grad_check::sigmoid()
-tmp = grad_check::softmax()
-tmp = grad_check::tanh()
+grad_check::affine()
+grad_check::batch_norm1d()
+grad_check::batch_norm2d()
+grad_check::conv2d()
+grad_check::conv2d_builtin()
+grad_check::conv2d_simple()
+grad_check::dropout()
+grad_check::lstm()
+grad_check::max_pool2d()
+grad_check::max_pool2d_builtin()
+grad_check::max_pool2d_simple()
+grad_check::relu()
+grad_check::rnn()
+grad_check::scale_shift1d()
+grad_check::scale_shift2d()
+grad_check::sigmoid()
+grad_check::softmax()
+grad_check::tanh()
 print("")
 
 # Example model
-tmp = grad_check::two_layer_affine_l2_net()
+grad_check::two_layer_affine_l2_net()
 print("")
 
 print("---")
@@ -74,14 +74,14 @@ print("")
 print("Starting other tests.")
 print("---")
 
-tmp = test::batch_norm1d()
-tmp = test::batch_norm2d()
-tmp = test::conv2d()
-tmp = test::cross_entropy_loss()
-tmp = test::im2col()
-tmp = test::max_pool2d()
-tmp = test::padding()
-tmp = test::tanh()
+test::batch_norm1d()
+test::batch_norm2d()
+test::conv2d()
+test::cross_entropy_loss()
+test::im2col()
+test::max_pool2d()
+test::padding()
+test::tanh()
 
 print("---")
 print("Other tests complete -- look for any ERRORs or WARNINGs.")

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/63e28a37/scripts/staging/SystemML-NN/nn/test/test.dml
----------------------------------------------------------------------
diff --git a/scripts/staging/SystemML-NN/nn/test/test.dml b/scripts/staging/SystemML-NN/nn/test/test.dml
index 5a0390f..a5cb497 100644
--- a/scripts/staging/SystemML-NN/nn/test/test.dml
+++ b/scripts/staging/SystemML-NN/nn/test/test.dml
@@ -64,7 +64,7 @@ batch_norm1d = function() {
   out = matrix(out, rows=1, cols=N*D)
   for (i in 1:length(out)) {
     rel_error = test_util::check_rel_error(as.scalar(out[1,i]),
-                                      as.scalar(target[1,i]), 1e-3, 1e-4)
+                                           as.scalar(target[1,i]), 1e-3, 1e-4)
   }
 }
 
@@ -102,9 +102,9 @@ conv2d = function() {
   out_builtin = matrix(out_builtin, rows=1, cols=N*F*Hout*Wout)
   for (i in 1:length(out)) {
     rel_error = test_util::check_rel_error(as.scalar(out[1,i]),
-                                      as.scalar(out_simple[1,i]), 1e-10, 1e-12)
+                                           as.scalar(out_simple[1,i]), 1e-10, 1e-12)
     rel_error = test_util::check_rel_error(as.scalar(out[1,i]),
-                                      as.scalar(out_builtin[1,i]), 1e-10, 1e-12)
+                                           as.scalar(out_builtin[1,i]), 1e-10, 1e-12)
   }
 }
 
@@ -247,9 +247,9 @@ max_pool2d = function() {
       out_builtin = matrix(out_builtin, rows=1, cols=N*C*Hout*Wout)
       for (i in 1:length(out)) {
         rel_error = test_util::check_rel_error(as.scalar(out[1,i]),
-                                          as.scalar(out_simple[1,i]), 1e-10, 1e-12)
+                                               as.scalar(out_simple[1,i]), 1e-10, 1e-12)
         rel_error = test_util::check_rel_error(as.scalar(out[1,i]),
-                                          as.scalar(out_builtin[1,i]), 1e-10, 1e-12)
+                                               as.scalar(out_builtin[1,i]), 1e-10, 1e-12)
       }
 
       #print("   - Testing backward")
@@ -267,9 +267,9 @@ max_pool2d = function() {
       dX_builtin = matrix(dX_builtin, rows=1, cols=N*C*Hin*Win)
       for (i in 1:length(dX)) {
         rel_error = test_util::check_rel_error(as.scalar(dX[1,i]),
-                                          as.scalar(dX_simple[1,i]), 1e-10, 1e-12)
+                                               as.scalar(dX_simple[1,i]), 1e-10, 1e-12)
         rel_error = test_util::check_rel_error(as.scalar(dX[1,i]),
-                                          as.scalar(dX_builtin[1,i]), 1e-10, 1e-12)
+                                               as.scalar(dX_builtin[1,i]), 1e-10, 1e-12)
       }
     }
   }
@@ -515,12 +515,12 @@ batch_norm2d = function() {
                    0.0294075   0.65676796 -1.53899395 -1.46057391 -0.71558321
                    0.61755812  1.36254871  0.18624771  1.36254871 -0.48032296
                   -0.71558321 -0.59795308 -1.30373383  1.28412855 -0.63716316
-                   0.18624771  0.30387771  0.06861746 -1.97030437  1.91148913", rows=1,
-                                                                                cols=N*C*Hin*Win)
+                   0.18624771  0.30387771  0.06861746 -1.97030437  1.91148913",
+                  rows=1, cols=N*C*Hin*Win)
   out = matrix(out, rows=1, cols=N*C*Hin*Win)
   for (i in 1:length(out)) {
     rel_error = test_util::check_rel_error(as.scalar(out[1,i]),
-                                      as.scalar(target[1,i]), 1e-3, 1e-4)
+                                           as.scalar(target[1,i]), 1e-3, 1e-4)
   }
 }