You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@systemml.apache.org by du...@apache.org on 2017/04/26 21:42:29 UTC

[03/11] incubator-systemml git commit: [SYSTEMML-1524] Graduate `nn` library to `scripts/nn`

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/43c321d1/scripts/staging/SystemML-NN/nn/test/test.dml
----------------------------------------------------------------------
diff --git a/scripts/staging/SystemML-NN/nn/test/test.dml b/scripts/staging/SystemML-NN/nn/test/test.dml
deleted file mode 100644
index a5cb497..0000000
--- a/scripts/staging/SystemML-NN/nn/test/test.dml
+++ /dev/null
@@ -1,549 +0,0 @@
-#-------------------------------------------------------------
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#-------------------------------------------------------------
-
-/*
- * Various tests, not including gradient checks.
- */
-source("nn/layers/batch_norm1d.dml") as batch_norm1d
-source("nn/layers/batch_norm2d.dml") as batch_norm2d
-source("nn/layers/conv2d.dml") as conv2d
-source("nn/layers/conv2d_builtin.dml") as conv2d_builtin
-source("nn/layers/cross_entropy_loss.dml") as cross_entropy_loss
-source("nn/layers/max_pool2d.dml") as max_pool2d
-source("nn/layers/max_pool2d_builtin.dml") as max_pool2d_builtin
-source("nn/layers/tanh.dml") as tanh
-source("nn/test/conv2d_simple.dml") as conv2d_simple
-source("nn/test/max_pool2d_simple.dml") as max_pool2d_simple
-source("nn/test/util.dml") as test_util
-source("nn/util.dml") as util
-
-batch_norm1d = function() {
-  /*
-   * Test for the 1D batch normalization function.
-   */
-  print("Testing the 1D batch normalization function.")
-
-  # Generate data
-  N = 4  # Number of examples
-  D = 4  # Number of features
-  mode = 'train'  # execution mode
-  mu = 0.9  # momentum of moving averages
-  eps = 1e-5  # smoothing term
-  X = matrix(seq(1,16), rows=N, cols=D)
-
-  # Create layer
-  [gamma, beta, ema_mean, ema_var] = batch_norm1d::init(D)
-
-  # Forward
-  [out, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =
-      batch_norm1d::forward(X, gamma, beta, mode, ema_mean, ema_var, mu, eps)
-
-  # Equivalency check
-  target = matrix("-1.34160721 -1.34160721 -1.34160733 -1.34160709
-                   -0.44720244 -0.44720244 -0.44720244 -0.44720232
-                    0.44720244  0.44720232  0.44720244  0.44720244
-                    1.34160733  1.34160721  1.34160733  1.34160733", rows=1, cols=N*D)
-  out = matrix(out, rows=1, cols=N*D)
-  for (i in 1:length(out)) {
-    rel_error = test_util::check_rel_error(as.scalar(out[1,i]),
-                                           as.scalar(target[1,i]), 1e-3, 1e-4)
-  }
-}
-
-conv2d = function() {
-  /*
-   * Test for the 2D convolution functions.
-   */
-  print("Testing the 2D convolution functions.")
-
-  # Generate data
-  N = 2  # num examples
-  C = 3  # num channels
-  Hin = 5  # input height
-  Win = 5  # input width
-  F = 2  # num filters
-  Hf = 3  # filter height
-  Wf = 3  # filter width
-  stride = 1
-  pad = 1
-  X = rand(rows=N, cols=C*Hin*Win, pdf="normal")
-
-  # Create layer
-  [W, b] = conv2d::init(F, C, Hf, Wf)
-
-  # Forward
-  [out, Hout, Wout] = conv2d::forward(X, W, b, C, Hin, Win, Hf, Wf, stride, stride, pad, pad)
-  [out_simple, Hout_simple, Wout_simple] = conv2d_simple::forward(X, W, b, C, Hin, Win, Hf, Wf,
-                                                                  stride, stride, pad, pad)
-  [out_builtin, Hout_builtin, Wout_builtin] = conv2d_builtin::forward(X, W, b, C, Hin, Win, Hf, Wf,
-                                                                      stride, stride, pad, pad)
-
-  # Equivalency check
-  out = matrix(out, rows=1, cols=N*F*Hout*Wout)
-  out_simple = matrix(out_simple, rows=1, cols=N*F*Hout*Wout)
-  out_builtin = matrix(out_builtin, rows=1, cols=N*F*Hout*Wout)
-  for (i in 1:length(out)) {
-    rel_error = test_util::check_rel_error(as.scalar(out[1,i]),
-                                           as.scalar(out_simple[1,i]), 1e-10, 1e-12)
-    rel_error = test_util::check_rel_error(as.scalar(out[1,i]),
-                                           as.scalar(out_builtin[1,i]), 1e-10, 1e-12)
-  }
-}
-
-cross_entropy_loss = function() {
-  /*
-   * Test for the cross-entropy loss function.
-   *
-   * Here we make sure that the cross-entropy loss function does
-   * not propagate `infinity` values in the case that a prediction is
-   * exactly equal to 0.
-   */
-  print("Testing the cross-entropy loss function with zero-valued predictions.")
-
-  # Generate data
-  N = 3 # num examples
-  K = 10 # num targets
-  pred = matrix(0, rows=N, cols=K)
-  y = rand(rows=N, cols=K, min=0, max=1, pdf="uniform")
-  y = y / rowSums(y)  # normalized probs
-
-  loss = cross_entropy_loss::forward(pred, y)
-
-  inf = 1/0
-  if (loss == inf) {
-    print("ERROR: The cross-entropy loss function outputs infinity for all-zero predictions.")
-  }
-}
-
-im2col = function() {
-  /*
-   * Test for the `im2col` and `col2im` functions.
-   */
-  print("Testing the im2col and col2im functions.")
-
-  # Generate data
-  C = 3  # num channels
-  Hin = 5  # input height
-  Win = 5  # input width
-  Hf = 3  # filter height
-  Wf = 3  # filter width
-  stride = 2
-  pad = (Hin * stride - Hin + Hf - stride) / 2
-  Hout = as.integer(floor((Hin + 2*pad - Hf)/stride + 1))
-  Wout = as.integer(floor((Win + 2*pad - Wf)/stride + 1))
-  x = rand(rows=C, cols=Hin*Win)
-
-  # pad
-  x_pad = util::pad_image(x, Hin, Win, pad, pad, 0)
-
-  # im2col
-  x_cols = util::im2col(x_pad, Hin+2*pad, Win+2*pad, Hf, Wf, stride, stride)
-
-  if (ncol(x_cols) != Hout*Wout) {
-    print("ERROR: im2col does not yield the correct output size: "
-          + ncol(x_cols)+" (actual) vs. "+Hout*Wout+" (correct).")
-  }
-
-  # col2im
-  x_pad2 = util::col2im(x_cols, C, Hin+2*pad, Win+2*pad, Hf, Wf, stride, stride, "none")
-
-  # Equivalency check
-  equivalent = test_util::all_equal(x_pad, x_pad2)
-  if (!equivalent) {
-    print("ERROR: im2col and then col2im does not yield the original image.")
-  }
-}
-
-padding = function() {
-  /*
-   * Test for the `pad_image` and `unpad_image` functions.
-   */
-  print("Testing the padding and unpadding functions.")
-
-  # Generate data
-  C = 3  # num channels
-  Hin = 5  # input height
-  Win = 5  # input width
-  pad = 3  # padding
-  x = rand(rows=C, cols=Hin*Win)
-
-  # Pad image
-  x_pad = util::pad_image(x, Hin, Win, pad, pad, 0)
-
-  # Check for padded rows & columns
-  for (c in 1:C) {
-    x_pad_slice = matrix(x_pad[c,], rows=Hin+2*pad, cols=Win+2*pad)
-    for (i in 1:pad) {
-      rowsum = sum(x_pad_slice[i,])
-      colsum = sum(x_pad_slice[,i])
-      if (rowsum != 0)
-        print("ERROR: Padding was not applied to row " + i + ".")
-      if (colsum != 0)
-        print("ERROR: Padding was not applied to column " + i + ".")
-    }
-  }
-
-  # Unpad image
-  x1 = util::unpad_image(x_pad, Hin, Win, pad, pad)
-
-  # Equivalency check
-  equivalent = test_util::all_equal(x, x1)
-  if (!equivalent) {
-    print("ERROR: Padding and then unpadding does not yield the original image.")
-  }
-}
-
-max_pool2d = function() {
-  /*
-   * Test for the 2D max pooling functions.
-   */
-  print("Testing the 2D max pooling functions.")
-
-  # Generate data
-  N = 2  # num examples
-  C = 3  # num channels
-  Hin = 8  # input height
-  Win = 8  # input width
-  Hf = 2  # filter height
-  Wf = 2  # filter width
-  stride = 2
-  X = rand(rows=N, cols=C*Hin*Win, pdf="normal")
-
-  for (padh in 0:3) {
-    for (padw in 0:3) {
-      print(" - Testing w/ padh="+padh+" & padw="+padw+".")
-      #if (1==1) {}  # force correct printing
-      #print("   - Testing forward")
-      [out, Hout, Wout] = max_pool2d::forward(X, C, Hin, Win, Hf, Wf, stride, stride, padh, padw)
-      [out_simple, Hout_simple, Wout_simple] = max_pool2d_simple::forward(X, C, Hin, Win, Hf, Wf,
-                                                                          stride, stride,
-                                                                          padh, padw)
-      [out_builtin, Hout_builtin, Wout_builtin] = max_pool2d_builtin::forward(X, C, Hin, Win,
-                                                                              Hf, Wf,
-                                                                              stride, stride,
-                                                                              padh, padw)
-
-      # Equivalency check
-      out = matrix(out, rows=1, cols=N*C*Hout*Wout)
-      out_simple = matrix(out_simple, rows=1, cols=N*C*Hout*Wout)
-      out_builtin = matrix(out_builtin, rows=1, cols=N*C*Hout*Wout)
-      for (i in 1:length(out)) {
-        rel_error = test_util::check_rel_error(as.scalar(out[1,i]),
-                                               as.scalar(out_simple[1,i]), 1e-10, 1e-12)
-        rel_error = test_util::check_rel_error(as.scalar(out[1,i]),
-                                               as.scalar(out_builtin[1,i]), 1e-10, 1e-12)
-      }
-
-      #print("   - Testing backward")
-      dout = rand(rows=N, cols=C*Hout*Wout, pdf="normal")
-      dX = max_pool2d::backward(dout, Hout, Wout, X, C, Hin, Win, Hf, Wf, stride, stride,
-                                padh, padw)
-      dX_simple = max_pool2d_simple::backward(dout, Hout_simple, Wout_simple, X, C, Hin, Win,
-                                              Hf, Wf, stride, stride, padh, padw)
-      dX_builtin = max_pool2d_builtin::backward(dout, Hout_builtin, Wout_builtin, X, C, Hin, Win,
-                                                Hf, Wf, stride, stride, padh, padw)
-
-      # Equivalency check
-      dX = matrix(dX, rows=1, cols=N*C*Hin*Win)
-      dX_simple = matrix(dX_simple, rows=1, cols=N*C*Hin*Win)
-      dX_builtin = matrix(dX_builtin, rows=1, cols=N*C*Hin*Win)
-      for (i in 1:length(dX)) {
-        rel_error = test_util::check_rel_error(as.scalar(dX[1,i]),
-                                               as.scalar(dX_simple[1,i]), 1e-10, 1e-12)
-        rel_error = test_util::check_rel_error(as.scalar(dX[1,i]),
-                                               as.scalar(dX_builtin[1,i]), 1e-10, 1e-12)
-      }
-    }
-  }
-
-  # ---
-  print(" - Testing for correct behavior against known answer w/ pad=0.")
-  # generate data
-  # -- channel 1
-  #  1  2  3  4
-  #  5  6  7  8
-  #  9 10 11 12
-  # 13 14 15 16
-  # -- channel 2
-  #  1  5  9 13
-  #  2  6 10 14
-  #  3  7 11 15
-  #  4  8 12 16
-  C = 2  # num channels
-  Hin = 4  # input height
-  Win = 4  # input width
-  X = matrix(seq(1,16,1), rows=Hin, cols=Win)
-  X = matrix(rbind(X, t(X)), rows=1, cols=C*Hin*Win)  # C=2
-  X = rbind(X, X)  # n=2
-  pad = 0
-
-  # forward
-  [out, Hout, Wout] = max_pool2d::forward(X, C, Hin, Win, Hf, Wf, stride, stride, pad, pad)
-  [out_simple, Hout_simple, Wout_simple] = max_pool2d_simple::forward(X, C, Hin, Win, Hf, Wf,
-                                                                      stride, stride, pad, pad)
-  [out_builtin, Hout_builtin, Wout_builtin] = max_pool2d_builtin::forward(X, C, Hin, Win, Hf, Wf,
-                                                                          stride, stride, pad, pad)
-
-  # equivalency check
-  # -- channel 1
-  #   6  8
-  #  14 16
-  # -- channel 2
-  #  6  14
-  #  8  16
-  target = matrix("6 8 14 16 6 14 8 16", rows=1, cols=C*Hout*Wout)
-  target = rbind(target, target)  # n=2
-  tmp = test_util::check_all_equal(out, target)
-  tmp = test_util::check_all_equal(out_simple, target)
-  tmp = test_util::check_all_equal(out_builtin, target)
-
-  print(" - Testing for correct behavior against known answer w/ pad=1.")
-  # generate data
-  # -- channel 1
-  #  0  0  0  0  0  0
-  #  0  1  2  3  4  0
-  #  0  5  6  7  8  0
-  #  0  9 10 11 12  0
-  #  0 13 14 15 16  0
-  #  0  0  0  0  0  0
-  # -- channel 2
-  #  0  0  0  0  0  0
-  #  0  1  5  9 13  0
-  #  0  2  6 10 14  0
-  #  0  3  7 11 15  0
-  #  0  4  8 12 16  0
-  #  0  0  0  0  0  0
-  pad = 1
-
-  # forward
-  [out, Hout, Wout] = max_pool2d::forward(X, C, Hin, Win, Hf, Wf, stride, stride, pad, pad)
-  [out_simple, Hout_simple, Wout_simple] = max_pool2d_simple::forward(X, C, Hin, Win, Hf, Wf,
-                                                                      stride, stride, pad, pad)
-  [out_builtin, Hout_builtin, Wout_builtin] = max_pool2d_builtin::forward(X, C, Hin, Win, Hf, Wf,
-                                                                          stride, stride, pad, pad)
-
-  # equivalency check
-  # -- channel 1
-  #  1  3  4
-  #  9 11 12
-  # 13 15 16
-  # -- channel 2
-  #  1  9 13
-  #  3 11 15
-  #  4 12 16
-  target = matrix("1 3 4 9 11 12 13 15 16 1 9 13 3 11 15 4 12 16", rows=1, cols=C*Hout*Wout)
-  target = rbind(target, target)  # n=2
-  tmp = test_util::check_all_equal(out, target)
-  tmp = test_util::check_all_equal(out_simple, target)
-  tmp = test_util::check_all_equal(out_builtin, target)
-
-  print(" - Testing for correct behavior against known answer w/ all negative matrix w/ pad=0.")
-  # generate data
-  # -- channel 1
-  #  -1  -2  -3  -4
-  #  -5  -6  -7  -8
-  #  -9 -10 -11 -12
-  # -13 -14 -15 -16
-  # -- channel 2
-  #  -1  -5  -9 -13
-  #  -2  -6 -10 -14
-  #  -3  -7 -11 -15
-  #  -4  -8 -12 -16
-  X = X * -1
-  pad = 0
-
-  # forward
-  [out, Hout, Wout] = max_pool2d::forward(X, C, Hin, Win, Hf, Wf, stride, stride, pad, pad)
-  [out_simple, Hout_simple, Wout_simple] = max_pool2d_simple::forward(X, C, Hin, Win, Hf, Wf,
-                                                                      stride, stride, pad, pad)
-  [out_builtin, Hout_builtin, Wout_builtin] = max_pool2d_builtin::forward(X, C, Hin, Win, Hf, Wf,
-                                                                          stride, stride, pad, pad)
-
-  # equivalency check
-  # -- channel 1
-  #  -1  -3
-  #  -9 -11
-  # -- channel 2
-  #  -1  -9
-  #  -3 -11
-  target = matrix("-1 -3 -9 -11 -1 -9 -3 -11", rows=1, cols=C*Hout*Wout)
-  target = rbind(target, target)  # n=2
-  tmp = test_util::check_all_equal(out, target)
-  tmp = test_util::check_all_equal(out_simple, target)
-  tmp = test_util::check_all_equal(out_builtin, target)
-
-
-  print(" - Testing for correct behavior against known answer w/ all negative matrix w/ pad=1.")
-  # generate data
-  # -- channel 1
-  #  0   0   0   0   0  0
-  #  0  -1  -2  -3  -4  0
-  #  0  -5  -6  -7  -8  0
-  #  0  -9 -10 -11 -12  0
-  #  0 -13 -14 -15 -16  0
-  #  0   0   0   0   0  0
-  # -- channel 2
-  #  0   0   0   0   0  0
-  #  0  -1  -5  -9 -13  0
-  #  0  -2  -6 -10 -14  0
-  #  0  -3  -7 -11 -15  0
-  #  0  -4  -8 -12 -16  0
-  #  0   0   0   0   0  0
-  pad = 1
-
-  # forward
-  [out, Hout, Wout] = max_pool2d::forward(X, C, Hin, Win, Hf, Wf, stride, stride, pad, pad)
-  [out_simple, Hout_simple, Wout_simple] = max_pool2d_simple::forward(X, C, Hin, Win, Hf, Wf,
-                                                                      stride, stride, pad, pad)
-  [out_builtin, Hout_builtin, Wout_builtin] = max_pool2d_builtin::forward(X, C, Hin, Win, Hf, Wf,
-                                                                          stride, stride, pad, pad)
-
-  # equivalency check
-  # -- channel 1
-  #  -1  -2  -4
-  #  -5  -6  -8
-  # -13 -14 -16
-  # -- channel 2
-  #  -1  -5 -13
-  #  -2  -6 -14
-  #  -4  -8 -16
-  target = matrix("-1 -2 -4 -5 -6 -8 -13 -14 -16 -1 -5 -13 -2 -6 -14 -4 -8 -16",
-                  rows=1, cols=C*Hout*Wout)
-  target = rbind(target, target)  # n=2
-  tmp = test_util::check_all_equal(out, target)
-  tmp = test_util::check_all_equal(out_simple, target)
-  tmp = test_util::check_all_equal(out_builtin, target)
-}
-
-batch_norm2d = function() {
-  /*
-   * Test for the 2D (spatial) batch normalization function.
-   */
-  print("Testing the 2D (spatial) batch normalization function.")
-
-  # Generate data
-  N = 2  # Number of examples
-  C = 3  # num channels
-  Hin = 4  # input height
-  Win = 5  # input width
-  mode = 'train'  # execution mode
-  mu = 0.9  # momentum of moving averages
-  eps = 1e-5  # smoothing term
-  X = matrix("70  29 23 55 72
-              42  98 68 48 39
-              34  73 44  6 40
-              74  18 18 53 53
-
-              63  85 72 61 72
-              32  36 23 29 63
-               9  43 43 49 43
-              31  43 89 94 50
-
-              62  12 32 41 87
-              25  48 99 52 61
-              12  83 60 55 34
-              30  42 68 88 51
-
-
-              67  59 62 67 84
-               8  76 24 19 57
-              10  89 63 72  2
-              59  56 16 15 70
-
-              32  69 55 39 93
-              84  36  4 30 40
-              70 100 36 76 59
-              69  15 40 24 34
-
-              51  67 11 13 32
-              66  85 55 85 38
-              32  35 17 83 34
-              55  58 52  0 99", rows=N, cols=C*Hin*Win)
-
-  # Create layer
-  [gamma, beta, ema_mean, ema_var] = batch_norm2d::init(C)
-
-  # Forward
-  [out, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =
-      batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)
-
-  # Equivalency check
-  target = matrix("0.86215019 -0.76679718 -1.00517964  0.26619387  0.94161105
-                  -0.25030172  1.97460198  0.78268933 -0.01191914 -0.36949289
-                  -0.56814504  0.98134136 -0.17084086 -1.68059683 -0.32976246
-                   1.02107191 -1.20383179 -1.20383179  0.18673301  0.18673301
-
-                   0.50426388  1.41921711  0.87856293  0.42108631  0.87856293
-                  -0.78498828 -0.61863315 -1.15928721 -0.90975463  0.50426388
-                  -1.74153018 -0.32751167 -0.32751167 -0.07797909 -0.32751167
-                  -0.82657707 -0.32751167  1.58557224  1.79351616 -0.0363903
-
-                   0.4607178  -1.49978399 -0.71558321 -0.36269283  1.44096887
-                  -0.99005347 -0.08822262  1.91148913  0.06861746  0.42150795
-                  -1.49978399  1.28412855  0.38229787  0.18624771 -0.63716316
-                  -0.79400325 -0.32348287  0.69597805  1.48017895  0.0294075
-
-
-                   0.74295878  0.42511559  0.54430676  0.74295878  1.41837597
-                  -1.60113597  1.10053277 -0.96544927 -1.16410136  0.34565473
-                  -1.52167511  1.61702824  0.5840373   0.94161105 -1.83951855
-                   0.42511559  0.30592418 -1.28329265 -1.32302308  0.86215019
-
-                  -0.78498828  0.75379658  0.17155361 -0.4938668   1.75192738
-                   1.37762833 -0.61863315 -1.9494741  -0.86816585 -0.45227802
-                   0.79538536  2.04304862 -0.61863315  1.04491806  0.33790874
-                   0.75379658 -1.49199748 -0.45227802 -1.11769855 -0.70181072
-
-                   0.0294075   0.65676796 -1.53899395 -1.46057391 -0.71558321
-                   0.61755812  1.36254871  0.18624771  1.36254871 -0.48032296
-                  -0.71558321 -0.59795308 -1.30373383  1.28412855 -0.63716316
-                   0.18624771  0.30387771  0.06861746 -1.97030437  1.91148913",
-                  rows=1, cols=N*C*Hin*Win)
-  out = matrix(out, rows=1, cols=N*C*Hin*Win)
-  for (i in 1:length(out)) {
-    rel_error = test_util::check_rel_error(as.scalar(out[1,i]),
-                                           as.scalar(target[1,i]), 1e-3, 1e-4)
-  }
-}
-
-tanh = function() {
-  /*
-   * Test for the `tanh` forward function.
-   */
-  print("Testing the tanh forward function.")
-
-  # Generate data
-  N = 2  # num examples
-  C = 3  # num channels
-  X = rand(rows=N, cols=C, pdf="normal")
-
-  out = tanh::forward(X)
-  out_ref = (exp(X) - exp(-X)) / (exp(X) + exp(-X))
-
-  # Equivalency check
-  for (i in 1:nrow(out)) {
-    for (j in 1:ncol(out)) {
-      rel_error = test_util::check_rel_error(as.scalar(out[i,j]), as.scalar(out_ref[i,j]),
-                                             1e-10, 1e-12)
-    }
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/43c321d1/scripts/staging/SystemML-NN/nn/test/util.dml
----------------------------------------------------------------------
diff --git a/scripts/staging/SystemML-NN/nn/test/util.dml b/scripts/staging/SystemML-NN/nn/test/util.dml
deleted file mode 100644
index e32a885..0000000
--- a/scripts/staging/SystemML-NN/nn/test/util.dml
+++ /dev/null
@@ -1,155 +0,0 @@
-#-------------------------------------------------------------
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#-------------------------------------------------------------
-
-/*
- * Test utility functions.
- */
-
-all_equal = function(matrix[double] X1, matrix[double] X2)
-    return(boolean equivalent) {
-  /*
-   * Determine if two matrices are equivalent.
-   *
-   * Inputs:
-   *  - X1: Inputs, of shape (any, any).
-   *  - X2: Inputs, of same shape as X1.
-   *
-   * Outputs:
-   *  - equivalent: Whether or not the two matrices are equivalent.
-   */
-  equivalent = as.logical(prod(X1 == X2))
-}
-
-check_all_equal = function(matrix[double] X1, matrix[double] X2)
-    return(boolean equivalent) {
-  /*
-   * Check if two matrices are equivalent, and report any issues.
-   *
-   * Issues an "ERROR" statement if elements of the two matrices are
-   * not equal.
-   *
-   * Inputs:
-   *  - X1: Inputs, of shape (any, any).
-   *  - X2: Inputs, of same shape as X1.
-   *
-   * Outputs:
-   *  - equivalent: Whether or not the two matrices are equivalent.
-   */
-  # Determine if matrices are equivalent
-  equivalent = all_equal(X1, X2)
-
-  # Evaluate relative error
-  if (!equivalent) {
-    print("ERROR: The two matrices are not equivalent.")
-  }
-}
-
-compute_rel_error = function(double x1, double x2)
-    return (double rel_error) {
-  /*
-   * Relative error measure between two values.
-   *
-   * Uses smoothing to avoid divide-by-zero errors.
-   *
-   * Inputs:
-   *  - x1: First value.
-   *  - x2: Second value.
-   *
-   * Outputs:
-   *  - rel_error: Relative error measure between the two values.
-   */
-  rel_error = abs(x1-x2) / max(1e-8, abs(x1)+abs(x2))
-}
-
-check_rel_error = function(double x1, double x2, double thresh_error, double thresh_warn)
-    return (double rel_error) {
-  /*
-   * Check and report any issues with the relative error measure between
-   * two values.
-   *
-   * Issues an "ERROR" statement for relative errors > thresh_error,
-   * indicating that the implementation is likely incorrect.
-   *
-   * Issues a "WARNING" statement for relative errors < thresh_error
-   * but > thresh_warn, indicating that the implementation may be
-   * incorrect.
-   *
-   * Inputs:
-   *  - x1: First value.
-   *  - x2: Second value.
-   *  - thresh_error: Error threshold.
-   *  - thresh_warn: Warning threshold.
-   *
-   * Outputs:
-   *  - rel_error: Relative error measure between the two values.
-   */
-  # Compute relative error
-  rel_error = compute_rel_error(x1, x2)
-
-  # Evaluate relative error
-  if (rel_error > thresh_error) {
-    print("ERROR: Relative error " + rel_error + " > " + thresh_error + " with " + x1 +
-          " vs " + x2 + ".")
-  }
-  else if (rel_error > thresh_warn & rel_error <= thresh_error) {
-    print("WARNING: Relative error " + rel_error + " > " + thresh_warn + " & <= " + thresh_error +
-          " with " + x1 + " vs " + x2 + ".")
-  }
-}
-
-check_rel_grad_error = function(double dw_a, double dw_n, double lossph, double lossmh)
-    return (double rel_error) {
-  /*
-   * Check and report any issues with the relative error measure between
-   * the analytical and numerical partial derivatives.
-   *
-   *  - Issues an "ERROR" statement for relative errors > 1e-2,
-   *  indicating that the gradient is likely incorrect.
-   *  - Issues a "WARNING" statement for relative errors < 1e-2
-   *  but > 1e-4, indicating that the gradient may be incorrect.
-   *
-   * Inputs:
-   *  - dw_a: Analytical partial derivative wrt w.
-   *  - dw_n: Numerical partial derivative wrt w.
-   *  - lossph: Loss evaluated with w set to w+h.
-   *  - lossmh: Loss evaluated with w set to w-h.
-   *
-   * Outputs:
-   *  - rel_error: Relative error measure between the two derivatives.
-   */
-  # Compute relative error
-  rel_error = compute_rel_error(dw_a, dw_n)
-
-  # Evaluate relative error
-  thresh_error = 1e-2
-  thresh_warn = 1e-4
-  if (rel_error > thresh_error) {
-    print("ERROR: Relative error " + rel_error + " > " + thresh_error + " with " + dw_a +
-          " analytical vs " + dw_n + " numerical, with lossph " + lossph +
-          " and lossmh " + lossmh)
-  }
-  else if (rel_error > thresh_warn & rel_error <= thresh_error) {
-    print("WARNING: Relative error " + rel_error + " > " + thresh_warn + " & <= " + thresh_error +
-          " with " + dw_a + " analytical vs " + dw_n + " numerical, with lossph " + lossph +
-          " and lossmh " + lossmh)
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/43c321d1/scripts/staging/SystemML-NN/nn/util.dml
----------------------------------------------------------------------
diff --git a/scripts/staging/SystemML-NN/nn/util.dml b/scripts/staging/SystemML-NN/nn/util.dml
deleted file mode 100644
index 3a73f08..0000000
--- a/scripts/staging/SystemML-NN/nn/util.dml
+++ /dev/null
@@ -1,202 +0,0 @@
-#-------------------------------------------------------------
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#-------------------------------------------------------------
-
-/*
- * Utility functions.
- */
-
-channel_sums = function(matrix[double] X, int C, int Hin, int Win)
-    return (matrix[double] out) {
-  /*
-   * Computes a channel-wise summation over a 4D input.
-   *
-   * Inputs:
-   *  - X: Inputs, of shape (N, C*Hin*Win).
-   *  - C: Number of input channels (dimensionality of input depth).
-   *  - Hin: Input height.
-   *  - Win: Input width.
-   *
-   * Outputs:
-   *  - out: Outputs, of shape (C, 1).
-   */
-  # Here we sum each column, reshape to (C, Hin*Win), and sum each row to result in the summation
-  # for each channel.
-  out = rowSums(matrix(colSums(X), rows=C, cols=Hin*Win))  # shape (C, 1)
-}
-
-im2col = function(matrix[double] img, int Hin, int Win, int Hf, int Wf, int strideh, int stridew)
-    return (matrix[double] img_cols) {
-  /*
-   * Rearrange local image regions (patches) into columns.
-   *
-   * Assumes image has already been padded as necessary.
-   *
-   * Inputs:
-   *  - img: Input image, of shape (C, Hin*Win), where C is the number
-   *      of input channels (depth).
-   *  - Hin: Input height, including padding.
-   *  - Win: Input width, including padding.
-   *  - Hf: Filter height.
-   *  - Wf: Filter width.
-   *  - strideh: Stride over height.
-   *  - stridew: Stride over width.
-   *
-   * Outputs:
-   *  - img_cols: Local spatial regions (patches) of the image stretched
-   *      out into columns, of shape (C*Hf*Wf, Hout*Wout).
-   */
-  C = nrow(img)
-  Hout = as.integer(floor((Hin-Hf)/strideh + 1))
-  Wout = as.integer(floor((Win-Wf)/stridew + 1))
-
-  # Note: We start with `img_cols` transposed to allow for row-major
-  # left-indexing inside the loop, which is more performant.
-  img_cols = matrix(0, rows=Hout*Wout, cols=C*Hf*Wf)  # zeros
-  parfor (hout in 1:Hout, check=0) {  # all output rows
-    hin = (hout-1)*strideh + 1
-    parfor (wout in 1:Wout, check=0) {  # all output columns
-      win = (wout-1)*stridew + 1
-      # Extract a local patch of the input image corresponding spatially to the filter sizes.
-      img_patch = matrix(0, rows=C, cols=Hf*Wf)  # zeros
-      parfor (c in 1:C) {  # all channels
-        img_slice = matrix(img[c,], rows=Hin, cols=Win)  # reshape
-        img_patch[c,] = matrix(img_slice[hin:hin+Hf-1, win:win+Wf-1], rows=1, cols=Hf*Wf)
-      }
-      img_cols[(hout-1)*Wout + wout,] = t(matrix(img_patch, rows=C*Hf*Wf, cols=1))  # reshape
-    }
-  }
-  img_cols = t(img_cols)
-}
-
-col2im = function(matrix[double] img_cols, int C, int Hin, int Win, int Hf, int Wf,
-                  int strideh, int stridew, string reduction)
-    return (matrix[double] img) {
-  /*
-   * Create an image from columns of local image regions (patches).
-   *
-   * The reduction strategy determines how to deal with overlapping
-   * patches.  If it is set to "add", any overlapping patches will be
-   * added together when creating the image.  This is useful when
-   * computing gradients on the original image given gradients on the
-   * patches.  Otherwise, if "none" is provided, any overlapping
-   * patches will just override previous ones when creating the image.
-   * This is useful when recreating an image from the output of
-   * `im2col`.
-   *
-   * Assumes original image was already padded as necessary.
-   *
-   * Inputs:
-   *  - img_cols: Local spatial regions (patches) of the image stretched
-   *      out into columns, of shape (C*Hf*Wf, Hout*Wout).
-   *  - C: Number of input channels (dimensionality of input depth).
-   *  - Hin: Input height, including padding.
-   *  - Win: Input width, including padding.
-   *  - Hf: Filter height.
-   *  - Wf: Filter width.
-   *  - strideh: Stride over height.
-   *  - stridew: Stride over width.
-   *  - reduction: The reduction strategy to use for overlapping
-   *      patches.  Valid options are "add" and "none".
-   *
-   * Outputs:
-   *  - img: Input image, of shape (C, Hin*Win).
-   */
-  Hout = as.integer(floor((Hin-Hf)/strideh + 1))
-  Wout = as.integer(floor((Win-Wf)/stridew + 1))
-
-  img = matrix(0, rows=C, cols=Hin*Win)  # zeros
-  for (hout in 1:Hout) {  # all output rows
-    hin = (hout-1)*strideh + 1
-    for (wout in 1:Wout) {  # all output columns
-      win = (wout-1)*stridew + 1
-      # Extract a local patch of the input image corresponding spatially to the filter sizes.
-      img_patch = matrix(img_cols[,(hout-1)*Wout + wout], rows=C, cols=Hf*Wf)  # zeros
-      parfor (c in 1:C) {  # all channels
-        img_patch_slice = matrix(img_patch[c,], rows=Hf, cols=Wf)  # reshape
-        if (reduction == "add") {
-          img_slice = matrix(0, rows=Hin, cols=Win)
-          img_slice[hin:hin+Hf-1, win:win+Wf-1] = img_patch_slice
-          img[c,] = img[c,] + matrix(img_slice, rows=1, cols=Hin*Win)
-        } else {
-          img_slice = matrix(img[c,], rows=Hin, cols=Win)
-          img_slice[hin:hin+Hf-1, win:win+Wf-1] = img_patch_slice
-          img[c,] = matrix(img_slice, rows=1, cols=Hin*Win)
-        }
-      }
-    }
-  }
-}
-
-pad_image = function(matrix[double] img, int Hin, int Win, int padh, int padw, double pad_value)
-    return (matrix[double] img_padded) {
-  /*
-   * Pads an image along the height and width dimensions with zeros.
-   *
-   * Inputs:
-   *  - img: Input image, of shape (C, Hin*Win), where C is the number
-   *      of input channels (depth).
-   *  - Hin: Input height.
-   *  - Win: Input width.
-   *  - padh: Padding for top and bottom sides.
-   *  - padw: Padding for left and right sides.
-   *  - pad_value: Value to use for the padding.
-   *      A typical value is 0.
-   *
-   * Outputs:
-   *  - img_padded: The input image padded along the height and width
-   *      dimensions, of shape (C, (Hin+2*padh)*(Win+2*padw)).
-   */
-  C = nrow(img)
-  img_padded = matrix(0, rows=C, cols=(Hin+2*padh)*(Win+2*padw))  # zeros
-  parfor (c in 1:C) {
-    img_slice = matrix(img[c,], rows=Hin, cols=Win)  # depth slice C reshaped
-    img_padded_slice = matrix(pad_value, rows=Hin+2*padh, cols=Win+2*padw)
-    img_padded_slice[padh+1:padh+Hin, padw+1:padw+Win] = img_slice
-    img_padded[c,] = matrix(img_padded_slice, rows=1, cols=(Hin+2*padh)*(Win+2*padw))  # reshape
-  }
-}
-
-unpad_image = function(matrix[double] img_padded, int Hin, int Win, int padh, int padw)
-    return (matrix[double] img) {
-  /*
-   * Unpads an image along the height and width dimensions.
-   *
-   * Inputs:
-   *  - img_padded: The input image padded along the height and width
-   *      dimensions, of shape (C, (Hin+2*padh)*(Win+2*padw)).
-   *  - Hin: Input height of unpadded image.
-   *  - Win: Input width of unpadded image.
-   *  - padh: Padding for top and bottom sides.
-   *  - padw: Padding for left and right sides.
-   *
-   * Outputs:
-   *  - img: Input image, of shape (C, Hin*Win), where C is the number
-   *      of input channels (depth).
-   */
-  C = nrow(img_padded)
-  img = matrix(0, rows=C, cols=Hin*Win)
-  parfor (c in 1:C) {
-    img_padded_slice = matrix(img_padded[c,], rows=(Hin+2*padh), cols=(Win+2*padw))
-    img_slice = img_padded_slice[padh+1:padh+Hin, padw+1:padw+Win]
-    img[c,] = matrix(img_slice, rows=1, cols=Hin*Win)
-  }
-}
-