Posted to commits@systemds.apache.org by ba...@apache.org on 2021/06/07 11:28:26 UTC

[systemds] branch master updated (d43cc72 -> d9fc068)

This is an automated email from the ASF dual-hosted git repository.

baunsgaard pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/systemds.git.


    from d43cc72  [SYSTEMDS-2953] Fix robustness parfor program serialization
     new ffffc59  [MINOR] Remove notebooks
     new d9fc068  [SYSTEMDS-3012] Neural Network Examples Update/Remove

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 notebooks/databricks/MLContext.scala               | 205 --------
 notebooks/databricks/README.md                     |   9 -
 notebooks/systemds_dev.ipynb                       | 582 ---------------------
 scripts/nn/README.md                               |  23 +-
 .../Example - MNIST LeNet deprecated.ipynb         | 189 -------
 ...ple - MNIST Softmax Classifier deprecated.ipynb | 179 -------
 .../Example - Neural Collaborative Filtering.ipynb | 347 ------------
 .../Example-MNIST_2NN_Leaky_ReLu_Softmax.dml       |   6 -
 scripts/nn/examples/Example-MNIST_Softmax.dml      |   7 -
 scripts/nn/examples/README.md                      |  59 +--
 scripts/nn/examples/fm-binclass-dummy-data.dml     |   9 +-
 scripts/nn/examples/fm-regression-dummy-data.dml   |  10 +-
 scripts/nn/examples/get_mnist_data.sh              |  28 -
 scripts/nn/examples/mnist_2NN.dml                  | 179 -------
 scripts/nn/examples/mnist_lenet-train.dml          |   4 -
 scripts/nn/examples/mnist_lenet.dml                |   8 +-
 .../mnist_lenet_distrib_sgd-train-dummy-data.dml   |  26 +-
 scripts/nn/examples/mnist_lenet_distrib_sgd.dml    |  28 +-
 scripts/nn/examples/mnist_softmax.dml              |   2 +-
 scripts/staging/fm-binclass.dml                    |   4 +-
 scripts/staging/fm-regression.dml                  |   4 +-
 .../sysds/test/applications/nn/BaseTest.java       |  17 +-
 .../nn/{NNGradientTest.java => FMTests.java}       |  11 +-
 .../nn/{NNGradientTest.java => MNISTLeNet.java}    |   8 +-
 .../test/applications/nn/NNComponentTest.java      |   2 +-
 .../sysds/test/applications/nn/NNGradientTest.java |   2 +-
 .../applications/nn/NNMaxPool2dComponentTest.java  |   2 +-
 .../nn/{NNGradientTest.java => NNTests.java}       |  10 +-
 .../nn/{NNGradientTest.java => TestFolder.java}    |  12 +-
 29 files changed, 91 insertions(+), 1881 deletions(-)
 delete mode 100644 notebooks/databricks/MLContext.scala
 delete mode 100644 notebooks/databricks/README.md
 delete mode 100644 notebooks/systemds_dev.ipynb
 delete mode 100644 scripts/nn/examples/Example - MNIST LeNet deprecated.ipynb
 delete mode 100644 scripts/nn/examples/Example - MNIST Softmax Classifier deprecated.ipynb
 delete mode 100644 scripts/nn/examples/Example - Neural Collaborative Filtering.ipynb
 delete mode 100644 scripts/nn/examples/get_mnist_data.sh
 delete mode 100644 scripts/nn/examples/mnist_2NN.dml
 copy src/test/java/org/apache/sysds/test/applications/nn/{NNGradientTest.java => FMTests.java} (82%)
 copy src/test/java/org/apache/sysds/test/applications/nn/{NNGradientTest.java => MNISTLeNet.java} (87%)
 copy src/test/java/org/apache/sysds/test/applications/nn/{NNGradientTest.java => NNTests.java} (82%)
 copy src/test/java/org/apache/sysds/test/applications/nn/{NNGradientTest.java => TestFolder.java} (79%)

[systemds] 01/02: [MINOR] Remove notebooks

Posted by ba...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

baunsgaard pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/systemds.git

commit ffffc59bc5febb96bd523134dbc75817e15876f4
Author: baunsgaard <ba...@tugraz.at>
AuthorDate: Mon Jun 7 09:50:03 2021 +0200

    [MINOR] Remove notebooks
---
 notebooks/databricks/MLContext.scala | 205 ------------
 notebooks/databricks/README.md       |   9 -
 notebooks/systemds_dev.ipynb         | 582 -----------------------------------
 3 files changed, 796 deletions(-)

diff --git a/notebooks/databricks/MLContext.scala b/notebooks/databricks/MLContext.scala
deleted file mode 100644
index 55b6536..0000000
--- a/notebooks/databricks/MLContext.scala
+++ /dev/null
@@ -1,205 +0,0 @@
-// Databricks notebook source
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-
-// COMMAND ----------
-
-// MAGIC %md # Apache SystemDS on Databricks
-
-// COMMAND ----------
-
-// MAGIC %md ## Create a quickstart cluster
-// MAGIC 
-// MAGIC 1. In the sidebar, right-click the **Clusters** button and open the link in a new window.
-// MAGIC 1. On the Clusters page, click **Create Cluster**.
-// MAGIC 1. Name the cluster **Quickstart**.
-// MAGIC 1. In the Databricks Runtime Version drop-down, select **6.4 (Scala 2.11, Spark 2.4.5)**.
-// MAGIC 1. Click **Create Cluster**.
-// MAGIC 1. Attach `SystemDS.jar` file to the libraries
-
-// COMMAND ----------
-
-// MAGIC %md ## Attach the notebook to the cluster and run all commands in the notebook
-// MAGIC 
-// MAGIC 1. Return to this notebook. 
-// MAGIC 1. In the notebook menu bar, select **<img src="http://docs.databricks.com/_static/images/notebooks/detached.png"/></a> > Quickstart**.
-// MAGIC 1. When the cluster changes from <img src="http://docs.databricks.com/_static/images/clusters/cluster-starting.png"/></a> to <img src="http://docs.databricks.com/_static/images/clusters/cluster-running.png"/></a>, click **<img src="http://docs.databricks.com/_static/images/notebooks/run-all.png"/></a> Run All**.
-
-// COMMAND ----------
-
-// MAGIC %md ## Load SystemDS MLContext API
-
-// COMMAND ----------
-
-import org.apache.sysds.api.mlcontext._
-import org.apache.sysds.api.mlcontext.ScriptFactory._
-val ml = new MLContext(spark)
-
-// COMMAND ----------
-
-val habermanUrl = "http://archive.ics.uci.edu/ml/machine-learning-databases/haberman/haberman.data"
-val habermanList = scala.io.Source.fromURL(habermanUrl).mkString.split("\n")
-val habermanRDD = sc.parallelize(habermanList)
-val habermanMetadata = new MatrixMetadata(306, 4)
-val typesRDD = sc.parallelize(Array("1.0,1.0,1.0,2.0"))
-val typesMetadata = new MatrixMetadata(1, 4)
-val scriptUrl = "https://raw.githubusercontent.com/apache/systemds/master/scripts/algorithms/Univar-Stats.dml"
-val uni = dmlFromUrl(scriptUrl).in("A", habermanRDD, habermanMetadata).in("K", typesRDD, typesMetadata).in("$CONSOLE_OUTPUT", true)
-ml.execute(uni)
-
-// COMMAND ----------
-
-// MAGIC %md ### Create a neural network layer with (R-like) DML language
-
-// COMMAND ----------
-
-val s = """
-  source("scripts/nn/layers/relu.dml") as relu;
-  X = rand(rows=100, cols=10, min=-1, max=1);
-  R1 = relu::forward(X);
-  R2 = max(X, 0);
-  R = sum(R1==R2);
-  """
-
-val ret = ml.execute(dml(s).out("R")).getScalarObject("R").getDoubleValue();
-
-// COMMAND ----------
-
-// MAGIC %md ### Recommendation with Amazon review dataset
-
-// COMMAND ----------
-
-import java.net.URL
-import java.io.File
-import org.apache.commons.io.FileUtils
-
-FileUtils.copyURLToFile(new URL("http://snap.stanford.edu/data/amazon0601.txt.gz"), new File("/tmp/amazon0601.txt.gz"))
-
-// COMMAND ----------
-
-// MAGIC %sh
-// MAGIC gunzip -d /tmp/amazon0601.txt.gz
-
-// COMMAND ----------
-
-// To list the file system files. For more, see https://docs.databricks.com/data/filestore.html
-// File system: display(dbutils.fs.ls("file:/tmp"))
-// DBFS: display(dbutils.fs.ls("."))
-
-dbutils.fs.mv("file:/tmp/amazon0601.txt", "dbfs:/tmp/amazon0601.txt")
-
-// COMMAND ----------
-
-display(dbutils.fs.ls("/tmp"))
-// display(dbutils.fs.ls("file:/tmp"))
-
-// COMMAND ----------
-
-// move temporary files to databricks file system (DBFS)
-// dbutils.fs.mv("file:/databricks/driver/amazon0601.txt", "dbfs:/tmp/amazon0601.txt") 
-val df = spark.read.format("text").option("inferSchema", "true").option("header","true").load("dbfs:/tmp/amazon0601.txt")
-display(df)
-
-// COMMAND ----------
-
-// MAGIC %py
-// MAGIC 
-// MAGIC # The scala data processing pipeline can also be
-// MAGIC # implemented in python as shown in this block
-// MAGIC 
-// MAGIC # 
-// MAGIC # import pyspark.sql.functions as F
-// MAGIC # # https://spark.apache.org/docs/latest/sql-ref.html
-// MAGIC 
-// MAGIC # dataPath = "dbfs:/tmp/amazon0601.txt"
-// MAGIC 
-// MAGIC # X_train = (sc.textFile(dataPath)
-// MAGIC #     .filter(lambda l: not l.startswith("#"))
-// MAGIC #     .map(lambda l: l.split("\t"))
-// MAGIC #     .map(lambda prods: (int(prods[0]), int(prods[1]), 1.0))
-// MAGIC #     .toDF(("prod_i", "prod_j", "x_ij"))
-// MAGIC #     .filter("prod_i < 500 AND prod_j < 500") # Filter for memory constraints
-// MAGIC #     .cache())
-// MAGIC 
-// MAGIC # max_prod_i = X_train.select(F.max("prod_i")).first()[0]
-// MAGIC # max_prod_j = X_train.select(F.max("prod_j")).first()[0]
-// MAGIC # numProducts = max(max_prod_i, max_prod_j) + 1 # 0-based indexing
-// MAGIC # print("Total number of products: {}".format(numProducts))
-
-// COMMAND ----------
-
-// Reference: https://spark.apache.org/docs/latest/rdd-programming-guide.html
-val X_train = (sc.textFile("dbfs:/tmp/amazon0601.txt").filter(l => !(l.startsWith("#"))).map(l => l.split("\t"))
-                  .map(prods => (prods(0).toLong, prods(1).toLong, 1.0))
-                  .toDF("prod_i", "prod_j", "x_ij")
-                  .filter("prod_i < 500 AND prod_j < 500") // filter for memory constraints
-                  .cache())
-
-display(X_train)
-
-// COMMAND ----------
-
-// MAGIC %md #### Poisson Nonnegative Matrix Factorization
-
-// COMMAND ----------
-
-// Poisson Nonnegative Matrix Factorization
-
-val pnmf = """
-# data & args
-X = X+1 # change product IDs to be 1-based, rather than 0-based
-V = table(X[,1], X[,2])
-size = ifdef($size, -1)
-if(size > -1) {
-    V = V[1:size,1:size]
-}
-
-n = nrow(V)
-m = ncol(V)
-range = 0.01
-W = Rand(rows=n, cols=rank, min=0, max=range, pdf="uniform")
-H = Rand(rows=rank, cols=m, min=0, max=range, pdf="uniform")
-losses = matrix(0, rows=max_iter, cols=1)
-
-# run PNMF
-i=1
-while(i <= max_iter) {
-  # update params
-  H = (H * (t(W) %*% (V/(W%*%H))))/t(colSums(W)) 
-  W = (W * ((V/(W%*%H)) %*% t(H)))/t(rowSums(H))
-  
-  # compute loss
-  losses[i,] = -1 * (sum(V*log(W%*%H)) - as.scalar(colSums(W)%*%rowSums(H)))
-  i = i + 1;
-}
-  """
-
-val ret = ml.execute(dml(pnmf).in("X", X_train).in("max_iter", 100).in("rank", 10).out("W").out("H").out("losses"));
-
-// COMMAND ----------
-
-val W = ret.getMatrix("W")
-val H = ret.getMatrix("H")
-val losses = ret.getMatrix("losses")
-
-// COMMAND ----------
-
-val lossesDF = losses.toDF().sort("__INDEX")
-display(lossesDF)
diff --git a/notebooks/databricks/README.md b/notebooks/databricks/README.md
deleted file mode 100644
index f4ce275..0000000
--- a/notebooks/databricks/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-#### Setup Apache SystemDS on Databricks platform
-
-1. Create a new account at [databricks cloud](https://community.cloud.databricks.com/)
-2. In left-side navbar select **Clusters** > **`+ Create Cluster`** > Name the cluster! > **`Create Cluster`**
-3. Navigate to the created cluster configuration.
-    1. Select **Libraries**
-    2. Select **Install New** > **Library Source [`Upload`]** and **Library Type [`Jar`]**
-    3. Upload the `SystemDS.jar` file! > **`Install`**
-4. Attach a notebook to the cluster above.
diff --git a/notebooks/systemds_dev.ipynb b/notebooks/systemds_dev.ipynb
deleted file mode 100644
index 9ba218f..0000000
--- a/notebooks/systemds_dev.ipynb
+++ /dev/null
@@ -1,582 +0,0 @@
-{
-  "nbformat": 4,
-  "nbformat_minor": 0,
-  "metadata": {
-    "colab": {
-      "name": "SystemDS on Colaboratory.ipynb",
-      "provenance": [],
-      "collapsed_sections": [],
-      "toc_visible": true,
-      "include_colab_link": true
-    },
-    "kernelspec": {
-      "name": "python3",
-      "display_name": "Python 3"
-    }
-  },
-  "cells": [
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "XX60cA7YuZsw"
-      },
-      "source": [
-        "##### Copyright &copy; 2020 The Apache Software Foundation."
-      ]
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "8GEGDZ9GuZGp",
-        "cellView": "form"
-      },
-      "source": [
-        "# @title Apache Version 2.0 (The \"License\");\n",
-        "#-------------------------------------------------------------\n",
-        "#\n",
-        "# Licensed to the Apache Software Foundation (ASF) under one\n",
-        "# or more contributor license agreements.  See the NOTICE file\n",
-        "# distributed with this work for additional information\n",
-        "# regarding copyright ownership.  The ASF licenses this file\n",
-        "# to you under the Apache License, Version 2.0 (the\n",
-        "# \"License\"); you may not use this file except in compliance\n",
-        "# with the License.  You may obtain a copy of the License at\n",
-        "#\n",
-        "#   http://www.apache.org/licenses/LICENSE-2.0\n",
-        "#\n",
-        "# Unless required by applicable law or agreed to in writing,\n",
-        "# software distributed under the License is distributed on an\n",
-        "# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n",
-        "# KIND, either express or implied.  See the License for the\n",
-        "# specific language governing permissions and limitations\n",
-        "# under the License.\n",
-        "#\n",
-        "#-------------------------------------------------------------"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "_BbCdLjRoy2A"
-      },
-      "source": [
-        "### Developer notebook for Apache SystemDS"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "zhdfvxkEq1BX"
-      },
-      "source": [
-        "Run this notebook online at [Google Colab ↗](https://colab.research.google.com/github/apache/systemds/blob/master/notebooks/systemds_dev.ipynb).\n",
-        "\n",
-        "\n"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "efFVuggts1hr"
-      },
-      "source": [
-        "This Jupyter/Colab-based tutorial will interactively walk through development setup and running SystemDS in both the\n",
-        "\n",
-        "A. standalone mode \\\n",
-        "B. with Apache Spark.\n",
-        "\n",
-        "Flow of the notebook:\n",
-        "1. Download and Install the dependencies\n",
-        "2. Go to section **A** or **B**"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "vBC5JPhkGbIV"
-      },
-      "source": [
-        "#### Download and Install the dependencies\n",
-        "\n",
-        "1. **Runtime:** Java (OpenJDK 8 is preferred)\n",
-        "2. **Build:** Apache Maven\n",
-        "3. **Backend:** Apache Spark (optional)"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "VkLasseNylPO"
-      },
-      "source": [
-        "##### Setup\n",
-        "\n",
-        "A custom function to run OS commands."
-      ]
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "4Wmf-7jfydVH"
-      },
-      "source": [
-        "# Run and print a shell command.\n",
-        "def run(command):\n",
-        "  print('>> {}'.format(command))\n",
-        "  !{command}\n",
-        "  print('')"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "kvD4HBMi0ohY"
-      },
-      "source": [
-        "##### Install Java\n",
-        "Let us install OpenJDK 8. More about [OpenJDK ↗](https://openjdk.java.net/install/)."
-      ]
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "8Xnb_ePUyQIL"
-      },
-      "source": [
-        "!apt-get update\n",
-        "!apt-get install openjdk-8-jdk-headless -qq > /dev/null\n",
-        "\n",
-        "# run the below command to replace the existing installation\n",
-        "!update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java\n",
-        "\n",
-        "import os\n",
-        "os.environ[\"JAVA_HOME\"] = \"/usr/lib/jvm/java-8-openjdk-amd64\"\n",
-        "\n",
-        "!java -version"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "BhmBWf3u3Q0o"
-      },
-      "source": [
-        "##### Install Apache Maven\n",
-        "\n",
-        "SystemDS uses Apache Maven to build and manage the project. More about [Apache Maven ↗](http://maven.apache.org/).\n",
-        "\n",
-        "Maven builds SystemDS using its project object model (POM) and a set of plugins. One would find `pom.xml` find the codebase!"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "I81zPDcblchL"
-      },
-      "source": [
-        "# Download the maven source.\n",
-        "maven_version = 'apache-maven-3.6.3'\n",
-        "maven_path = f\"/opt/{maven_version}\"\n",
-        "\n",
-        "if not os.path.exists(maven_path):\n",
-        "  run(f\"wget -q -nc -O apache-maven.zip https://downloads.apache.org/maven/maven-3/3.6.3/binaries/{maven_version}-bin.zip\")\n",
-        "  run('unzip -q -d /opt apache-maven.zip')\n",
-        "  run('rm -f apache-maven.zip')\n",
-        "\n",
-        "# Let's choose the absolute path instead of $PATH environment variable.\n",
-        "def maven(args):\n",
-        "  run(f\"{maven_path}/bin/mvn {args}\")\n",
-        "\n",
-        "maven('-v')"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "Xphbe3R43XLw"
-      },
-      "source": [
-        "##### Install Apache Spark (Optional, if you want to work with spark backend)\n"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "_WgEa00pTs3w"
-      },
-      "source": [
-        "NOTE: If spark is not downloaded. Let us make sure the version we are trying to download is officially supported at\n",
-        "https://spark.apache.org/downloads.html"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "3zdtkFkLnskx"
-      },
-      "source": [
-        "# Spark and Hadoop version\n",
-        "spark_version = 'spark-2.4.7'\n",
-        "hadoop_version = 'hadoop2.7'\n",
-        "spark_path = f\"/opt/{spark_version}-bin-{hadoop_version}\"\n",
-        "if not os.path.exists(spark_path):\n",
-        "  run(f\"wget -q -nc -O apache-spark.tgz https://downloads.apache.org/spark/{spark_version}/{spark_version}-bin-{hadoop_version}.tgz\")\n",
-        "  run('tar zxfv apache-spark.tgz -C /opt')\n",
-        "  run('rm -f apache-spark.tgz')\n",
-        "\n",
-        "os.environ[\"SPARK_HOME\"] = spark_path\n",
-        "os.environ[\"PATH\"] += \":$SPARK_HOME/bin\"\n"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "91pJ5U8k3cjk"
-      },
-      "source": [
-        "#### Get Apache SystemDS\n",
-        "\n",
-        "Apache SystemDS development happens on GitHub at [apache/systemds ↗](https://github.com/apache/systemds)"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "SaPIprmg3lKE"
-      },
-      "source": [
-        "!git clone https://github.com/apache/systemds systemds --depth=1\n",
-        "%cd systemds"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "40Fo9tPUzbWK"
-      },
-      "source": [
-        "##### Build the project"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "s0Iorb0ICgHa"
-      },
-      "source": [
-        "# Logging flags: -q only for ERROR; -X for DEBUG; -e for ERROR\n",
-        "# Option 1: Build only the java codebase\n",
-        "maven('clean package -q')\n",
-        "\n",
-        "# Option 2: For building along with python distribution\n",
-        "# maven('clean package -P distribution')"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "SUGac5w9ZRBQ"
-      },
-      "source": [
-        "### A. Working with SystemDS in **standalone** mode\n",
-        "\n",
-        "NOTE: Let's pay attention to *directories* and *relative paths*. :)\n",
-        "\n"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "g5Nk2Bb4UU2O"
-      },
-      "source": [
-        "##### 1. Set SystemDS environment variables\n",
-        "\n",
-        "These are useful for the `./bin/systemds` script."
-      ]
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "2ZnSzkq8UT32"
-      },
-      "source": [
-        "!export SYSTEMDS_ROOT=$(pwd)\n",
-        "!export PATH=$SYSTEMDS_ROOT/bin:$PATH"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "zyLmFCv6ZYk5"
-      },
-      "source": [
-        "##### 2. Download Haberman data\n",
-        "\n",
-        "Data source: https://archive.ics.uci.edu/ml/datasets/Haberman's+Survival\n",
-        "\n",
-        "About: The survival of patients who had undergone surgery for breast cancer.\n",
-        "\n",
-        "Data Attributes:\n",
-        "1. Age of patient at time of operation (numerical)\n",
-        "2. Patient's year of operation (year - 1900, numerical)\n",
-        "3. Number of positive axillary nodes detected (numerical)\n",
-        "4. Survival status (class attribute)\n",
-        "    - 1 = the patient survived 5 years or longer\n",
-        "    - 2 = the patient died within 5 year"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "ZrQFBQehV8SF"
-      },
-      "source": [
-        "!mkdir ../data"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "E1ZFCTFmXFY_"
-      },
-      "source": [
-        "!wget -P ../data/ https://web.archive.org/web/20200725014530/https://archive.ics.uci.edu/ml/machine-learning-databases/haberman/haberman.data"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "FTo8Py_vOGpX"
-      },
-      "source": [
-        "# Display first 10 lines of the dataset\n",
-        "# Notice that the test is plain csv with no headers!\n",
-        "!sed -n 1,10p ../data/haberman.data"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "Oy2kgVdkaeWK"
-      },
-      "source": [
-        "##### 2.1 Set `metadata` for the data\n",
-        "\n",
-        "The data does not have any info on the value types. So, `metadata` for the data\n",
-        "helps know the size and format for the matrix data as `.mtd` file with the same\n",
-        "name and location as `.data` file."
-      ]
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "vfypIgJWXT6K"
-      },
-      "source": [
-        "# generate metadata file for the dataset\n",
-        "!echo '{\"rows\": 306, \"cols\": 4, \"format\": \"csv\"}' > ../data/haberman.data.mtd\n",
-        "\n",
-        "# generate type description for the data\n",
-        "!echo '1,1,1,2' > ../data/types.csv\n",
-        "!echo '{\"rows\": 1, \"cols\": 4, \"format\": \"csv\"}' > ../data/types.csv.mtd"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "7Vis3V31bA53"
-      },
-      "source": [
-        "##### 3. Find the algorithm to run with `systemds`"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "L_0KosFhbhun"
-      },
-      "source": [
-        "# Inspect the directory structure of systemds code base\n",
-        "!ls"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "R7C5DVM7YfTb"
-      },
-      "source": [
-        "# List all the scripts (also called top level algorithms!)\n",
-        "!ls scripts/algorithms"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "5PrxwviWJhNd"
-      },
-      "source": [
-        "# Lets choose univariate statistics script.\n",
-        "# Output the algorithm documentation\n",
-        "# start from line no. 22 onwards. Till 35th line the command looks like\n",
-        "!sed -n 22,35p ./scripts/algorithms/Univar-Stats.dml"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "zv_7wRPFSeuJ"
-      },
-      "source": [
-        "!./bin/systemds ./scripts/algorithms/Univar-Stats.dml -nvargs X=../data/haberman.data TYPES=../data/types.csv STATS=../data/univarOut.mtx CONSOLE_OUTPUT=TRUE"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "IqY_ARNnavrC"
-      },
-      "source": [
-        "##### 3.1 Let us inspect the output data"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "k-_eQg9TauPi"
-      },
-      "source": [
-        "# output first 10 lines only.\n",
-        "!sed -n 1,10p ../data/univarOut.mtx"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "o5VCCweiDMjf"
-      },
-      "source": [
-        "#### B. Run SystemDS with Apache Spark"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "6gJhL7lc1vf7"
-      },
-      "source": [
-        "#### Playground for DML scripts\n",
-        "\n",
-        "DML - A custom language designed for SystemDS with R-like syntax."
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "zzqeSor__U6M"
-      },
-      "source": [
-        "##### A test `dml` script to prototype algorithms\n",
-        "\n",
-        "Modify the code in the below cell and run to work develop data science tasks\n",
-        "in a high level language."
-      ]
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "t59rTyNbOF5b"
-      },
-      "source": [
-        "%%writefile ../test.dml\n",
-        "\n",
-        "# This code code acts as a playground for dml code\n",
-        "X = rand (rows = 20, cols = 10)\n",
-        "y = X %*% rand(rows = ncol(X), cols = 1)\n",
-        "lm(X = X, y = y)"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "VDfeuJYE1JfK"
-      },
-      "source": [
-        "Submit the `dml` script to Spark with `spark-submit`.\n",
-        "More about [Spark Submit ↗](https://spark.apache.org/docs/latest/submitting-applications.html)"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "YokktyNE1Cig"
-      },
-      "source": [
-        "!$SPARK_HOME/bin/spark-submit \\\n",
-        "    ./target/SystemDS.jar -f ../test.dml"
-      ],
-      "execution_count": null,
-      "outputs": []
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "gCMkudo_-8_8"
-      },
-      "source": [
-        "##### Run a binary classification example with sample data\n",
-        "\n",
-        "One would notice that no other script than simple dml is used in this example completely."
-      ]
-    },
-    {
-      "cell_type": "code",
-      "metadata": {
-        "id": "OSLq2cZb_SUl"
-      },
-      "source": [
-        "# Example binary classification task with sample data.\n",
-        "# !$SPARK_HOME/bin/spark-submit ./target/SystemDS.jar -f ./scripts/nn/examples/fm-binclass-dummy-data.dml"
-      ],
-      "execution_count": null,
-      "outputs": []
-    }
-  ]
-}
\ No newline at end of file

[systemds] 02/02: [SYSTEMDS-3012] Neural Network Examples Update/Remove

Posted by ba...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

baunsgaard pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/systemds.git

commit d9fc06820bb6e20867474471a02c73bbfc3030a5
Author: baunsgaard <ba...@tugraz.at>
AuthorDate: Mon Jun 7 11:42:09 2021 +0200

    [SYSTEMDS-3012] Neural Network Examples Update/Remove
    
    This commit updates/fixes the neural network examples to fit the current
    APIs. Furthermore, all the ipynb notebooks have been removed, since they
    only provided a wrapper around the download and execution of the scripts,
    while not actually providing any code in our python api.
    
    Closes #1298
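
For reference, the main API change behind this update is visible in the
scripts/nn/README.md diff below: affine::init now takes a third argument,
which appears to be a seed (-1 requesting a random seed). A minimal DML
sketch of the updated call, assuming that reading of the argument:

    # Minimal sketch of the updated init API. Interpreting the third
    # argument of affine::init as a seed (-1 = random) is an assumption
    # based on the diff in this email.
    source("nn/layers/affine.dml") as affine
    D = 100  # number of input features
    M = 64   # number of neurons
    [W1, b1] = affine::init(D, M, -1)
    print("W1 dims: " + nrow(W1) + " x " + ncol(W1))
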
---
 scripts/nn/README.md                               |  23 +-
 .../Example - MNIST LeNet deprecated.ipynb         | 189 -----------
 ...ple - MNIST Softmax Classifier deprecated.ipynb | 179 -----------
 .../Example - Neural Collaborative Filtering.ipynb | 347 ---------------------
 .../Example-MNIST_2NN_Leaky_ReLu_Softmax.dml       |   6 -
 scripts/nn/examples/Example-MNIST_Softmax.dml      |   7 -
 scripts/nn/examples/README.md                      |  59 +---
 scripts/nn/examples/fm-binclass-dummy-data.dml     |   9 +-
 scripts/nn/examples/fm-regression-dummy-data.dml   |  10 +-
 scripts/nn/examples/get_mnist_data.sh              |  28 --
 scripts/nn/examples/mnist_2NN.dml                  | 179 -----------
 scripts/nn/examples/mnist_lenet-train.dml          |   4 -
 scripts/nn/examples/mnist_lenet.dml                |   8 +-
 .../mnist_lenet_distrib_sgd-train-dummy-data.dml   |  26 +-
 scripts/nn/examples/mnist_lenet_distrib_sgd.dml    |  28 +-
 scripts/nn/examples/mnist_softmax.dml              |   2 +-
 scripts/staging/fm-binclass.dml                    |   4 +-
 scripts/staging/fm-regression.dml                  |   4 +-
 .../sysds/test/applications/nn/BaseTest.java       |  17 +-
 .../nn/{NNGradientTest.java => FMTests.java}       |  11 +-
 .../nn/{NNGradientTest.java => MNISTLeNet.java}    |   8 +-
 .../test/applications/nn/NNComponentTest.java      |   2 +-
 .../sysds/test/applications/nn/NNGradientTest.java |   2 +-
 .../applications/nn/NNMaxPool2dComponentTest.java  |   2 +-
 .../nn/{NNGradientTest.java => NNTests.java}       |  10 +-
 .../nn/{NNGradientTest.java => TestFolder.java}    |  12 +-
 26 files changed, 91 insertions(+), 1085 deletions(-)

diff --git a/scripts/nn/README.md b/scripts/nn/README.md
index 99f8845..e9a59a7 100644
--- a/scripts/nn/README.md
+++ b/scripts/nn/README.md
@@ -19,12 +19,10 @@ limitations under the License.
 
 # SystemDS-NN
 
-### A deep learning library for [Apache SystemDS](https://github.com/apache/systemds).
+This folder contains different primitives for neural network training and prediction.
+## Neural net for regression with vanilla SGD:
 
-## Examples:
-#### Please see the [`examples`](examples) folder for more detailed examples, or view the following two quick examples.
-### Neural net for regression with vanilla SGD:
-```python
+```R
 # Imports
 source("nn/layers/affine.dml") as affine
 source("nn/layers/l2_loss.dml") as l2_loss
@@ -41,8 +39,8 @@ y = rand(rows=N, cols=t)
 # Create 2-layer network:
 ## affine1 -> relu1 -> affine2
 M = 64 # number of neurons
-[W1, b1] = affine::init(D, M)
-[W2, b2] = affine::init(M, t)
+[W1, b1] = affine::init(D, M, -1)
+[W2, b2] = affine::init(M, t, -1)
 
 # Initialize optimizer
 lr = 0.05  # learning rate
@@ -86,8 +84,9 @@ for (e in 1:epochs) {
 }
 ```
 
-### Neural net for multi-class classification with dropout and SGD w/ Nesterov momentum:
-```python
+## Neural net for multi-class classification with dropout and SGD w/ Nesterov momentum
+
+```R
 # Imports
 source("nn/layers/affine.dml") as affine
 source("nn/layers/cross_entropy_loss.dml") as cross_entropy_loss
@@ -112,9 +111,9 @@ parfor (i in 1:N) {
 H1 = 64 # number of neurons in 1st hidden layer
 H2 = 64 # number of neurons in 2nd hidden layer
 p = 0.5  # dropout probability
-[W1, b1] = affine::init(D, H1)
-[W2, b2] = affine::init(H1, H2)
-[W3, b3] = affine::init(H2, t)
+[W1, b1] = affine::init(D, H1, -1)
+[W2, b2] = affine::init(H1, H2, -1)
+[W3, b3] = affine::init(H2, t, -1)
 
 # Initialize SGD w/ Nesterov momentum optimizer
 lr = 0.05  # learning rate
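
The README's regression example, partially shown in the hunks above, wires
these layers into a full forward/backward/update step. Below is a condensed
sketch of one vanilla-SGD iteration; the sgd.dml path and the exact
forward/backward/update signatures follow standard nn-library conventions
and should be treated as assumptions, since only the init calls are
confirmed by this diff:

    # One vanilla-SGD step for the 2-layer regression net from the README.
    # Assumed signatures: affine::forward/backward, relu::forward/backward,
    # l2_loss::forward/backward, sgd::update (standard nn-library style).
    source("nn/layers/affine.dml") as affine
    source("nn/layers/l2_loss.dml") as l2_loss
    source("nn/layers/relu.dml") as relu
    source("nn/optim/sgd.dml") as sgd

    # Dummy regression data
    N = 1024  # examples
    D = 100   # features
    t = 1     # targets
    X = rand(rows=N, cols=D)
    y = rand(rows=N, cols=t)

    # 2-layer network: affine1 -> relu1 -> affine2
    M = 64  # number of neurons
    [W1, b1] = affine::init(D, M, -1)
    [W2, b2] = affine::init(M, t, -1)
    lr = 0.05  # learning rate

    # Forward pass
    out1 = affine::forward(X, W1, b1)
    outr1 = relu::forward(out1)
    out2 = affine::forward(outr1, W2, b2)
    loss = l2_loss::forward(out2, y)

    # Backward pass
    dout2 = l2_loss::backward(out2, y)
    [doutr1, dW2, db2] = affine::backward(dout2, outr1, W2, b2)
    dout1 = relu::backward(doutr1, out1)
    [dX, dW1, db1] = affine::backward(dout1, X, W1, b1)

    # Vanilla SGD updates
    W1 = sgd::update(W1, dW1, lr)
    b1 = sgd::update(b1, db1, lr)
    W2 = sgd::update(W2, dW2, lr)
    b2 = sgd::update(b2, db2, lr)
    print("L2 loss after one step: " + loss)
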
diff --git a/scripts/nn/examples/Example - MNIST LeNet deprecated.ipynb b/scripts/nn/examples/Example - MNIST LeNet deprecated.ipynb
deleted file mode 100644
index ea4fdc5..0000000
--- a/scripts/nn/examples/Example - MNIST LeNet deprecated.ipynb	
+++ /dev/null
@@ -1,189 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Quick Setup - Warning: Deprecated"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Create a SystemDS MLContext object\n",
-    "from systemds import MLContext, dml\n",
-    "ml = MLContext(sc)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Download Data - MNIST"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The MNIST dataset contains labeled images of handwritten digits, where each example is a 28x28 pixel image of grayscale values in the range [0,255] stretched out as 784 pixels, and each label is one of 10 possible digits in [0,9].  Here, we download 60,000 training examples, and 10,000 test examples, where the format is \"label, pixel_1, pixel_2, ..., pixel_n\"."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "%%sh\n",
-    "mkdir -p data/mnist/\n",
-    "cd data/mnist/\n",
-    "curl -O https://pjreddie.com/media/files/mnist_train.csv\n",
-    "curl -O https://pjreddie.com/media/files/mnist_test.csv"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## SystemDS \"LeNet\" Neural Network"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1. Train"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "script_string = \"\"\"\n",
-    "source(\"nn/examples/mnist_lenet.dml\") as mnist_lenet\n",
-    "\n",
-    "# Read training data\n",
-    "data = read($data, format=\"csv\")\n",
-    "n = nrow(data)\n",
-    "\n",
-    "# Extract images and labels\n",
-    "images = data[,2:ncol(data)]\n",
-    "labels = data[,1]\n",
-    "\n",
-    "# Scale images to [-1,1], and one-hot encode the labels\n",
-    "images = (images / 255.0) * 2 - 1\n",
-    "labels = table(seq(1, n), labels+1, n, 10)\n",
-    "\n",
-    "# Split into training (55,000 examples) and validation (5,000 examples)\n",
-    "X = images[5001:nrow(images),]\n",
-    "X_val = images[1:5000,]\n",
-    "y = labels[5001:nrow(images),]\n",
-    "y_val = labels[1:5000,]\n",
-    "\n",
-    "# Train\n",
-    "epochs = 10\n",
-    "[W1, b1, W2, b2, W3, b3, W4, b4] = mnist_lenet::train(X, y, X_val, y_val, C, Hin, Win, epochs)\n",
-    "\"\"\"\n",
-    "script = (dml(script_string).input(\"$data\", \"data/mnist/mnist_train.csv\")\n",
-    "                            .input(C=1, Hin=28, Win=28)\n",
-    "                            .output(\"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\", \"W4\", \"b4\"))\n",
-    "W1, b1, W2, b2, W3, b3, W4, b4 = (ml.execute(script)\n",
-    "                                    .get(\"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\", \"W4\", \"b4\"))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 2. Compute Test Accuracy"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "script_string = \"\"\"\n",
-    "source(\"nn/examples/mnist_lenet.dml\") as mnist_lenet\n",
-    "\n",
-    "# Read test data\n",
-    "data = read($data, format=\"csv\")\n",
-    "n = nrow(data)\n",
-    "\n",
-    "# Extract images and labels\n",
-    "X_test = data[,2:ncol(data)]\n",
-    "y_test = data[,1]\n",
-    "\n",
-    "# Scale images to [-1,1], and one-hot encode the labels\n",
-    "X_test = (X_test / 255.0) * 2 - 1\n",
-    "y_test = table(seq(1, n), y_test+1, n, 10)\n",
-    "\n",
-    "# Eval on test set\n",
-    "probs = mnist_lenet::predict(X_test, C, Hin, Win, W1, b1, W2, b2, W3, b3, W4, b4)\n",
-    "[loss, accuracy] = mnist_lenet::eval(probs, y_test)\n",
-    "\n",
-    "print(\"Test Accuracy: \" + accuracy)\n",
-    "\"\"\"\n",
-    "script = dml(script_string).input(**{\"$data\": \"data/mnist/mnist_train.csv\",\n",
-    "                                     \"C\": 1, \"Hin\": 28, \"Win\": 28,\n",
-    "                                     \"W1\": W1, \"b1\": b1,\n",
-    "                                     \"W2\": W2, \"b2\": b2,\n",
-    "                                     \"W3\": W3, \"b3\": b3,\n",
-    "                                     \"W4\": W4, \"b4\": b4})\n",
-    "ml.execute(script)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 3. Extract Model Into Spark DataFrames For Future Use"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "W1_df = W1.toDF()\n",
-    "b1_df = b1.toDF()\n",
-    "W2_df = W2.toDF()\n",
-    "b2_df = b2.toDF()\n",
-    "W3_df = W3.toDF()\n",
-    "b3_df = b3.toDF()\n",
-    "W4_df = W4.toDF()\n",
-    "b4_df = b4.toDF()\n",
-    "W1_df, b1_df, W2_df, b2_df, W3_df, b3_df, W4_df, b4_df"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 + Spark 2.x + SystemDS",
-   "language": "python",
-   "name": "pyspark3_2.x"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.1"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 1
-}
\ No newline at end of file
diff --git a/scripts/nn/examples/Example - MNIST Softmax Classifier deprecated.ipynb b/scripts/nn/examples/Example - MNIST Softmax Classifier deprecated.ipynb
deleted file mode 100644
index f0939f7..0000000
--- a/scripts/nn/examples/Example - MNIST Softmax Classifier deprecated.ipynb	
+++ /dev/null
@@ -1,179 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Quick Setup - Warning: Deprecated"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": false
-   },
-   "outputs": [],
-   "source": [
-    "# Create a SystemDS MLContext object\n",
-    "from systemds import MLContext, dml\n",
-    "ml = MLContext(sc)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Download Data - MNIST"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The MNIST dataset contains labeled images of handwritten digits, where each example is a 28x28 pixel image of grayscale values in the range [0,255] stretched out as 784 pixels, and each label is one of 10 possible digits in [0,9].  Here, we download 60,000 training examples, and 10,000 test examples, where the format is \"label, pixel_1, pixel_2, ..., pixel_n\"."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": [
-    "%%sh\n",
-    "mkdir -p data/mnist/\n",
-    "cd data/mnist/\n",
-    "curl -O https://pjreddie.com/media/files/mnist_train.csv\n",
-    "curl -O https://pjreddie.com/media/files/mnist_test.csv"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## SystemDS Softmax Model"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1. Train"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "training = \"\"\"\n",
-    "source(\"nn/examples/mnist_softmax.dml\") as mnist_softmax\n",
-    "\n",
-    "# Read training data\n",
-    "data = read($data, format=\"csv\")\n",
-    "n = nrow(data)\n",
-    "\n",
-    "# Extract images and labels\n",
-    "images = data[,2:ncol(data)]\n",
-    "labels = data[,1]\n",
-    "\n",
-    "# Scale images to [0,1], and one-hot encode the labels\n",
-    "images = images / 255.0\n",
-    "labels = table(seq(1, n), labels+1, n, 10)\n",
-    "\n",
-    "# Split into training (55,000 examples) and validation (5,000 examples)\n",
-    "X = images[5001:nrow(images),]\n",
-    "X_val = images[1:5000,]\n",
-    "y = labels[5001:nrow(images),]\n",
-    "y_val = labels[1:5000,]\n",
-    "\n",
-    "# Train\n",
-    "epochs = 1\n",
-    "[W, b] = mnist_softmax::train(X, y, X_val, y_val, epochs)\n",
-    "\"\"\"\n",
-    "script = dml(training).input(\"$data\", \"data/mnist/mnist_train.csv\").output(\"W\", \"b\")\n",
-    "W, b = ml.execute(script).get(\"W\", \"b\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 2. Compute Test Accuracy"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "testing = \"\"\"\n",
-    "source(\"nn/examples/mnist_softmax.dml\") as mnist_softmax\n",
-    "\n",
-    "# Read test data\n",
-    "data = read($data, format=\"csv\")\n",
-    "n = nrow(data)\n",
-    "\n",
-    "# Extract images and labels\n",
-    "X_test = data[,2:ncol(data)]\n",
-    "y_test = data[,1]\n",
-    "\n",
-    "# Scale images to [0,1], and one-hot encode the labels\n",
-    "X_test = X_test / 255.0\n",
-    "y_test = table(seq(1, n), y_test+1, n, 10)\n",
-    "\n",
-    "# Eval on test set\n",
-    "probs = mnist_softmax::predict(X_test, W, b)\n",
-    "[loss, accuracy] = mnist_softmax::eval(probs, y_test)\n",
-    "\n",
-    "print(\"Test Accuracy: \" + accuracy)\n",
-    "\"\"\"\n",
-    "script = dml(testing).input(\"$data\", \"data/mnist/mnist_test.csv\", W=W, b=b)\n",
-    "ml.execute(script)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 3. Extract Model Into Spark DataFrames For Future Use"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "W_df = W.toDF()\n",
-    "b_df = b.toDF()\n",
-    "W_df, b_df"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.1"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 1
-}
\ No newline at end of file
diff --git a/scripts/nn/examples/Example - Neural Collaborative Filtering.ipynb b/scripts/nn/examples/Example - Neural Collaborative Filtering.ipynb
deleted file mode 100644
index 5c047fd..0000000
--- a/scripts/nn/examples/Example - Neural Collaborative Filtering.ipynb	
+++ /dev/null
@@ -1,347 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Neural Collaborative Filtering (NCF)\n",
-    "\n",
-    "This examples trains a neural network on the MovieLens data set using the concept of [Neural Collaborative Filtering (NCF)](https://dl.acm.org/doi/abs/10.1145/3038912.3052569) that is aimed at approaching recommendation problems using deep neural networks as opposed to common matrix factorization approaches."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Setup and Imports"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "import pandas as pd\n",
-    "import matplotlib.pyplot as plt\n",
-    "from sklearn.model_selection import train_test_split"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Download Data - MovieLens"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The MovieLens data set is provided by the Unniversity of Minnesota and the GroupLens Research Group:\n",
-    "\n",
-    "> This dataset (ml-latest-small) describes 5-star rating and free-text tagging activity from [MovieLens](http://movielens.org/), a movie recommendation service. It contains 100836 ratings and 3683 tag applications across 9742 movies. These data were created by 610 users between March 29, 1996 and September 24, 2018. This dataset was generated on September 26, 2018.<br/>\n",
-    "Users were selected at random for inclusion. All selected users had rated at least 20 movies. No demographic information is included. Each user is represented by an id, and no other information is provided.<br/>\n",
-    "The data are contained in the files links.csv, movies.csv, ratings.csv and tags.csv. More details about the contents and use of all these files follows.<br/>\n",
-    "This is a development dataset. As such, it may change over time and is not an appropriate dataset for shared research results. See available benchmark datasets if that is your intent.<br/>\n",
-    "This and other GroupLens data sets are publicly available for download at http://grouplens.org/datasets/."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Archive:  ml-latest-small.zip\n",
-      "   creating: ml-latest-small/\n",
-      "  inflating: ml-latest-small/links.csv  \n",
-      "  inflating: ml-latest-small/tags.csv  \n",
-      "  inflating: ml-latest-small/ratings.csv  \n",
-      "  inflating: ml-latest-small/README.txt  \n",
-      "  inflating: ml-latest-small/movies.csv  \n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current\n",
-      "                                 Dload  Upload   Total   Spent    Left  Speed\n",
-      "\r",
-      "  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\r",
-      "  5  955k    5 50411    0     0  68679      0  0:00:14 --:--:--  0:00:14 68586\r",
-      "100  955k  100  955k    0     0   640k      0  0:00:01  0:00:01 --:--:--  640k\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%sh\n",
-    "DATASET=ml-latest-small\n",
-    "\n",
-    "mkdir -p data/$DATASET/\n",
-    "cd data/$DATASET\n",
-    "curl -O http://files.grouplens.org/datasets/movielens/$DATASET.zip\n",
-    "unzip $DATASET.zip"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Prepare Data"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 19,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "data_loc = \"data/ml-latest-small/ml-latest-small/\"\n",
-    "negative_split = 1.5  # how many negatives for one positive\n",
-    "\n",
-    "# load interactions from MovieLens\n",
-    "raw_ratings = pd.read_csv(data_loc + \"ratings.csv\")\n",
-    "positives = pd.DataFrame(raw_ratings, columns=['userId', 'movieId'])\n",
-    "\n",
-    "# sample negatives\n",
-    "negatives = pd.DataFrame(columns=[\"userId\", \"movieId\"])\n",
-    "\n",
-    "while len(negatives) < len(positives) * negative_split:\n",
-    "    user = positives[\"userId\"].sample().values[0]\n",
-    "    movie = positives[\"movieId\"].sample().values[0]\n",
-    "    if len(positives.loc[(positives[\"userId\"] == user) & (positives[\"movieId\"] == movie)]) == 0:\n",
-    "        negatives = negatives.append({\"userId\": user, \"movieId\": movie}, ignore_index=True)\n",
-    "\n",
-    "# write out final data\n",
-    "targets = np.hstack([np.ones(len(positives)), np.zeros(len(negatives))])\n",
-    "all_ratings = np.vstack([positives, negatives])\n",
-    "\n",
-    "user_item_targets = np.hstack([all_ratings, targets[:, np.newaxis]])\n",
-    "\n",
-    "np.random.shuffle(user_item_targets)\n",
-    "\n",
-    "split = train_test_split(user_item_targets, train_size=0.8)\n",
-    "\n",
-    "np.savetxt(data_loc + \"sampled-train.csv\", split[0], delimiter=\",\")\n",
-    "np.savetxt(data_loc + \"sampled-test.csv\", split[1], delimiter=\",\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## SystemDS NCF implementation"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Train"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### with synthetic dummy data"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 13,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Using user supplied systemds jar file target/SystemDS.jar\n",
-      "###############################################################################\n",
-      "#  SYSTEMDS_ROOT= .\n",
-      "#  SYSTEMDS_JAR_FILE= target/SystemDS.jar\n",
-      "#  CONFIG_FILE= --config ./target/testTemp/org/apache/sysds/api/mlcontext/MLContext/SystemDS-config.xml\n",
-      "#  LOG4JPROP= -Dlog4j.configuration=file:conf/log4j-silent.properties\n",
-      "#  CLASSPATH= target/SystemDS.jar:./lib/*:./target/lib/*\n",
-      "#  HADOOP_HOME= /Users/patrick/Uni Offline/Architectures of Machine Learning Systems (AMLS)/systemml/target/lib/hadoop\n",
-      "#\n",
-      "#  Running script scripts/nn/examples/ncf-dummy-data.dml locally with opts: \n",
-      "###############################################################################\n",
-      "Executing command:     java       -Xmx4g      -Xms4g      -Xmn400m   -cp target/SystemDS.jar:./lib/*:./target/lib/*   -Dlog4j.configuration=file:conf/log4j-silent.properties   org.apache.sysds.api.DMLScript   -f scripts/nn/examples/ncf-dummy-data.dml   -exec singlenode   --config ./target/testTemp/org/apache/sysds/api/mlcontext/MLContext/SystemDS-config.xml   \n",
-      "\n",
-      "NCF training starting with 1000 training samples, 100 validation samples, 50 items and 60 users...\n",
-      "Epoch: 1, Iter: 1, Train Loss: 0.6953457411615849, Train Accuracy: 0.5, Val Loss: 0.6995101788248107, Val Accuracy: 0.47\n",
-      "Epoch: 2, Iter: 1, Train Loss: 0.6667911468574823, Train Accuracy: 0.6875, Val Loss: 0.6992050630414124, Val Accuracy: 0.47\n",
-      "Epoch: 3, Iter: 1, Train Loss: 0.6570450250431727, Train Accuracy: 0.6875, Val Loss: 0.7014387912966833, Val Accuracy: 0.47\n",
-      "Epoch: 4, Iter: 1, Train Loss: 0.6521926651745862, Train Accuracy: 0.6875, Val Loss: 0.7053126102214489, Val Accuracy: 0.43999999999999995\n",
-      "Epoch: 5, Iter: 1, Train Loss: 0.6431405119563119, Train Accuracy: 0.6875, Val Loss: 0.7115121778198469, Val Accuracy: 0.43999999999999995\n",
-      "Epoch: 6, Iter: 1, Train Loss: 0.6353498336109219, Train Accuracy: 0.6875, Val Loss: 0.7193490066131873, Val Accuracy: 0.44999999999999996\n",
-      "Epoch: 7, Iter: 1, Train Loss: 0.6308046978859394, Train Accuracy: 0.6875, Val Loss: 0.7306240107462888, Val Accuracy: 0.48\n",
-      "Epoch: 8, Iter: 1, Train Loss: 0.6260145322748087, Train Accuracy: 0.75, Val Loss: 0.7435853055111923, Val Accuracy: 0.49\n",
-      "Epoch: 9, Iter: 1, Train Loss: 0.6163475345953953, Train Accuracy: 0.6875, Val Loss: 0.757023909929672, Val Accuracy: 0.5\n",
-      "Epoch: 10, Iter: 1, Train Loss: 0.6029424406867099, Train Accuracy: 0.6875, Val Loss: 0.7749021987872134, Val Accuracy: 0.51\n",
-      "Epoch: 11, Iter: 1, Train Loss: 0.5791958103856243, Train Accuracy: 0.8125, Val Loss: 0.7921418272873325, Val Accuracy: 0.51\n",
-      "Epoch: 12, Iter: 1, Train Loss: 0.5543597535155846, Train Accuracy: 0.8125, Val Loss: 0.8131440342665028, Val Accuracy: 0.5\n",
-      "Epoch: 13, Iter: 1, Train Loss: 0.5342062981571314, Train Accuracy: 0.8125, Val Loss: 0.8340415360672659, Val Accuracy: 0.45999999999999996\n",
-      "Epoch: 14, Iter: 1, Train Loss: 0.5156903349054259, Train Accuracy: 0.875, Val Loss: 0.8534000391024407, Val Accuracy: 0.47\n",
-      "Epoch: 15, Iter: 1, Train Loss: 0.5042912981017884, Train Accuracy: 0.8125, Val Loss: 0.873901869293276, Val Accuracy: 0.44999999999999996\n",
-      "Epoch: 16, Iter: 1, Train Loss: 0.48722704019844537, Train Accuracy: 0.8125, Val Loss: 0.898510539121238, Val Accuracy: 0.47\n",
-      "Epoch: 17, Iter: 1, Train Loss: 0.47048381704431463, Train Accuracy: 0.875, Val Loss: 0.9284775525937294, Val Accuracy: 0.48\n",
-      "Epoch: 18, Iter: 1, Train Loss: 0.45151030675588855, Train Accuracy: 0.875, Val Loss: 0.9574504971357228, Val Accuracy: 0.47\n",
-      "Epoch: 19, Iter: 1, Train Loss: 0.43940495503523824, Train Accuracy: 0.875, Val Loss: 0.9937811553464448, Val Accuracy: 0.45999999999999996\n",
-      "Epoch: 20, Iter: 1, Train Loss: 0.42553379542786246, Train Accuracy: 0.875, Val Loss: 1.0231502880025147, Val Accuracy: 0.43999999999999995\n",
-      "Epoch: 21, Iter: 1, Train Loss: 0.4163223594480222, Train Accuracy: 0.875, Val Loss: 1.0595479122098816, Val Accuracy: 0.45999999999999996\n",
-      "Epoch: 22, Iter: 1, Train Loss: 0.4050461773338017, Train Accuracy: 0.875, Val Loss: 1.0944624240337406, Val Accuracy: 0.48\n",
-      "Epoch: 23, Iter: 1, Train Loss: 0.3957080838041942, Train Accuracy: 0.875, Val Loss: 1.1315613394576827, Val Accuracy: 0.47\n",
-      "Epoch: 24, Iter: 1, Train Loss: 0.39252816032717697, Train Accuracy: 0.8125, Val Loss: 1.1608315131205158, Val Accuracy: 0.47\n",
-      "Epoch: 25, Iter: 1, Train Loss: 0.38656611677400526, Train Accuracy: 0.8125, Val Loss: 1.2010764396137235, Val Accuracy: 0.45999999999999996\n",
-      "Epoch: 26, Iter: 1, Train Loss: 0.3910140006546419, Train Accuracy: 0.8125, Val Loss: 1.2394434665872176, Val Accuracy: 0.44999999999999996\n",
-      "Epoch: 27, Iter: 1, Train Loss: 0.39012809759646405, Train Accuracy: 0.8125, Val Loss: 1.267704284952889, Val Accuracy: 0.43999999999999995\n",
-      "Epoch: 28, Iter: 1, Train Loss: 0.3986668930898999, Train Accuracy: 0.8125, Val Loss: 1.3134788291583197, Val Accuracy: 0.44999999999999996\n",
-      "Epoch: 29, Iter: 1, Train Loss: 0.39096586484137014, Train Accuracy: 0.8125, Val Loss: 1.3457368548231847, Val Accuracy: 0.44999999999999996\n",
-      "Epoch: 30, Iter: 1, Train Loss: 0.3913665786483714, Train Accuracy: 0.8125, Val Loss: 1.395200160764677, Val Accuracy: 0.44999999999999996\n",
-      "Epoch: 31, Iter: 1, Train Loss: 0.39306020872450564, Train Accuracy: 0.8125, Val Loss: 1.4547617764166234, Val Accuracy: 0.44999999999999996\n",
-      "Epoch: 32, Iter: 1, Train Loss: 0.3961123079325197, Train Accuracy: 0.8125, Val Loss: 1.4988918781732432, Val Accuracy: 0.45999999999999996\n",
-      "Epoch: 33, Iter: 1, Train Loss: 0.39167597788728836, Train Accuracy: 0.875, Val Loss: 1.5580225154760752, Val Accuracy: 0.44999999999999996\n",
-      "Epoch: 34, Iter: 1, Train Loss: 0.3936826951721131, Train Accuracy: 0.875, Val Loss: 1.592168642509798, Val Accuracy: 0.43999999999999995\n",
-      "Epoch: 35, Iter: 1, Train Loss: 0.39446093556125095, Train Accuracy: 0.8125, Val Loss: 1.6504423270813886, Val Accuracy: 0.43000000000000005\n",
-      "Epoch: 36, Iter: 1, Train Loss: 0.3917767876760818, Train Accuracy: 0.8125, Val Loss: 1.6894229810333048, Val Accuracy: 0.43000000000000005\n",
-      "Epoch: 37, Iter: 1, Train Loss: 0.3936299068718723, Train Accuracy: 0.8125, Val Loss: 1.7342536990495687, Val Accuracy: 0.43000000000000005\n",
-      "Epoch: 38, Iter: 1, Train Loss: 0.4086856463043926, Train Accuracy: 0.8125, Val Loss: 1.7709575584324264, Val Accuracy: 0.44999999999999996\n",
-      "Epoch: 39, Iter: 1, Train Loss: 0.3946728895715752, Train Accuracy: 0.8125, Val Loss: 1.8323990419424212, Val Accuracy: 0.43000000000000005\n",
-      "Epoch: 40, Iter: 1, Train Loss: 0.4092882424416999, Train Accuracy: 0.8125, Val Loss: 1.8647938002160964, Val Accuracy: 0.44999999999999996\n",
-      "Epoch: 41, Iter: 1, Train Loss: 0.4050641439255627, Train Accuracy: 0.8125, Val Loss: 1.891264442380163, Val Accuracy: 0.44999999999999996\n",
-      "Epoch: 42, Iter: 1, Train Loss: 0.4170644006779869, Train Accuracy: 0.8125, Val Loss: 1.9423174900115594, Val Accuracy: 0.44999999999999996\n",
-      "Epoch: 43, Iter: 1, Train Loss: 0.3923480753991977, Train Accuracy: 0.8125, Val Loss: 1.9731695043639572, Val Accuracy: 0.44999999999999996\n",
-      "Epoch: 44, Iter: 1, Train Loss: 0.40490676281916327, Train Accuracy: 0.8125, Val Loss: 2.010804834458905, Val Accuracy: 0.44999999999999996\n",
-      "Epoch: 45, Iter: 1, Train Loss: 0.40181821707001014, Train Accuracy: 0.8125, Val Loss: 2.051962004205519, Val Accuracy: 0.44999999999999996\n",
-      "Epoch: 46, Iter: 1, Train Loss: 0.40355348381441153, Train Accuracy: 0.8125, Val Loss: 2.0891022279849456, Val Accuracy: 0.44999999999999996\n",
-      "Epoch: 47, Iter: 1, Train Loss: 0.38715605504077866, Train Accuracy: 0.8125, Val Loss: 2.117280026954698, Val Accuracy: 0.44999999999999996\n",
-      "Epoch: 48, Iter: 1, Train Loss: 0.39836973023268446, Train Accuracy: 0.8125, Val Loss: 2.141835697116999, Val Accuracy: 0.43999999999999995\n",
-      "Epoch: 49, Iter: 1, Train Loss: 0.3901144594871556, Train Accuracy: 0.8125, Val Loss: 2.176511579483428, Val Accuracy: 0.43999999999999995\n",
-      "Epoch: 50, Iter: 1, Train Loss: 0.3917649057215277, Train Accuracy: 0.8125, Val Loss: 2.2288326304130806, Val Accuracy: 0.43999999999999995\n",
-      "NCF training completed after 50 epochs\n",
-      "SystemDS Statistics:\n",
-      "Total execution time:\t\t9.206 sec.\n",
-      "\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "20/05/28 15:04:03 INFO api.DMLScript: BEGIN DML run 05/28/2020 15:04:03\n",
-      "20/05/28 15:04:03 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable\n",
-      "20/05/28 15:04:13 INFO api.DMLScript: END DML run 05/28/2020 15:04:13\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%bash\n",
-    "cd ../../..\n",
-    "bin/systemds target/SystemDS.jar scripts/nn/examples/ncf-dummy-data.dml > scripts/nn/examples/run_log.txt && cat scripts/nn/examples/run_log.txt"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### with real data"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "%%bash\n",
-    "cd ../../..\n",
-    "bin/systemds target/SystemDS.jar scripts/nn/examples/ncf-real-data.dml > scripts/nn/examples/run_log.txt && cat scripts/nn/examples/run_log.txt"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    " ### Plot training results"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 14,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXoAAAD4CAYAAADiry33AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAgAElEQVR4nOzdd3iUVdrA4d+ZyaT3RkkhlNARkEgRFAGVpuKqiCgqrIgd0NXVteEq9l39cEUUBBFEXZXFVYqIIrKA9N4DoSQBQkjv0873x4whQkICTJhk8tzXNdfMvPV5h/DMmXPOe47SWiOEEMJzGdwdgBBCiNoliV4IITycJHohhPBwkuiFEMLDSaIXQggP5+XuACoTGRmpExIS3B2GEELUG5s2bTqltY6qbF2dTPQJCQls3LjR3WEIIUS9oZQ6UtU6qboRQggPJ4leCCE8nCR6IYTw [...]
-      "text/plain": [
-       "<Figure size 432x288 with 1 Axes>"
-      ]
-     },
-     "metadata": {
-      "needs_background": "light"
-     },
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "log_name = \"run_log\"\n",
-    "txt_name = log_name + \".txt\"\n",
-    "csv_name = log_name + \".csv\"\n",
-    "\n",
-    "# convert to CSV\n",
-    "with open(txt_name, \"r\") as txt_file:\n",
-    "    data = txt_file.readlines()\n",
-    "    csv_lines = list(map(lambda x: x.replace(\"Epoch: \", \"\")\n",
-    "                         .replace(\", Iter: \", \",\")\n",
-    "                         .replace(\", Train Loss: \", \",\")\n",
-    "                         .replace(\", Train Accuracy: \", \",\")\n",
-    "                         .replace(\", Val Loss: \", \",\")\n",
-    "                         .replace(\", Val Accuracy: \", \",\"),\n",
-    "                            filter(lambda x: \"Epoch: \" in x, data)))\n",
-    "    with open(csv_name, \"w\") as csv_file:\n",
-    "        csv_file.write(\"epoch,iter,train_loss,train_acc,val_loss,val_acc\\n\")\n",
-    "        for item in csv_lines:\n",
-    "            csv_file.write(\"%s\" % item)\n",
-    "\n",
-    "# plot\n",
-    "log = pd.read_csv(csv_name)\n",
-    "plot_log = log[log[\"iter\"] == 1]\n",
-    "\n",
-    "for val in [\"train_loss\", \"train_acc\", \"val_loss\", \"val_acc\"]:\n",
-    "    plt.plot(plot_log[\"epoch\"], plot_log[val], label=val)\n",
-    "\n",
-    "plt.legend()\n",
-    "plt.show()"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.7.7"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/scripts/nn/examples/Example-MNIST_2NN_Leaky_ReLu_Softmax.dml b/scripts/nn/examples/Example-MNIST_2NN_Leaky_ReLu_Softmax.dml
index 86a3308..0e16964 100644
--- a/scripts/nn/examples/Example-MNIST_2NN_Leaky_ReLu_Softmax.dml
+++ b/scripts/nn/examples/Example-MNIST_2NN_Leaky_ReLu_Softmax.dml
@@ -30,12 +30,6 @@
  *  - train: The file containing the training data
  *  - test: the file containing the test data
  *
- * The MNIST Data can be downloaded as follows:
- * mkdir -p data/mnist/
- * cd data/mnist/
- * curl -O https://pjreddie.com/media/files/mnist_train.csv
- * curl -O https://pjreddie.com/media/files/mnist_test.csv
- *
  * Sample Invocation
  *
  * systemds "<path to systemds repo>/systemds/scripts/nn/examples/Example-MNIST_2NN_Leaky_ReLu_Softmax.dml"
diff --git a/scripts/nn/examples/Example-MNIST_Softmax.dml b/scripts/nn/examples/Example-MNIST_Softmax.dml
index ede1905..6a66669 100644
--- a/scripts/nn/examples/Example-MNIST_Softmax.dml
+++ b/scripts/nn/examples/Example-MNIST_Softmax.dml
@@ -19,13 +19,6 @@
 #
 #-------------------------------------------------------------
 
-/*
-* The MNIST Data can be downloaded as follows:
-* mkdir -p data/mnist/
-* cd data/mnist/
-* curl -O https://pjreddie.com/media/files/mnist_train.csv
-* curl -O https://pjreddie.com/media/files/mnist_test.csv
-*/
 
 source("nn/examples/mnist_softmax.dml") as mnist_softmax
 
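With the curl-based download instructions removed from both example scripts (and get_mnist_data.sh deleted below), the MNIST CSVs now have to be fetched manually before running them. A minimal DML sketch for loading the data, assuming the usual layout of these CSV files (one row per image, the digit label in column 1, followed by 784 pixel values):

```
data = read("data/mnist/mnist_train.csv", format="csv")
n = nrow(data)
labels = data[, 1]                        # digit labels in 0..9
X = data[, 2:ncol(data)] / 255            # scale pixel values to [0, 1]
Y = table(seq(1, n), labels + 1, n, 10)   # one-hot encode into an n x 10 matrix
```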
diff --git a/scripts/nn/examples/README.md b/scripts/nn/examples/README.md
index fd3c1a2..ee7a8d0 100644
--- a/scripts/nn/examples/README.md
+++ b/scripts/nn/examples/README.md
@@ -19,63 +19,28 @@ limitations under the License.
 
 # SystemDS-NN Examples
 
-#### This folder contains scripts and PySpark Jupyter notebooks serving as examples of using the *SystemDS-NN* (`nn`) deep learning library.
+## MNIST Softmax Classifier
 
----
-
-# Examples
-### MNIST Softmax Classifier
-
-* This example trains a softmax classifier, which is essentially a multi-class logistic regression model, on the MNIST data.  The model will be trained on the *training* images, validated on the *validation* images, and tested for final performance metrics on the *test* images.
-* Notebook: `Example - MNIST Softmax Classifier.ipynb`.
+* This example trains a softmax classifier, which is essentially a multi-class logistic regression model, on the MNIST data.
+The model will be trained on the *training* images, validated on the *validation* images, and tested for final performance metrics on the *test* images.
 * DML Functions: `mnist_softmax.dml`
 * Training script: `mnist_softmax-train.dml`
 * Prediction script: `mnist_softmax-predict.dml`
 
-### MNIST "LeNet" Neural Net
+## MNIST "LeNet" Neural Net
 
-* This example trains a neural network on the MNIST data using a ["LeNet" architecture](http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf). The model will be trained on the *training* images, validated on the *validation* images, and tested for final performance metrics on the *test* images.
-* Notebook: `Example - MNIST LeNet.ipynb`.
+* This example trains a neural network on the MNIST data using a ["LeNet" architecture](http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf).
+The model will be trained on the *training* images, validated on the *validation* images, and tested for final performance metrics on the *test* images.
 * DML Functions: `mnist_lenet.dml`
 * Training script: `mnist_lenet-train.dml`
 * Prediction script: `mnist_lenet-predict.dml`
 
-### Neural Collaborative Filtering
+## Neural Collaborative Filtering
 
-* This example trains a neural network on the MovieLens data set using the concept of [Neural Collaborative Filtering (NCF)](https://dl.acm.org/doi/abs/10.1145/3038912.3052569) that is aimed at approaching recommendation problems using deep neural networks as opposed to common matrix factorization approaches.
-* As in the original paper, the targets are binary and only indicate whether a user has rated a movie or not. This makes the recommendation problem harder than working with the values of the ratings, but interaction data is in practice easier to collect.
+* This example trains a neural network on the MovieLens data set using [Neural Collaborative Filtering (NCF)](https://dl.acm.org/doi/abs/10.1145/3038912.3052569),
+which tackles recommendation problems with deep neural networks instead of the more common matrix factorization approaches.
+* As in the original paper, the targets are binary and only indicate whether a user has rated a movie or not.
+This makes the recommendation problem harder than working with the values of the ratings, but interaction data is in practice easier to collect.
-* MovieLens only provides positive interactions in form of ratings. We therefore randomly sample negative interactions as suggested by the original paper.
+* MovieLens only provides positive interactions in the form of ratings. We therefore randomly sample negative interactions, as suggested by the original paper.
-* The implementation works with a fixed layer architecture with two embedding layers at the beginning for users and items, three dense layers with ReLu activations in the middle and a sigmoid activation for the final classification.
-
----
-
-# Setup
-## Code
-* To run the examples, please first download and unzip the project via GitHub using the "Clone or download" button on the [homepage of the project](https://github.com/dusenberrymw/systemml-nn), *or* via the following commands:
-
-  ```
-  git clone https://github.com/dusenberrymw/systemml-nn.git
-  ```
-
-* Then, move into the `systemml-nn` folder via:
-  ```
-  cd systemml-nn
-  ```
-
-## Data
-* These examples use the classic [MNIST](http://yann.lecun.com/exdb/mnist/) dataset, which contains labeled 28x28 pixel images of handwritten digits in the range of 0-9.  There are 60,000 training images, and 10,000 testing images.  Of the 60,000 training images, 5,000 will be used as validation images.
-* **Download**:
-  * **Notebooks**: The data will be automatically downloaded as a step in either of the example notebooks.
-  * **Training scripts**: Please run `get_mnist_data.sh` to download the data separately.
-
-## Execution
-* These examples contain scripts written in SystemDS's R-like language (`*.dml`), as well as PySpark Jupyter notebooks (`*.ipynb`).  The scripts contain the math for the algorithms, enclosed in functions, and the notebooks serve as full, end-to-end examples of reading in data, training models using the functions within the scripts, and evaluating final performance.
-* **Notebooks**: To run the notebook examples, please install the SystemDS Python package with `pip install systemds`, and then startup Jupyter in the following manner from this directory (or for more information, please see [this great blog post](http://spark.tc/0-to-life-changing-application-with-apache-systemml/)):
-
-  ```
-  PYSPARK_DRIVER_PYTHON=jupyter PYSPARK_DRIVER_PYTHON_OPTS="notebook" pyspark --master local[*] --driver-memory 3G --driver-class-path SystemDS.jar --jars SystemDS.jar
-  ```
-
-  Note that all printed output, such as training statistics, from the SystemDS scripts will be sent to the terminal in which Jupyter was started (for now...).
-
-* **Scripts**: To run the scripts from the command line using `spark-submit`, please see the comments located at the top of the `-train` and `-predict` scripts.
+* The implementation uses a fixed layer architecture: two embedding layers for users and items at the input,
+three dense layers with ReLU activations in the middle, and a sigmoid activation for the final classification.
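Each example follows the same train/validate/test workflow described above. A minimal end-to-end sketch for the softmax example, assuming its train, predict, and eval functions mirror the interfaces visible in the other scripts here (train taking an epoch count, predict taking the learned parameters):

```
source("nn/examples/mnist_softmax.dml") as mnist_softmax

[W, b] = mnist_softmax::train(X, Y, X_val, Y_val, 1)   # 1 epoch as a smoke test
probs = mnist_softmax::predict(X_test, W, b)
[loss, accuracy] = mnist_softmax::eval(probs, Y_test)
print("Test Accuracy: " + accuracy)
```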
diff --git a/scripts/nn/examples/fm-binclass-dummy-data.dml b/scripts/nn/examples/fm-binclass-dummy-data.dml
index 20fe7e1..6aec5f8 100644
--- a/scripts/nn/examples/fm-binclass-dummy-data.dml
+++ b/scripts/nn/examples/fm-binclass-dummy-data.dml
@@ -30,12 +30,8 @@ X_val = rand(rows=100, cols=7);
 y_val = round(rand(rows=100, cols=1));
 
 # Train
-[w0, W, V, loss] = fm_binclass::train(X, y, X_val, y_val);
+[w0, W, V, loss] = fm_binclass::train(X, y, X_val, y_val, 2);
 
-# Write model out
-#write(w0, out_dir+"/w0");
-#write(W, out_dir+"/W");
-#write(V, out_dir+"/V");
 
 # eval on test set
 probs = fm_binclass::predict(X, w0, W, V);
@@ -43,6 +39,3 @@ probs = fm_binclass::predict(X, w0, W, V);
 
 # Output results
 print("Test Accuracy: " + accuracy)
-#write(accuracy, out_dir+"/accuracy")
-
-print("")
\ No newline at end of file
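The functional change here is that fm_binclass::train now takes the number of epochs as an explicit fifth argument (2 above, to keep the test cheap). A sketch of a longer run that also persists the model, reusing the write calls that were previously commented out (out_dir as defined in the original script):

```
epochs = 10                                 # larger than the test default of 2
[w0, W, V, loss] = fm_binclass::train(X, y, X_val, y_val, epochs)
probs = fm_binclass::predict(X, w0, W, V)
write(w0, out_dir + "/w0")                  # optional: persist the trained model
write(W, out_dir + "/W")
write(V, out_dir + "/V")
```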
diff --git a/scripts/nn/examples/fm-regression-dummy-data.dml b/scripts/nn/examples/fm-regression-dummy-data.dml
index 7796918..7de728f 100644
--- a/scripts/nn/examples/fm-regression-dummy-data.dml
+++ b/scripts/nn/examples/fm-regression-dummy-data.dml
@@ -30,12 +30,7 @@ X_val = rand(rows=100, cols=d);
 y_val = rand(rows=100, cols=1);
 
 # Train
-[w0, W, V] = fm_regression::train(X, y, X_val, y_val);
-
-# Write model out
-#write(w0, out_dir+"/w0");
-#write(W, out_dir+"/W");
-#write(V, out_dir+"/V");
+[w0, W, V] = fm_regression::train(X, y, X_val, y_val, 10);
 
 # Evaluate
 probs = fm_regression::predict(X, w0, W, V);
@@ -43,6 +38,3 @@ probs = fm_regression::predict(X, w0, W, V);
 
 # Output results
 print("Test Accuracy: " + accuracy)
-#write(accuracy, out_dir+"/accuracy")
-
-print("")
diff --git a/scripts/nn/examples/get_mnist_data.sh b/scripts/nn/examples/get_mnist_data.sh
deleted file mode 100644
index deb0c40..0000000
--- a/scripts/nn/examples/get_mnist_data.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env bash
-#-------------------------------------------------------------
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#-------------------------------------------------------------
-
-DIR="$(cd "$(dirname "$0")" && pwd)"
-mkdir -p $DIR/data/mnist/
-cd $DIR/data/mnist/
-curl -O https://pjreddie.com/media/files/mnist_train.csv
-curl -O https://pjreddie.com/media/files/mnist_test.csv
-
diff --git a/scripts/nn/examples/mnist_2NN.dml b/scripts/nn/examples/mnist_2NN.dml
deleted file mode 100644
index 1307e91..0000000
--- a/scripts/nn/examples/mnist_2NN.dml
+++ /dev/null
@@ -1,179 +0,0 @@
-#-------------------------------------------------------------
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#-------------------------------------------------------------
-
-/*
- * MNIST 2NN Leaky Relu Example
- */
-
-# Imports
-source("nn/layers/affine.dml") as affine
-source("nn/layers/cross_entropy_loss.dml") as cross_entropy_loss
-source("nn/layers/leaky_relu.dml") as leaky_relu
-source("nn/layers/softmax.dml") as softmax
-source("nn/optim/sgd_nesterov.dml") as sgd_nesterov
-
-train = function(matrix[double] X, matrix[double] Y, matrix[double] X_val, 
-                 matrix[double] Y_val, int epochs)
-  return (matrix[double] W_1, matrix[double] b_1, matrix[double] W_2, 
-	        matrix[double] b_2, matrix[double] W_3, matrix[double] b_3)
-{
-  /*
-   * Trains a 2 hidden layer leaky relu softmax classifier.
-   *
-   * The input matrix, X, has N examples, each with D features.
-   * The targets, Y, have K classes, and are one-hot encoded.
-   *
-   * Inputs:
-   *  - X: Input data matrix, of shape (N, D).
-   *  - Y: Target matrix, of shape (N, K).
-   *  - X_val: Input validation data matrix, of shape (N, C*Hin*Win).
-   *  - Y_val: Target validation matrix, of shape (N, K).
-   *  - epochs: Total number of full training loops over the full data set.
-   *
-   * Outputs:
-   *  - W: Weights (parameters) matrix, of shape (D, M, 3).
-   *  - b: Biases vector, of shape (1, M, 3).
-   */
-
-  N = nrow(X)  # num examples
-  D = ncol(X)  # num features
-  K = ncol(Y)  # num classes
-
-  # Create the network:
-  # input -> 200 neuron affine -> leaky_relu -> 200 neuron affine -> leaky_relu -> K neurons affine -> softmax
-  [W_1, b_1] = affine::init(D, 200)
-  [W_2, b_2] = affine::init(200, 200)
-  [W_3, b_3] = affine::init(200, K)
-
-  # Initialize SGD
-  lr = 0.2  # learning rate
-  mu = 0  # momentum
-  decay = 0.99  # learning rate decay constant
-  vW_1 = sgd_nesterov::init(W_1)  # optimizer momentum state for W_1
-  vb_1 = sgd_nesterov::init(b_1)  # optimizer momentum state for b_1
-  vW_2 = sgd_nesterov::init(W_2)  # optimizer momentum state for W_2
-  vb_2 = sgd_nesterov::init(b_2)  # optimizer momentum state for b_2
-  vW_3 = sgd_nesterov::init(W_3)  # optimizer momentum state for W_3
-  vb_3 = sgd_nesterov::init(b_3)  # optimizer momentum state for b_3
-
-  # Optimize
-  print("Starting optimization")
-  batch_size = 50
-  iters = 1000
-  for (e in 1:epochs) {
-    for(i in 1:iters) {
-      # Get next batch
-      beg = ((i-1) * batch_size) %% N + 1
-      end = min(N, beg + batch_size - 1)
-      X_batch = X[beg:end,]
-      y_batch = Y[beg:end,]
-
-      # Compute forward pass
-      ## input D -> 200 neuron affine -> leaky_relu -> 200 neuron affine -> leaky_relu -> K neurons affine -> softmax
-      out_1 = affine::forward(X_batch, W_1, b_1)
-      out_1_leaky_relu = leaky_relu::forward(out_1)
-      out_2 = affine::forward(out_1_leaky_relu, W_2, b_2)
-      out_2_leaky_relu = leaky_relu::forward(out_2)
-      out_3 = affine::forward(out_2_leaky_relu, W_3, b_3)
-      probs = softmax::forward(out_3)
-
-      # Compute loss & accuracy for training & validation data
-      loss = cross_entropy_loss::forward(probs, y_batch)
-      accuracy = mean(rowIndexMax(probs) == rowIndexMax(y_batch))
-      probs_val = predict(X_val, W_1, b_1, W_2, b_2, W_3, b_3)
-      loss_val = cross_entropy_loss::forward(probs_val, Y_val)
-      accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(Y_val))
-      print("Epoch: " + e + ", Iter: " + i + ", Train Loss: " + loss + ", Train Accuracy: " +
-      accuracy + ", Val Loss: " + loss_val + ", Val Accuracy: " + accuracy_val)
-
-      # Compute backward pass
-      ## loss:
-      dprobs = cross_entropy_loss::backward(probs, y_batch)
-      dout_3 = softmax::backward(dprobs, out_3)
-      [dout_2_leaky_relu, dW_3, db_3] = affine::backward(dout_3, out_2_leaky_relu, W_3, b_3)
-      dout_2 = leaky_relu::backward(dout_2_leaky_relu, out_2)
-      [dout_1_leaky_relu, dW_2, db_2] = affine::backward(dout_2, out_1_leaky_relu, W_2, b_2)
-      dout_1 = leaky_relu::backward(dout_1_leaky_relu, out_1)
-      [dX_batch, dW_1, db_1] = affine::backward(dout_1, X_batch, W_1, b_1)
-
-      # Optimize with SGD
-      [W_3, vW_3] = sgd_nesterov::update(W_3, dW_3, lr, mu, vW_3)
-      [b_3, vb_3] = sgd_nesterov::update(b_3, db_3, lr, mu, vb_3)
-      [W_2, vW_2] = sgd_nesterov::update(W_2, dW_2, lr, mu, vW_2)
-      [b_2, vb_2] = sgd_nesterov::update(b_2, db_2, lr, mu, vb_2)
-      [W_1, vW_1] = sgd_nesterov::update(W_1, dW_1, lr, mu, vW_1)
-      [b_1, vb_1] = sgd_nesterov::update(b_1, db_1, lr, mu, vb_1)
-    }
-
-    # Anneal momentum towards 0.999
-    mu = mu + (0.999 - mu)/(1+epochs-e)
-    # Decay learning rate
-    lr = lr * decay
-  }
-}
-
-predict = function(matrix[double] X,
-                   matrix[double] W_1, matrix[double] b_1,
-                   matrix[double] W_2, matrix[double] b_2,
-                   matrix[double] W_3, matrix[double] b_3)
-    return (matrix[double] probs) {
-  /*
-   * Computes the class probability predictions of a softmax classifier.
-   *
-   * The input matrix, X, has N examples, each with D features.
-   *
-   * Inputs:
-   *  - X: Input data matrix, of shape (N, D).
-   *  - W: Weights (parameters) matrix, of shape (D, M).
-   *  - b: Biases vector, of shape (1, M).
-   *
-   * Outputs:
-   *  - probs: Class probabilities, of shape (N, K).
-   */
-  # Compute forward pass
-  ## input -> 200 neuron affine -> leaky_relu -> 200 neuron affine -> leaky_relu -> K neurons affine -> softmax
-  out_1_leaky_relu = leaky_relu::forward(affine::forward(X, W_1, b_1))
-  out_2_leaky_relu = leaky_relu::forward(affine::forward(out_1_leaky_relu, W_2, b_2))
-  probs = softmax::forward(affine::forward(out_2_leaky_relu, W_3, b_3))
-}
-
-eval = function(matrix[double] probs, matrix[double] Y)
-    return (double loss, double accuracy) {
-  /*
-   * Evaluates a classifier.
-   *
-   * The probs matrix contains the class probability predictions
-   * of K classes over N examples.  The targets, Y, have K classes,
-   * and are one-hot encoded.
-   *
-   * Inputs:
-   *  - probs: Class probabilities, of shape (N, K).
-   *  - Y: Target matrix, of shape (N, K).
-   *
-   * Outputs:
-   *  - loss: Scalar loss, of shape (1).
-   *  - accuracy: Scalar accuracy, of shape (1).
-   */
-  # Compute loss & accuracy
-  loss = cross_entropy_loss::forward(probs, Y)
-  correct_pred = rowIndexMax(probs) == rowIndexMax(Y)
-  accuracy = mean(correct_pred)
-}
diff --git a/scripts/nn/examples/mnist_lenet-train.dml b/scripts/nn/examples/mnist_lenet-train.dml
index b70fc91..54b6fd7 100644
--- a/scripts/nn/examples/mnist_lenet-train.dml
+++ b/scripts/nn/examples/mnist_lenet-train.dml
@@ -117,7 +117,3 @@ probs = mnist_lenet::predict(X_test, C, Hin, Win, W1, b1, W2, b2, W3, b3, W4, b4
 # Output results
 print("Test Accuracy: " + accuracy)
 write(accuracy, out_dir+"/accuracy")
-
-print("")
-print("")
-
diff --git a/scripts/nn/examples/mnist_lenet.dml b/scripts/nn/examples/mnist_lenet.dml
index a882501..b0401e5 100644
--- a/scripts/nn/examples/mnist_lenet.dml
+++ b/scripts/nn/examples/mnist_lenet.dml
@@ -82,10 +82,10 @@ train = function(matrix[double] X, matrix[double] Y,
   N3 = 512  # num nodes in affine3
   # Note: affine4 has K nodes, which is equal to the number of target dimensions (num classes)
 
-  [W1, b1] = conv2d::init(F1, C, Hf, Wf)  # inputs: (N, C*Hin*Win)
-  [W2, b2] = conv2d::init(F2, F1, Hf, Wf)  # inputs: (N, F1*(Hin/2)*(Win/2))
-  [W3, b3] = affine::init(F2*(Hin/2/2)*(Win/2/2), N3)  # inputs: (N, F2*(Hin/2/2)*(Win/2/2))
-  [W4, b4] = affine::init(N3, K)  # inputs: (N, N3)
+  [W1, b1] = conv2d::init(F1, C, Hf, Wf, -1)  # inputs: (N, C*Hin*Win)
+  [W2, b2] = conv2d::init(F2, F1, Hf, Wf, -1)  # inputs: (N, F1*(Hin/2)*(Win/2))
+  [W3, b3] = affine::init(F2*(Hin/2/2)*(Win/2/2), N3, -1)  # inputs: (N, F2*(Hin/2/2)*(Win/2/2))
+  [W4, b4] = affine::init(N3, K, -1)  # inputs: (N, N3)
   W4 = W4 / sqrt(2)  # different initialization, since being fed into softmax, instead of relu
 
   # Initialize SGD w/ Nesterov momentum optimizer
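The extra trailing argument to conv2d::init and affine::init is presumably the newly added seed parameter, with -1 requesting non-deterministic initialization. A sketch under that assumption:

```
[W1, b1] = conv2d::init(F1, C, Hf, Wf, -1)   # -1: draw a random seed
[W1, b1] = conv2d::init(F1, C, Hf, Wf, 42)   # hypothetical fixed seed for reproducible runs
```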
diff --git a/scripts/nn/examples/mnist_lenet_distrib_sgd-train-dummy-data.dml b/scripts/nn/examples/mnist_lenet_distrib_sgd-train-dummy-data.dml
index 43f4120..2310e7c 100644
--- a/scripts/nn/examples/mnist_lenet_distrib_sgd-train-dummy-data.dml
+++ b/scripts/nn/examples/mnist_lenet_distrib_sgd-train-dummy-data.dml
@@ -69,16 +69,16 @@
 source("nn/examples/mnist_lenet_distrib_sgd.dml") as mnist_lenet
 
 # Read training data & settings
-N = ifdef($N, 1024)
-Nval = ifdef($Nval, 512)
-Ntest = ifdef($Ntest, 512)
+N = ifdef($N, 128)
+Nval = ifdef($Nval, 64)
+Ntest = ifdef($Ntest, 64)
 C = ifdef($C, 3)
 Hin = ifdef($Hin, 224)
 Win = ifdef($Win, 224)
 K = ifdef($K, 10)
-batch_size = ifdef($batch_size, 32)
-parallel_batches = ifdef($parallel_batches, 4)
-epochs = ifdef($epochs, 10)
+batch_size = ifdef($batch_size, 16)
+parallel_batches = ifdef($parallel_batches, 2)
+epochs = ifdef($epochs, 2)
 out_dir = ifdef($out_dir, ".")
 
 # Generate dummy data
@@ -90,24 +90,10 @@ out_dir = ifdef($out_dir, ".")
 [W1, b1, W2, b2, W3, b3, W4, b4] = mnist_lenet::train(X, Y, X_val, Y_val, C, Hin, Win, batch_size,
     parallel_batches, epochs)
 
-# Write model out
-write(W1, out_dir+"/W1")
-write(b1, out_dir+"/b1")
-write(W2, out_dir+"/W2")
-write(b2, out_dir+"/b2")
-write(W3, out_dir+"/W3")
-write(b3, out_dir+"/b3")
-write(W4, out_dir+"/W4")
-write(b4, out_dir+"/b4")
-
 # Eval on test set
 probs = mnist_lenet::predict(X_test, C, Hin, Win, W1, b1, W2, b2, W3, b3, W4, b4)
 [loss, accuracy] = mnist_lenet::eval(probs, Y_test)
 
 # Output results
 print("Test Accuracy: " + accuracy)
-write(accuracy, out_dir+"/accuracy")
-
-print("")
-print("")
 
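Shrinking the defaults (128 training images, batches of 16, 2 epochs) turns this script into a cheap smoke test, matching its new use in MNISTLeNet.java below. The script's dummy-data generator is not shown in this hunk; a minimal sketch of producing such inputs in DML, assuming random pixels and one-hot labels suffice:

```
X = rand(rows=N, cols=C*Hin*Win)                     # random "images"
classes = round(rand(rows=N, cols=1, min=1, max=K))  # labels in 1..K
Y = table(seq(1, N), classes, N, K)                  # one-hot targets, N x K
```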
diff --git a/scripts/nn/examples/mnist_lenet_distrib_sgd.dml b/scripts/nn/examples/mnist_lenet_distrib_sgd.dml
index b2919b2..9915558 100644
--- a/scripts/nn/examples/mnist_lenet_distrib_sgd.dml
+++ b/scripts/nn/examples/mnist_lenet_distrib_sgd.dml
@@ -81,15 +81,15 @@ train = function(matrix[double] X, matrix[double] Y,
   stride = 1
   pad = 2  # For same dimensions, (Hf - stride) / 2
 
-  F1 = 32  # num conv filters in conv1
-  F2 = 64  # num conv filters in conv2
-  N3 = 512  # num nodes in affine3
+  F1 = 16  # num conv filters in conv1
+  F2 = 16  # num conv filters in conv2
+  N3 = 128  # num nodes in affine3
   # Note: affine4 has K nodes, which is equal to the number of target dimensions (num classes)
 
-  [W1, b1] = conv2d::init(F1, C, Hf, Wf)  # inputs: (N, C*Hin*Win)
-  [W2, b2] = conv2d::init(F2, F1, Hf, Wf)  # inputs: (N, F1*(Hin/2)*(Win/2))
-  [W3, b3] = affine::init(F2*(Hin/2/2)*(Win/2/2), N3)  # inputs: (N, F2*(Hin/2/2)*(Win/2/2))
-  [W4, b4] = affine::init(N3, K)  # inputs: (N, N3)
+  [W1, b1] = conv2d::init(F1, C, Hf, Wf, -1)  # inputs: (N, C*Hin*Win)
+  [W2, b2] = conv2d::init(F2, F1, Hf, Wf, -1)  # inputs: (N, F1*(Hin/2)*(Win/2))
+  [W3, b3] = affine::init(F2*(Hin/2/2)*(Win/2/2), N3, -1)  # inputs: (N, F2*(Hin/2/2)*(Win/2/2))
+  [W4, b4] = affine::init(N3, K, -1)  # inputs: (N, N3)
   W4 = W4 / sqrt(2)  # different initialization, since being fed into softmax, instead of relu
 
   # Initialize SGD w/ Nesterov momentum optimizer
@@ -150,13 +150,13 @@ train = function(matrix[double] X, matrix[double] Y,
                                                   stride, stride, pad, pad)
         outr1 = relu::forward(outc1)
         [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,
-                                                      strideh=2, stridew=2, pad=0, pad=0)
+                                                      strideh=2, stridew=2, padh=0, padw=0)
         ## layer 2: conv2 -> relu2 -> pool2
         [outc2, Houtc2, Woutc2] = conv2d::forward(outp1, W2, b2, F1, Houtp1, Woutp1, Hf, Wf,
                                                   stride, stride, pad, pad)
         outr2 = relu::forward(outc2)
         [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,
-                                                      strideh=2, stridew=2, pad=0, pad=0)
+                                                      strideh=2, stridew=2, padh=0, padw=0)
         ## layer 3:  affine3 -> relu3 -> dropout
         outa3 = affine::forward(outp2, W3, b3)
         outr3 = relu::forward(outa3)
@@ -177,13 +177,13 @@ train = function(matrix[double] X, matrix[double] Y,
         [doutp2, dW3, db3] = affine::backward(douta3, outp2, W3, b3)
         ## layer 2: conv2 -> relu2 -> pool2
         doutr2 = max_pool2d::backward(doutp2, Houtp2, Woutp2, outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,
-                                      strideh=2, stridew=2, pad=0, pad=0)
+                                      strideh=2, stridew=2, padh=0, padw=0)
         doutc2 = relu::backward(doutr2, outc2)
         [doutp1, dW2, db2] = conv2d::backward(doutc2, Houtc2, Woutc2, outp1, W2, b2, F1,
                                               Houtp1, Woutp1, Hf, Wf, stride, stride, pad, pad)
         ## layer 1: conv1 -> relu1 -> pool1
         doutr1 = max_pool2d::backward(doutp1, Houtp1, Woutp1, outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,
-                                      strideh=2, stridew=2, pad=0, pad=0)
+                                      strideh=2, stridew=2, padh=0, padw=0)
         doutc1 = relu::backward(doutr1, outc1)
         [dX_batch, dW1, db1] = conv2d::backward(doutc1, Houtc1, Woutc1, X_batch, W1, b1, C,
                                                 Hin, Win, Hf, Wf, stride, stride, pad, pad)
@@ -316,7 +316,7 @@ predict = function(matrix[double] X, int C, int Hin, int Win,
 
   # Compute predictions over mini-batches
   probs = matrix(0, rows=N, cols=K)
-  batch_size = 64
+  batch_size = 16
   iters = ceil(N / batch_size)
   parfor(i in 1:iters, check=0) {  # complains about `probs` as an inter-loop dependency
     # Get next batch
@@ -330,13 +330,13 @@ predict = function(matrix[double] X, int C, int Hin, int Win,
                                               pad, pad)
     outr1 = relu::forward(outc1)
     [outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,
-                                                  strideh=2, stridew=2, pad=0, pad=0)
+                                                  strideh=2, stridew=2, padh=0, padw=0)
     ## layer 2: conv2 -> relu2 -> pool2
     [outc2, Houtc2, Woutc2] = conv2d::forward(outp1, W2, b2, F1, Houtp1, Woutp1, Hf, Wf,
                                               stride, stride, pad, pad)
     outr2 = relu::forward(outc2)
     [outp2, Houtp2, Woutp2] = max_pool2d::forward(outr2, F2, Houtc2, Woutc2, Hf=2, Wf=2,
-                                                  strideh=2, stridew=2, pad=0, pad=0)
+                                                  strideh=2, stridew=2, padh=0, padw=0)
     ## layer 3:  affine3 -> relu3
     outa3 = affine::forward(outp2, W3, b3)
     outr3 = relu::forward(outa3)
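The old pad=0, pad=0 calls passed a parameter named pad twice, while max_pool2d declares separate padh and padw arguments; the fix names both explicitly. The corrected call pattern, as it now appears throughout the script:

```
[outp1, Houtp1, Woutp1] = max_pool2d::forward(outr1, F1, Houtc1, Woutc1, Hf=2, Wf=2,
                                              strideh=2, stridew=2, padh=0, padw=0)
```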
diff --git a/scripts/nn/examples/mnist_softmax.dml b/scripts/nn/examples/mnist_softmax.dml
index 4a0664f..9d6fbb6 100644
--- a/scripts/nn/examples/mnist_softmax.dml
+++ b/scripts/nn/examples/mnist_softmax.dml
@@ -55,7 +55,7 @@ train = function(matrix[double] X, matrix[double] Y,
 
   # Create softmax classifier:
   # affine -> softmax
-  [W, b] = affine::init(D, K)
+  [W, b] = affine::init(D, K, -1)
   W = W / sqrt(2.0/(D)) * sqrt(1/(D))
 
   # Initialize SGD w/ Nesterov momentum optimizer
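A note on the unchanged rescaling line after this init call: assuming affine::init draws weights with standard deviation sqrt(2/D) (He initialization), dividing by sqrt(2.0/D) and multiplying by sqrt(1/D) rescales them to standard deviation sqrt(1/D). Algebraically the whole expression collapses to

```
W = W / sqrt(2)   # equivalent to W / sqrt(2.0/(D)) * sqrt(1/(D))
```

which is the same softmax-layer correction the LeNet scripts apply to W4.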
diff --git a/scripts/staging/fm-binclass.dml b/scripts/staging/fm-binclass.dml
index 4d6a57f..a67d326 100644
--- a/scripts/staging/fm-binclass.dml
+++ b/scripts/staging/fm-binclass.dml
@@ -32,7 +32,7 @@ source("nn/layers/sigmoid.dml") as sigmoid
 source("nn/layers/l2_reg.dml") as l2_reg
 source("nn/layers/cross_entropy_loss.dml") as cross_entropy_loss
 
-train = function(matrix[double] X, matrix[double] y, matrix[double] X_val, matrix[double] y_val)
+train = function(matrix[double] X, matrix[double] y, matrix[double] X_val, matrix[double] y_val, int epochs)
     return (matrix[double] w0, matrix[double] W, matrix[double] V, double loss) {
   /*
    * Trains the FM model.
@@ -78,7 +78,7 @@ train = function(matrix[double] X, matrix[double] y, matrix[double] X_val, matri
     print("Starting optimization")
     batch_size = 10
     iters = ceil(1000 / batch_size)
-    epochs = 100; N = n;
+    N = n;
     for (e in 1:epochs) {
       for (i in 1:iters) {
         # Get the next batch
diff --git a/scripts/staging/fm-regression.dml b/scripts/staging/fm-regression.dml
index 875a6da..0122841 100644
--- a/scripts/staging/fm-regression.dml
+++ b/scripts/staging/fm-regression.dml
@@ -29,7 +29,7 @@ source("nn/layers/fm.dml") as fm
 source("nn/layers/l2_loss.dml") as l2_loss
 source("nn/layers/l2_reg.dml") as l2_reg
 
-train = function(matrix[double] X, matrix[double] y, matrix[double] X_val, matrix[double] y_val)
+train = function(matrix[double] X, matrix[double] y, matrix[double] X_val, matrix[double] y_val, int epochs)
     return (matrix[double] w0, matrix[double] W, matrix[double] V) {
   /*
    * Trains the FM model.
@@ -75,7 +75,7 @@ train = function(matrix[double] X, matrix[double] y, matrix[double] X_val, matri
     # Optimize
     print("Starting optimization")
     batch_size = 10
-    epochs = 100; N = n;
+    N = n;
     iters = ceil(N / batch_size)
 
     for (e in 1:epochs) {
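As in fm-binclass.dml above, the epoch count is promoted from a hard-coded constant (previously 100) to a function argument, so existing callers of the staging FM scripts must now pass it explicitly. A sketch of the updated call, reproducing the old behavior:

```
[w0, W, V] = fm_regression::train(X, y, X_val, y_val, 100)   # 100 = old hard-coded default
```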
diff --git a/src/test/java/org/apache/sysds/test/applications/nn/BaseTest.java b/src/test/java/org/apache/sysds/test/applications/nn/BaseTest.java
index b5fd713..2fa9f55 100644
--- a/src/test/java/org/apache/sysds/test/applications/nn/BaseTest.java
+++ b/src/test/java/org/apache/sysds/test/applications/nn/BaseTest.java
@@ -27,23 +27,32 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.sysds.api.mlcontext.Script;
 import org.apache.sysds.test.functions.mlcontext.MLContextTestBase;
 
-public class BaseTest extends MLContextTestBase {
+public abstract class BaseTest extends MLContextTestBase {
 	protected static final Log LOG = LogFactory.getLog(BaseTest.class.getName());
 
 	private static final String ERROR_STRING = "ERROR:";
-	private static final String BASE_FILEPATH = "src/test/scripts/applications/nn/";
+
 
 	protected void run(String name) {
-		Script script = dmlFromFile(BASE_FILEPATH + name);
+		run(name, false);
+	}
+
+	protected void run(String name, boolean printStdOut) {
+		Script script = dmlFromFile(getBaseFilePath() + name);
 		String stdOut = executeAndCaptureStdOut(script).getRight();
+		if(printStdOut){
+			LOG.error(stdOut);
+		}
 		assertTrue(stdOut, !stdOut.contains(ERROR_STRING));
 	}
 
 	protected void run(String name, String[] var, Object[] val) {
-		Script script = dmlFromFile(BASE_FILEPATH + name);
+		Script script = dmlFromFile(getBaseFilePath() + name);
 		for(int i = 0; i < var.length; i++)
 			script.in(var[i], val[i]);
 		String stdOut = executeAndCaptureStdOut(script).getRight();
 		assertTrue(stdOut, !stdOut.contains(ERROR_STRING));
 	}
+
+	protected abstract String getBaseFilePath();
 }
diff --git a/src/test/java/org/apache/sysds/test/applications/nn/NNGradientTest.java b/src/test/java/org/apache/sysds/test/applications/nn/FMTests.java
similarity index 82%
copy from src/test/java/org/apache/sysds/test/applications/nn/NNGradientTest.java
copy to src/test/java/org/apache/sysds/test/applications/nn/FMTests.java
index e438133..4ca6536 100644
--- a/src/test/java/org/apache/sysds/test/applications/nn/NNGradientTest.java
+++ b/src/test/java/org/apache/sysds/test/applications/nn/FMTests.java
@@ -21,11 +21,14 @@ package org.apache.sysds.test.applications.nn;
 
 import org.junit.Test;
 
-public class NNGradientTest extends BaseTest {
-
+public class FMTests extends NNTests {
 	@Test
-	public void testNNLibrary_Gradients() {
-		run("run_tests_gradients.dml");
+	public void testBinClass() {
+		run("fm-binclass-dummy-data.dml");
 	}
 
+	@Test
+	public void testRegression() {
+		run("fm-regression-dummy-data.dml");
+	}
 }
diff --git a/src/test/java/org/apache/sysds/test/applications/nn/NNGradientTest.java b/src/test/java/org/apache/sysds/test/applications/nn/MNISTLeNet.java
similarity index 87%
copy from src/test/java/org/apache/sysds/test/applications/nn/NNGradientTest.java
copy to src/test/java/org/apache/sysds/test/applications/nn/MNISTLeNet.java
index e438133..39c323d 100644
--- a/src/test/java/org/apache/sysds/test/applications/nn/NNGradientTest.java
+++ b/src/test/java/org/apache/sysds/test/applications/nn/MNISTLeNet.java
@@ -21,11 +21,9 @@ package org.apache.sysds.test.applications.nn;
 
 import org.junit.Test;
 
-public class NNGradientTest extends BaseTest {
-
+public class MNISTLeNet extends NNTests {
 	@Test
-	public void testNNLibrary_Gradients() {
-		run("run_tests_gradients.dml");
+	public void testMNISTLeNet() {
+		run("mnist_lenet_distrib_sgd-train-dummy-data.dml");
 	}
-
 }
diff --git a/src/test/java/org/apache/sysds/test/applications/nn/NNComponentTest.java b/src/test/java/org/apache/sysds/test/applications/nn/NNComponentTest.java
index bc6d2c1..19fd805 100644
--- a/src/test/java/org/apache/sysds/test/applications/nn/NNComponentTest.java
+++ b/src/test/java/org/apache/sysds/test/applications/nn/NNComponentTest.java
@@ -21,7 +21,7 @@ package org.apache.sysds.test.applications.nn;
 
 import org.junit.Test;
 
-public class NNComponentTest extends BaseTest {
+public class NNComponentTest extends TestFolder {
 
 	@Test
 	public void batch_norm1d() {
diff --git a/src/test/java/org/apache/sysds/test/applications/nn/NNGradientTest.java b/src/test/java/org/apache/sysds/test/applications/nn/NNGradientTest.java
index e438133..a5e20fb 100644
--- a/src/test/java/org/apache/sysds/test/applications/nn/NNGradientTest.java
+++ b/src/test/java/org/apache/sysds/test/applications/nn/NNGradientTest.java
@@ -21,7 +21,7 @@ package org.apache.sysds.test.applications.nn;
 
 import org.junit.Test;
 
-public class NNGradientTest extends BaseTest {
+public class NNGradientTest extends TestFolder {
 
 	@Test
 	public void testNNLibrary_Gradients() {
diff --git a/src/test/java/org/apache/sysds/test/applications/nn/NNMaxPool2dComponentTest.java b/src/test/java/org/apache/sysds/test/applications/nn/NNMaxPool2dComponentTest.java
index 0be02b6..d4e9ec4 100644
--- a/src/test/java/org/apache/sysds/test/applications/nn/NNMaxPool2dComponentTest.java
+++ b/src/test/java/org/apache/sysds/test/applications/nn/NNMaxPool2dComponentTest.java
@@ -29,7 +29,7 @@ import org.junit.runners.Parameterized.Parameters;
 
 @RunWith(value = Parameterized.class)
 @net.jcip.annotations.NotThreadSafe
-public class NNMaxPool2dComponentTest extends BaseTest {
+public class NNMaxPool2dComponentTest extends TestFolder {
 
 	@Parameters
 	public static Collection<Object[]> data() {
diff --git a/src/test/java/org/apache/sysds/test/applications/nn/NNGradientTest.java b/src/test/java/org/apache/sysds/test/applications/nn/NNTests.java
similarity index 82%
copy from src/test/java/org/apache/sysds/test/applications/nn/NNGradientTest.java
copy to src/test/java/org/apache/sysds/test/applications/nn/NNTests.java
index e438133..54ebcb0 100644
--- a/src/test/java/org/apache/sysds/test/applications/nn/NNGradientTest.java
+++ b/src/test/java/org/apache/sysds/test/applications/nn/NNTests.java
@@ -19,13 +19,13 @@
 
 package org.apache.sysds.test.applications.nn;
 
-import org.junit.Test;
+public abstract class NNTests extends BaseTest {
 
-public class NNGradientTest extends BaseTest {
+	private static final String BASE_FILEPATH = "scripts/nn/examples/";
 
-	@Test
-	public void testNNLibrary_Gradients() {
-		run("run_tests_gradients.dml");
+	@Override
+	protected String getBaseFilePath() {
+		return BASE_FILEPATH;
 	}
 
 }
diff --git a/src/test/java/org/apache/sysds/test/applications/nn/NNGradientTest.java b/src/test/java/org/apache/sysds/test/applications/nn/TestFolder.java
similarity index 79%
copy from src/test/java/org/apache/sysds/test/applications/nn/NNGradientTest.java
copy to src/test/java/org/apache/sysds/test/applications/nn/TestFolder.java
index e438133..bb5db69 100644
--- a/src/test/java/org/apache/sysds/test/applications/nn/NNGradientTest.java
+++ b/src/test/java/org/apache/sysds/test/applications/nn/TestFolder.java
@@ -19,13 +19,13 @@
 
 package org.apache.sysds.test.applications.nn;
 
-import org.junit.Test;
+public abstract class TestFolder extends BaseTest {

-public class NNGradientTest extends BaseTest {
+	private static final String BASE_FILEPATH = "src/test/scripts/applications/nn/";

-	@Test
-	public void testNNLibrary_Gradients() {
-		run("run_tests_gradients.dml");
-	}
+	@Override
+	protected String getBaseFilePath() {
+		return BASE_FILEPATH;
+	}
 
 }