Posted to commits@systemml.apache.org by ja...@apache.org on 2018/05/09 07:56:48 UTC

systemml git commit: [SYSTEMML-1437] Factorization Machines regression script

Repository: systemml
Updated Branches:
  refs/heads/master d0b4373fa -> 23157be4d


[SYSTEMML-1437] Factorization Machines regression script

This patch adds a factorization machines regression script, along with a
sample dummy-data script for evaluation.
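
For context, a second-order factorization machine models the regression
target as (standard formulation, cf. Rendle, 2010):

    \hat{y}(x) = w_0 + \sum_{i=1}^{d} w_i x_i
                 + \sum_{i=1}^{d} \sum_{j=i+1}^{d} \langle v_i, v_j \rangle \, x_i x_j

where w0 is the global bias, W holds the d linear weights, and V holds a
k-dimensional factor vector v_i per feature; these are the three parameters
returned by the new train() function below.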


Project: http://git-wip-us.apache.org/repos/asf/systemml/repo
Commit: http://git-wip-us.apache.org/repos/asf/systemml/commit/23157be4
Tree: http://git-wip-us.apache.org/repos/asf/systemml/tree/23157be4
Diff: http://git-wip-us.apache.org/repos/asf/systemml/diff/23157be4

Branch: refs/heads/master
Commit: 23157be4de7dd91e755cbc2a919984f068d6fd61
Parents: d0b4373
Author: Janardhan Pulivarthi <j1...@protonmail.com>
Authored: Wed May 9 13:17:32 2018 +0530
Committer: Janardhan Pulivarthi <j1...@protonmail.com>
Committed: Wed May 9 13:17:32 2018 +0530

----------------------------------------------------------------------
 .../nn/examples/fm-regression-dummy-data.dml    |  48 ++++++
 scripts/staging/fm-regression.dml               | 160 +++++++++++++++++++
 2 files changed, 208 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/systemml/blob/23157be4/scripts/nn/examples/fm-regression-dummy-data.dml
----------------------------------------------------------------------
diff --git a/scripts/nn/examples/fm-regression-dummy-data.dml b/scripts/nn/examples/fm-regression-dummy-data.dml
new file mode 100644
index 0000000..7796918
--- /dev/null
+++ b/scripts/nn/examples/fm-regression-dummy-data.dml
@@ -0,0 +1,48 @@
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
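+# Example invocation (hypothetical paths; adjust to your setup):
+#   spark-submit SystemML.jar -f scripts/nn/examples/fm-regression-dummy-data.dml
+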
+# Imports
+source("staging/fm-regression.dml") as fm_regression
+
+# generate dummy data (just a sample!)
+n = 1000; d = 9;  # the factorization dimensionality k=2 is fixed inside train()
+X = rand(rows=n, cols=d);
+y = rand(rows=n, cols=1);
+X_val = rand(rows=100, cols=d);
+y_val = rand(rows=100, cols=1);
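+
+# Note: rand() draws from U(0, 1) by default, so this data is only a
+# smoke test; real inputs would be loaded instead, e.g. via
+# X = read($X) with hypothetical -nvargs file arguments.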
+
+# Train
+[w0, W, V] = fm_regression::train(X, y, X_val, y_val);
+
+# Write model out
+#write(w0, out_dir+"/w0");
+#write(W, out_dir+"/W");
+#write(V, out_dir+"/V");
+
+# Evaluate
+preds = fm_regression::predict(X, w0, W, V);
+[loss, rmse] = fm_regression::eval(preds, y);
+
+# Output results
+print("Test RMSE: " + rmse)
+#write(rmse, out_dir+"/rmse")
+
+print("")

http://git-wip-us.apache.org/repos/asf/systemml/blob/23157be4/scripts/staging/fm-regression.dml
----------------------------------------------------------------------
diff --git a/scripts/staging/fm-regression.dml b/scripts/staging/fm-regression.dml
new file mode 100644
index 0000000..92333a3
--- /dev/null
+++ b/scripts/staging/fm-regression.dml
@@ -0,0 +1,160 @@
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
+/**
+ * Factorization Machines for Regression.
+ */
+
+# Imports
+source("nn/optim/adam.dml") as adam
+source("nn/layers/fm.dml") as fm
+source("nn/layers/l2_loss.dml") as l2_loss
+source("nn/layers/l2_reg.dml") as l2_reg
+
+train = function(matrix[double] X, matrix[double] y, matrix[double] X_val, matrix[double] y_val)
+    return (matrix[double] w0, matrix[double] W, matrix[double] V) {
+  /*
+   * Trains the FM model.
+   *
+   * Inputs:
+   *  - X     : n examples with d features, of shape (n, d)
+   *  - y     : Target matrix, of shape (n, 1)
+   *  - X_val : Input validation data matrix, of shape (n_val, d)
+   *  - y_val : Target validation matrix, of shape (n_val, 1)
+   *
+   * Outputs:
+   *  - w0, W, V : trained model parameters (bias, linear weights,
+   *      and pairwise-interaction factor matrix, respectively).
+   *
+   * Network Architecture:
+   *
+   * X --> [model] --> out --> l2_loss::backward(out, y) --> dout
+   *
+   */
+
+    n = nrow(X) # num examples
+    d = ncol(X) # num features
+    k = 2       # factorization dimensionality
+                #   (the fm layer currently supports only k=2)
+
+    # 1. Initialize the fm core
+    [w0, W, V] = fm::init(n, d, k);
+
+    # 2. Initialize the adam optimizer
+    ## Default values for some parameters
+    lr      = 0.001;
+    beta1   = 0.9;       # [0, 1)
+    beta2   = 0.999;     # [0, 1)
+    epsilon = 0.00000001;
+    t       = 0;
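+    # (lr, beta1, beta2, and epsilon above are the standard Adam defaults
+    #  from Kingma & Ba, 2015; t is the timestep counter passed to
+    #  adam::update.)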
+
+    [mw0, vw0] = adam::init(w0);
+    [mW, vW]   = adam::init(W);
+    [mV, vV]   = adam::init(V);
+
+    # regularization
+    lambda = 5e-04
+
+    # Optimize
+    print("Starting optimization")
+    batch_size = 10
+    epochs = 100; N = n;
+    iters = ceil(N / batch_size)
+
+    for (e in 1:epochs) {
+      for (i in 1:iters) {
+        # Get the next batch
+        beg = ((i-1) * batch_size) %% N + 1
+        end = min(N, beg + batch_size - 1)
+        X_batch = X[beg:end,]
+        y_batch = y[beg:end,]
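+        # e.g., with N=1000 and batch_size=10: i=1 selects rows 1:10,
+        # i=2 selects rows 11:20, ..., i=100 selects rows 991:1000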
+
+        # 3. Send inputs through fm::forward
+        out = fm::forward(X_batch, w0, W, V);
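+        # (For reference, fm::forward computes the pairwise interactions
+        #  via the O(d*k) reformulation, roughly, assuming V has shape (d, k):
+        #    out = as.scalar(w0) + X %*% W
+        #          + 0.5 * rowSums((X %*% V)^2 - (X^2) %*% (V^2))
+        #  see nn/layers/fm.dml for the exact implementation.)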
+
+        # 4. Compute the loss gradient with l2_loss::backward
+        dout = l2_loss::backward(out, y_batch)  # (predictions, targets)
+
+        # Compute loss & RMSE for training & validation data every 100 iterations.
+        if (i %% 100 == 0) {
+          # Compute training loss & RMSE
+          [loss_data, rmse] = eval(out, y_batch);
+          loss_reg_w0 = l2_reg::forward(w0, lambda)
+          loss_reg_W  = l2_reg::forward(W , lambda)
+          loss_reg_V  = l2_reg::forward(V , lambda)
+          loss = loss_data + loss_reg_w0 + loss_reg_W + loss_reg_V
+
+          # Compute validation loss & RMSE
+          preds_val = predict(X_val, w0, W, V)
+          [loss_val, rmse_val] = eval(preds_val, y_val);
+
+          # Output results
+          print("Epoch: " + e + ", Iter: " + i + ", Train Loss: " + loss + ", Train RMSE: "
+                 + rmse + ", Val Loss: " + loss_val + ", Val RMSE: " + rmse_val)
+        }
+        }
+
+        # 5. Send the loss gradient through fm::backward, then add the
+        #    L2 regularization gradients for each parameter
+        [dw0, dW, dV] = fm::backward(dout, X_batch, w0, W, V);
+        dw0 = dw0 + l2_reg::backward(w0, lambda);
+        dW  = dW  + l2_reg::backward(W, lambda);
+        dV  = dV  + l2_reg::backward(V, lambda);
+
+        # 6. Update the timestep (a monotonically increasing global
+        #    iteration count, starting at 0)
+        t = (e-1) * iters + (i-1);
+
+        # 7. Call adam::update for all parameters
+        [w0, mw0, vw0] = adam::update(w0, dw0, lr, beta1, beta2, epsilon, t, mw0, vw0);
+        [W, mW, vW]    = adam::update(W, dW, lr, beta1, beta2, epsilon, t, mW, vW);
+        [V, mV, vV]    = adam::update(V, dV, lr, beta1, beta2, epsilon, t, mV, vV);
+
+      }
+    }
+}
+
+predict = function(matrix[double] X, matrix[double] w0, matrix[double] W, matrix[double] V)
+    return (matrix[double] out) {
+  /*
+   * Computes the predictions for the given inputs.
+   *
+   * Inputs:
+   *  - X : n examples with d features, of shape (n, d).
+   *  - w0, W, V : trained model parameters.
+   *
+   * Outputs:
+   *  - out : predictions, of shape (n, 1).
+   */
+
+    # Send inputs through fm::forward
+    out = fm::forward(X, w0, W, V);
+
+}
+
+eval = function(matrix[double] preds, matrix[double] y)
+    return (double loss, double rmse) {
+   /*
+    * Computes the L2 loss and the root mean squared error (RMSE)
+    * of the predictions against the targets.
+    */
+
+    # compute the L2 loss
+    loss = l2_loss::forward(preds, y);
+
+    # compute the root mean squared error
+    sqr_mean = mean( (preds - y)^2 )
+    rmse = sqrt(sqr_mean)
+
+}
+