You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@systemml.apache.org by du...@apache.org on 2017/07/18 00:20:44 UTC

[1/5] systemml git commit: [SYSTEMML-1185][SYSTEMML-1766] Merge experimental breast cancer updates

Repository: systemml
Updated Branches:
  refs/heads/master 62b64b32d -> 532da1bc5


http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/convnet.dml
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/convnet.dml b/projects/breast_cancer/convnet.dml
deleted file mode 100644
index 6cbea39..0000000
--- a/projects/breast_cancer/convnet.dml
+++ /dev/null
@@ -1,495 +0,0 @@
-#-------------------------------------------------------------
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#-------------------------------------------------------------
-
-/*
- * Breast Cancer LeNet-like ConvNet Model
- */
-# Imports
-source("nn/layers/affine.dml") as affine
-source("nn/layers/conv2d_builtin.dml") as conv2d
-source("nn/layers/cross_entropy_loss.dml") as cross_entropy_loss
-source("nn/layers/dropout.dml") as dropout
-source("nn/layers/l2_reg.dml") as l2_reg
-source("nn/layers/max_pool2d_builtin.dml") as max_pool2d
-source("nn/layers/relu.dml") as relu
-source("nn/layers/softmax.dml") as softmax
-#source("nn/optim/adam.dml") as adam
-source("nn/optim/sgd_nesterov.dml") as sgd_nesterov
-
-train = function(matrix[double] X, matrix[double] Y,
-                 matrix[double] X_val, matrix[double] Y_val,
-                 int C, int Hin, int Win,
-                 double lr, double mu, double decay, double lambda,
-                 int batch_size, int epochs, int log_interval,
-                 string checkpoint_dir)
-    return (matrix[double] Wc1, matrix[double] bc1,
-            matrix[double] Wc2, matrix[double] bc2,
-            matrix[double] Wc3, matrix[double] bc3,
-            matrix[double] Wa1, matrix[double] ba1,
-            matrix[double] Wa2, matrix[double] ba2) {
-  /*
-   * Trains a convolutional net using a "LeNet"-like architecture.
-   *
-   * The input matrix, X, has N examples, each represented as a 3D
-   * volume unrolled into a single vector.  The targets, Y, have K
-   * classes, and are one-hot encoded.
-   *
-   * Inputs:
-   *  - X: Input data matrix, of shape (N, C*Hin*Win).
-   *  - Y: Target matrix, of shape (N, K).
-   *  - X_val: Input validation data matrix, of shape (N, C*Hin*Win).
-   *  - Y_val: Target validation matrix, of shape (N, K).
-   *  - C: Number of input channels (dimensionality of input depth).
-   *  - Hin: Input height.
-   *  - Win: Input width.
-   *  - lr: Learning rate.
-   *  - mu: Momentum value.
-   *      Typical values are in the range of [0.5, 0.99], usually
-   *      started at the lower end and annealed towards the higher end.
-   *  - decay: Learning rate decay rate.
-   *  - lambda: Regularization strength.
-   *  - batch_size: Size of mini-batches to train on.
-   *  - epochs: Total number of full training loops over the full data set.
-   *  - log_interval: Interval, in iterations, between log outputs.
-   *  - checkpoint_dir: Directory to store model checkpoints.
-   *
-   * Outputs:
-   *  - Wc1: 1st layer weights (parameters) matrix, of shape (F1, C*Hf*Wf).
-   *  - bc1: 1st layer biases vector, of shape (F1, 1).
-   *  - Wc2: 2nd layer weights (parameters) matrix, of shape (F2, F1*Hf*Wf).
-   *  - bc2: 2nd layer biases vector, of shape (F2, 1).
-   *  - Wc3: 3rd layer weights (parameters) matrix, of shape (F3, F2*Hf*Wf).
-   *  - bc3: 3rd layer biases vector, of shape (F3, 1).
-   *  - Wa1: 4th layer weights (parameters) matrix, of shape (F3*(Hin/2^3)*(Win/2^3), N1).
-   *  - ba1: 4th layer biases vector, of shape (1, N1).
-   *  - Wa2: 5th layer weights (parameters) matrix, of shape (N1, K).
-   *  - ba2: 5th layer biases vector, of shape (1, K).
-   */
-  N = nrow(X)
-  K = ncol(Y)
-
-  # Create network:
-  # conv1 -> relu1 -> pool1 -> conv2 -> relu2 -> pool2 -> conv3 -> relu3 -> pool3
-  #  -> affine1 -> relu1 -> dropout1 -> affine2 -> softmax
-  Hf = 3  # filter height
-  Wf = 3  # filter width
-  stride = 1
-  pad = 1  # For same dimensions, (Hf - stride) / 2
-  F1 = 32  # num conv filters in conv1
-  F2 = 32  # num conv filters in conv2
-  F3 = 32  # num conv filters in conv3
-  N1 = 512  # num nodes in affine1
-  # Note: affine2 has K nodes, which is equal to the number of target dimensions (num classes)
-  [Wc1, bc1] = conv2d::init(F1, C, Hf, Wf)  # inputs: (N, C*Hin*Win)
-  [Wc2, bc2] = conv2d::init(F2, F1, Hf, Wf)  # inputs: (N, F1*(Hin/2)*(Win/2))
-  [Wc3, bc3] = conv2d::init(F3, F2, Hf, Wf)  # inputs: (N, F2*(Hin/2^2)*(Win/2^2))
-  [Wa1, ba1] = affine::init(F3*(Hin/2^3)*(Win/2^3), N1)  # inputs: (N, F3*(Hin/2^3)*(Win/2^3))
-  [Wa2, ba2] = affine::init(N1, K)  # inputs: (N, N1)
-  Wa2 = Wa2 / sqrt(2)  # different initialization, since being fed into softmax, instead of relu
-
-  # TODO: Compare optimizers once training is faster.
-  # Initialize SGD w/ Nesterov momentum optimizer
-  vWc1 = sgd_nesterov::init(Wc1); vbc1 = sgd_nesterov::init(bc1)
-  vWc2 = sgd_nesterov::init(Wc2); vbc2 = sgd_nesterov::init(bc2)
-  vWc3 = sgd_nesterov::init(Wc3); vbc3 = sgd_nesterov::init(bc3)
-  vWa1 = sgd_nesterov::init(Wa1); vba1 = sgd_nesterov::init(ba1)
-  vWa2 = sgd_nesterov::init(Wa2); vba2 = sgd_nesterov::init(ba2)
-  #[mWc1, vWc1] = adam::init(Wc1)  # optimizer 1st & 2nd moment state for Wc1
-  #[mbc1, vbc1] = adam::init(bc1)  # optimizer 1st & 2nd moment state for bc1
-  #[mWc2, vWc2] = adam::init(Wc2)  # optimizer 1st & 2nd moment state for Wc2
-  #[mbc2, vbc2] = adam::init(bc2)  # optimizer 1st & 2nd moment state for bc2
-  #[mWc3, vWc3] = adam::init(Wc3)  # optimizer 1st & 2nd moment state for Wc3
-  #[mbc3, vbc3] = adam::init(bc3)  # optimizer 1st & 2nd moment state for bc3
-  #[mWa1, vWa1] = adam::init(Wa1)  # optimizer 1st & 2nd moment state for Wa1
-  #[mba1, vba1] = adam::init(ba1)  # optimizer 1st & 2nd moment state for ba1
-  #[mWa2, vWa2] = adam::init(Wa2)  # optimizer 1st & 2nd moment state for Wa2
-  #[mba2, vba2] = adam::init(ba2)  # optimizer 1st & 2nd moment state for ba2
-  #beta1 = 0.9
-  #beta2 = 0.999
-  #eps = 1e-8
-
-  # TODO: Enable starting val metrics once fast, distributed predictions are available.
-  # Starting validation loss & accuracy
-  #probs_val = predict(X_val, C, Hin, Win, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)
-  #loss_val = cross_entropy_loss::forward(probs_val, Y_val)
-  #accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(Y_val))
-  ## Output results
-  #print("Start: Val Loss: " + loss_val + ", Val Accuracy: " + accuracy_val)
-
-  # Optimize
-  print("Starting optimization")
-  iters = ceil(N / batch_size)
-  for (e in 1:epochs) {
-    for(i in 1:iters) {
-      # Get next batch
-      beg = ((i-1) * batch_size) %% N + 1
-      end = min(N, beg + batch_size - 1)
-      X_batch = X[beg:end,]
-      y_batch = Y[beg:end,]
-
-      # Compute forward pass
-      ## conv layer 1: conv1 -> relu1 -> pool1
-      [outc1, Houtc1, Woutc1] = conv2d::forward(X_batch, Wc1, bc1, C, Hin, Win, Hf, Wf,
-                                                stride, stride, pad, pad)
-      outc1r = relu::forward(outc1)
-      [outc1p, Houtc1p, Woutc1p] = max_pool2d::forward(outc1r, F1, Houtc1, Woutc1, Hf=2, Wf=2,
-                                                       strideh=2, stridew=2, 0, 0)
-      ## conv layer 2: conv2 -> relu2 -> pool2
-      [outc2, Houtc2, Woutc2] = conv2d::forward(outc1p, Wc2, bc2, F1, Houtc1p, Woutc1p, Hf, Wf,
-                                                stride, stride, pad, pad)
-      outc2r = relu::forward(outc2)
-      [outc2p, Houtc2p, Woutc2p] = max_pool2d::forward(outc2r, F2, Houtc2, Woutc2, Hf=2, Wf=2,
-                                                       strideh=2, stridew=2, 0, 0)
-      ## conv layer 3: conv3 -> relu3 -> pool3
-      [outc3, Houtc3, Woutc3] = conv2d::forward(outc2p, Wc3, bc3, F2, Houtc2p, Woutc2p, Hf, Wf,
-                                                stride, stride, pad, pad)
-      outc3r = relu::forward(outc3)
-      [outc3p, Houtc3p, Woutc3p] = max_pool2d::forward(outc3r, F3, Houtc3, Woutc3, Hf=2, Wf=2,
-                                                       strideh=2, stridew=2, 0, 0)
-      ## affine layer 1:  affine1 -> relu1 -> dropout1
-      outa1 = affine::forward(outc3p, Wa1, ba1)
-      outa1r = relu::forward(outa1)
-      [outa1d, maskad1] = dropout::forward(outa1r, 0.5, -1)
-      ## affine layer 2:  affine2 -> softmax
-      outa2 = affine::forward(outa1d, Wa2, ba2)
-      probs = softmax::forward(outa2)
-
-      # Compute data backward pass
-      ## loss:
-      dprobs = cross_entropy_loss::backward(probs, y_batch)
-      ## affine layer 2:  affine2 -> softmax
-      douta2 = softmax::backward(dprobs, outa2)
-      [douta1d, dWa2, dba2] = affine::backward(douta2, outa1d, Wa2, ba2)
-      ## layer 3:  affine3 -> relu3 -> dropout
-      ## affine layer 1:  affine1 -> relu1 -> dropout
-      douta1r = dropout::backward(douta1d, outa1r, 0.5, maskad1)
-      douta1 = relu::backward(douta1r, outa1)
-      [doutc3p, dWa1, dba1] = affine::backward(douta1, outc3p, Wa1, ba1)
-      ## conv layer 3: conv3 -> relu3 -> pool3
-      doutc3r = max_pool2d::backward(doutc3p, Houtc3p, Woutc3p, outc3r, F3, Houtc3, Woutc3,
-                                     Hf=2, Wf=2, strideh=2, stridew=2, 0, 0)
-      doutc3 = relu::backward(doutc3r, outc3)
-      [doutc2p, dWc3, dbc3] = conv2d::backward(doutc3, Houtc3, Woutc3, outc2p, Wc3, bc2, F2,
-                                               Houtc2p, Woutc2p, Hf, Wf, stride, stride, pad, pad)
-      ## conv layer 2: conv2 -> relu2 -> pool2
-      doutc2r = max_pool2d::backward(doutc2p, Houtc2p, Woutc2p, outc2r, F2, Houtc2, Woutc2,
-                                     Hf=2, Wf=2, strideh=2, stridew=2, 0, 0)
-      doutc2 = relu::backward(doutc2r, outc2)
-      [doutc1p, dWc2, dbc2] = conv2d::backward(doutc2, Houtc2, Woutc2, outc1p, Wc2, bc2, F1,
-                                               Houtc1p, Woutc1p, Hf, Wf, stride, stride, pad, pad)
-      ## conv layer 1: conv1 -> relu1 -> pool1
-      doutc1r = max_pool2d::backward(doutc1p, Houtc1p, Woutc1p, outc1r, F1, Houtc1, Woutc1,
-                                     Hf=2, Wf=2, strideh=2, stridew=2, 0, 0)
-      doutc1 = relu::backward(doutc1r, outc1)
-      [dX_batch, dWc1, dbc1] = conv2d::backward(doutc1, Houtc1, Woutc1, X_batch, Wc1, bc1, C,
-                                                Hin, Win, Hf, Wf, stride, stride, pad, pad)
-
-      # Compute regularization backward pass
-      dWc1_reg = l2_reg::backward(Wc1, lambda)
-      dWc2_reg = l2_reg::backward(Wc2, lambda)
-      dWc3_reg = l2_reg::backward(Wc3, lambda)
-      dWa1_reg = l2_reg::backward(Wa1, lambda)
-      dWa2_reg = l2_reg::backward(Wa2, lambda)
-      dWc1 = dWc1 + dWc1_reg
-      dWc2 = dWc2 + dWc2_reg
-      dWc3 = dWc3 + dWc3_reg
-      dWa1 = dWa1 + dWa1_reg
-      dWa2 = dWa2 + dWa2_reg
-
-      # Optimize with SGD w/ Nesterov momentum
-      [Wc1, vWc1] = sgd_nesterov::update(Wc1, dWc1, lr, mu, vWc1)
-      [bc1, vbc1] = sgd_nesterov::update(bc1, dbc1, lr, mu, vbc1)
-      [Wc2, vWc2] = sgd_nesterov::update(Wc2, dWc2, lr, mu, vWc2)
-      [bc2, vbc2] = sgd_nesterov::update(bc2, dbc2, lr, mu, vbc2)
-      [Wc3, vWc3] = sgd_nesterov::update(Wc3, dWc3, lr, mu, vWc3)
-      [bc3, vbc3] = sgd_nesterov::update(bc3, dbc3, lr, mu, vbc3)
-      [Wa1, vWa1] = sgd_nesterov::update(Wa1, dWa1, lr, mu, vWa1)
-      [ba1, vba1] = sgd_nesterov::update(ba1, dba1, lr, mu, vba1)
-      [Wa2, vWa2] = sgd_nesterov::update(Wa2, dWa2, lr, mu, vWa2)
-      [ba2, vba2] = sgd_nesterov::update(ba2, dba2, lr, mu, vba2)
-      #t = e*i - 1
-      #[Wc1, mWc1, vWc1] = adam::update(Wc1, dWc1, lr, beta1, beta2, eps, t, mWc1, vWc1)
-      #[bc1, mbc1, vbc1] = adam::update(bc1, dbc1, lr, beta1, beta2, eps, t, mbc1, vbc1)
-      #[Wc2, mWc2, vWc2] = adam::update(Wc2, dWc2, lr, beta1, beta2, eps, t, mWc2, vWc2)
-      #[bc2, mbc2, vbc2] = adam::update(bc2, dbc2, lr, beta1, beta2, eps, t, mbc2, vbc2)
-      #[Wc3, mWc3, vWc3] = adam::update(Wc3, dWc3, lr, beta1, beta2, eps, t, mWc3, vWc3)
-      #[bc3, mbc3, vbc3] = adam::update(bc3, dbc3, lr, beta1, beta2, eps, t, mbc3, vbc3)
-      #[Wa1, mWa1, vWa1] = adam::update(Wa1, dWa1, lr, beta1, beta2, eps, t, mWa1, vWa1)
-      #[ba1, mba1, vba1] = adam::update(ba1, dba1, lr, beta1, beta2, eps, t, mba1, vba1)
-      #[Wa2, mWa2, vWa2] = adam::update(Wa2, dWa2, lr, beta1, beta2, eps, t, mWa2, vWa2)
-      #[ba2, mba2, vba2] = adam::update(ba2, dba2, lr, beta1, beta2, eps, t, mba2, vba2)
-
-      # Compute loss & accuracy for training & validation data every `log_interval` iterations.
-      if (i %% log_interval == 0) {
-        # Compute training loss & accuracy
-        loss_data = cross_entropy_loss::forward(probs, y_batch)
-        loss_reg_Wc1 = l2_reg::forward(Wc1, lambda)
-        loss_reg_Wc2 = l2_reg::forward(Wc2, lambda)
-        loss_reg_Wc3 = l2_reg::forward(Wc3, lambda)
-        loss_reg_Wa1 = l2_reg::forward(Wa1, lambda)
-        loss_reg_Wa2 = l2_reg::forward(Wa2, lambda)
-        loss = loss_data + loss_reg_Wc1 + loss_reg_Wc2 + loss_reg_Wc3 + loss_reg_Wa1 + loss_reg_Wa2
-        accuracy = mean(rowIndexMax(probs) == rowIndexMax(y_batch))
-
-        # TODO: Consider enabling val metrics here once fast, distributed predictions are available.
-        ## Compute validation loss & accuracy
-        #probs_val = predict(X_val, C, Hin, Win, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)
-        #loss_val = cross_entropy_loss::forward(probs_val, Y_val)
-        #accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(Y_val))
-
-        ## Output results
-        #print("Epoch: " + e + ", Iter: " + i + ", Train Loss: " + loss + ", Train Accuracy: "
-        #      + accuracy + ", Val Loss: " + loss_val + ", Val Accuracy: " + accuracy_val
-        #      + ", lr: " + lr + ", mu " + mu)
-        # Output results
-        print("Epoch: " + e + "/" + epochs + ", Iter: " + i + "/" + iters
-              + ", Train Loss: " + loss + ", Train Accuracy: " + accuracy)
-      }
-    }
-
-    # Compute validation loss & accuracy for validation data every epoch
-    probs_val = predict(X_val, C, Hin, Win, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)
-    loss_val = cross_entropy_loss::forward(probs_val, Y_val)
-    accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(Y_val))
-
-    # Output results
-    print("Epoch: " + e + "/" + epochs + ", Val Loss: " + loss_val
-          + ", Val Accuracy: " + accuracy_val + ", lr: " + lr + ", mu " + mu)
-
-    # Checkpoint model
-    dir = checkpoint_dir + e + "/"
-    dummy = checkpoint(dir, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)
-    str = "lr: " + lr + ", mu: " + mu + ", decay: " + decay + ", lambda: " + lambda
-          + ", batch_size: " + batch_size
-    name = dir + accuracy_val
-    write(str, name)
-
-    # Anneal momentum towards 0.999
-    mu = mu + (0.999 - mu)/(1+epochs-e)
-    # Decay learning rate
-    lr = lr * decay
-  }
-}
-
-checkpoint = function(string dir,
-                      matrix[double] Wc1, matrix[double] bc1,
-                      matrix[double] Wc2, matrix[double] bc2,
-                      matrix[double] Wc3, matrix[double] bc3,
-                      matrix[double] Wa1, matrix[double] ba1,
-                      matrix[double] Wa2, matrix[double] ba2) {
-  /*
-   * Save the model parameters.
-   *
-   * Inputs:
-   *  - dir: Directory in which to save model parameters.
-   *  - Wc1: 1st conv layer weights (parameters) matrix, of shape (F1, C*Hf*Wf).
-   *  - bc1: 1st conv layer biases vector, of shape (F1, 1).
-   *  - Wc2: 2nd conv layer weights (parameters) matrix, of shape (F2, F1*Hf*Wf).
-   *  - bc2: 2nd conv layer biases vector, of shape (F2, 1).
-   *  - Wc3: 3rd conv layer weights (parameters) matrix, of shape (F3, F2*Hf*Wf).
-   *  - bc3: 3rd conv layer biases vector, of shape (F3, 1).
-   *  - Wa1: 1st affine layer weights (parameters) matrix, of shape (F3*(Hin/2^3)*(Win/2^3), N1).
-   *  - ba1: 1st affine layer biases vector, of shape (1, N1).
-   *  - Wa2: 2nd affine layer weights (parameters) matrix, of shape (N1, K).
-   *  - ba2: 2nd affine layer biases vector, of shape (1, K).
-   *
-   * Outputs:
-   *  - probs: Class probabilities, of shape (N, K).
-   */
-  write(Wc1, dir + "Wc1", format="binary")
-  write(bc1, dir + "bc1", format="binary")
-  write(Wc2, dir + "Wc2", format="binary")
-  write(bc2, dir + "bc2", format="binary")
-  write(Wc3, dir + "Wc3", format="binary")
-  write(bc3, dir + "bc3", format="binary")
-  write(Wa1, dir + "Wa1", format="binary")
-  write(ba1, dir + "ba1", format="binary")
-  write(Wa2, dir + "Wa2", format="binary")
-  write(ba2, dir + "ba2", format="binary")
-}
-
-predict = function(matrix[double] X, int C, int Hin, int Win,
-                   matrix[double] Wc1, matrix[double] bc1,
-                   matrix[double] Wc2, matrix[double] bc2,
-                   matrix[double] Wc3, matrix[double] bc3,
-                   matrix[double] Wa1, matrix[double] ba1,
-                   matrix[double] Wa2, matrix[double] ba2)
-    return (matrix[double] probs) {
-  /*
-   * Computes the class probability predictions of a convolutional
-   * net using the "LeNet" architecture.
-   *
-   * The input matrix, X, has N examples, each represented as a 3D
-   * volume unrolled into a single vector.
-   *
-   * Inputs:
-   *  - X: Input data matrix, of shape (N, C*Hin*Win).
-   *  - C: Number of input channels (dimensionality of input depth).
-   *  - Hin: Input height.
-   *  - Win: Input width.
-   *  - Wc1: 1st conv layer weights (parameters) matrix, of shape (F1, C*Hf*Wf).
-   *  - bc1: 1st conv layer biases vector, of shape (F1, 1).
-   *  - Wc2: 2nd conv layer weights (parameters) matrix, of shape (F2, F1*Hf*Wf).
-   *  - bc2: 2nd conv layer biases vector, of shape (F2, 1).
-   *  - Wc3: 3rd conv layer weights (parameters) matrix, of shape (F3, F2*Hf*Wf).
-   *  - bc3: 3rd conv layer biases vector, of shape (F3, 1).
-   *  - Wa1: 1st affine layer weights (parameters) matrix, of shape (F3*(Hin/2^3)*(Win/2^3), N1).
-   *  - ba1: 1st affine layer biases vector, of shape (1, N1).
-   *  - Wa2: 2nd affine layer weights (parameters) matrix, of shape (N1, K).
-   *  - ba2: 2nd affine layer biases vector, of shape (1, K).
-   *
-   * Outputs:
-   *  - probs: Class probabilities, of shape (N, K).
-   */
-  N = nrow(X)
-
-  # Network:
-  # conv1 -> relu1 -> pool1 -> conv2 -> relu2 -> pool2 -> conv3 -> relu3 -> pool3
-  #  -> affine1 -> relu1 -> affine2 -> softmax
-  Hf = 3  # filter height
-  Wf = 3  # filter width
-  stride = 1
-  pad = 1  # For same dimensions, (Hf - stride) / 2
-
-  F1 = nrow(Wc1)  # num conv filters in conv1
-  F2 = nrow(Wc2)  # num conv filters in conv2
-  F3 = nrow(Wc3)  # num conv filters in conv3
-  N1 = ncol(Wa1)  # num nodes in affine1
-  K = ncol(Wa2)  # num nodes in affine2, equal to number of target dimensions (num classes)
-
-  # TODO: Implement fast, distributed conv & max pooling operators so that predictions
-  # can be computed in a full-batch, distributed manner.  Alternatively, improve `parfor`
-  # so that it can be efficiently used for parallel predictions.
-  ## Compute forward pass
-  ### conv layer 1: conv1 -> relu1 -> pool1
-  #[outc1, Houtc1, Woutc1] = conv2d::forward(X, Wc1, bc1, C, Hin, Win, Hf, Wf, stride, stride,
-  #                                          pad, pad)
-  #outc1r = relu::forward(outc1)
-  #[outc1p, Houtc1p, Woutc1p] = max_pool2d::forward(outc1r, F1, Houtc1, Woutc1, Hf=2, Wf=2,
-  #                                                 strideh=2, stridew=2, 0, 0)
-  ### conv layer 2: conv2 -> relu2 -> pool2
-  #[outc2, Houtc2, Woutc2] = conv2d::forward(outc1p, Wc2, bc2, F1, Houtc1p, Woutc1p, Hf, Wf,
-  #                                          stride, stride, pad, pad)
-  #outc2r = relu::forward(outc2)
-  #[outc2p, Houtc2p, Woutc2p] = max_pool2d::forward(outc2r, F2, Houtc2, Woutc2, Hf=2, Wf=2,
-  #                                                 strideh=2, stridew=2, 0, 0)
-  ### conv layer 3: conv3 -> relu3 -> pool3
-  #[outc3, Houtc3, Woutc3] = conv2d::forward(outc2p, Wc3, bc3, F2, Houtc2p, Woutc2p, Hf, Wf,
-  #                                          stride, stride, pad, pad)
-  #outc3r = relu::forward(outc3)
-  #[outc3p, Houtc3p, Woutc3p] = max_pool2d::forward(outc3r, F3, Houtc3, Woutc3, Hf=2, Wf=2,
-  #                                                 strideh=2, stridew=2, 0, 0)
-  ### affine layer 1:  affine1 -> relu1 -> dropout
-  #outa1 = affine::forward(outc3p, Wa1, ba1)
-  #outa1r = relu::forward(outa1)
-  ##[outa1d, maskad1] = dropout::forward(outa1r, 0.5, -1)
-  ### affine layer 2:  affine2 -> softmax
-  #outa2 = affine::forward(outa1r, Wa2, ba2)
-  #probs = softmax::forward(outa2)
-
-  # Compute predictions over mini-batches
-  probs = matrix(0, rows=N, cols=K)
-  batch_size = 50
-  iters = ceil(N / batch_size)
-  for(i in 1:iters) {
-  # TODO: `parfor` should work here, possibly as an alternative to distributed predictions.
-  #parfor(i in 1:iters, check=0, mode=REMOTE_SPARK, resultmerge=REMOTE_SPARK) {
-    # Get next batch
-    beg = ((i-1) * batch_size) %% N + 1
-    end = min(N, beg + batch_size - 1)
-    X_batch = X[beg:end,]
-
-    # Compute forward pass
-    ## conv layer 1: conv1 -> relu1 -> pool1
-    [outc1, Houtc1, Woutc1] = conv2d::forward(X_batch, Wc1, bc1, C, Hin, Win, Hf, Wf,
-                                              stride, stride, pad, pad)
-    outc1r = relu::forward(outc1)
-    [outc1p, Houtc1p, Woutc1p] = max_pool2d::forward(outc1r, F1, Houtc1, Woutc1, Hf=2, Wf=2,
-                                                     strideh=2, stridew=2, 0, 0)
-    ## conv layer 2: conv2 -> relu2 -> pool2
-    [outc2, Houtc2, Woutc2] = conv2d::forward(outc1p, Wc2, bc2, F1, Houtc1p, Woutc1p, Hf, Wf,
-                                              stride, stride, pad, pad)
-    outc2r = relu::forward(outc2)
-    [outc2p, Houtc2p, Woutc2p] = max_pool2d::forward(outc2r, F2, Houtc2, Woutc2, Hf=2, Wf=2,
-                                                     strideh=2, stridew=2, 0, 0)
-    ## conv layer 3: conv3 -> relu3 -> pool3
-    [outc3, Houtc3, Woutc3] = conv2d::forward(outc2p, Wc3, bc3, F2, Houtc2p, Woutc2p, Hf, Wf,
-                                              stride, stride, pad, pad)
-    outc3r = relu::forward(outc3)
-    [outc3p, Houtc3p, Woutc3p] = max_pool2d::forward(outc3r, F3, Houtc3, Woutc3, Hf=2, Wf=2,
-                                                     strideh=2, stridew=2, 0, 0)
-    ## affine layer 1:  affine1 -> relu1 -> dropout
-    outa1 = affine::forward(outc3p, Wa1, ba1)
-    outa1r = relu::forward(outa1)
-    #[outa1d, maskad1] = dropout::forward(outa1r, 0.5, -1)
-    ## affine layer 2:  affine2 -> softmax
-    outa2 = affine::forward(outa1r, Wa2, ba2)
-    probs_batch = softmax::forward(outa2)
-
-    # Store predictions
-    probs[beg:end,] = probs_batch
-  }
-}
-
-eval = function(matrix[double] probs, matrix[double] Y)
-    return (double loss, double accuracy) {
-  /*
-   * Evaluates a convolutional net using the "LeNet" architecture.
-   *
-   * The probs matrix contains the class probability predictions
-   * of K classes over N examples.  The targets, Y, have K classes,
-   * and are one-hot encoded.
-   *
-   * Inputs:
-   *  - probs: Class probabilities, of shape (N, K).
-   *  - Y: Target matrix, of shape (N, K).
-   *
-   * Outputs:
-   *  - loss: Scalar loss, of shape (1).
-   *  - accuracy: Scalar accuracy, of shape (1).
-   */
-  # Compute loss & accuracy
-  loss = cross_entropy_loss::forward(probs, Y)
-  correct_pred = rowIndexMax(probs) == rowIndexMax(Y)
-  accuracy = mean(correct_pred)
-}
-
-generate_dummy_data = function()
-    return (matrix[double] X, matrix[double] Y, int C, int Hin, int Win) {
-  /*
-   * Generate a dummy dataset similar to the breast cancer dataset.
-   *
-   * Outputs:
-   *  - X: Input data matrix, of shape (N, D).
-   *  - Y: Target matrix, of shape (N, K).
-   *  - C: Number of input channels (dimensionality of input depth).
-   *  - Hin: Input height.
-   *  - Win: Input width.
-   */
-  # Generate dummy input data
-  N = 1024  # num examples
-  C = 3  # num input channels
-  Hin = 256  # input height
-  Win = 256  # input width
-  K = 3  # num target classes
-  X = rand(rows=N, cols=C*Hin*Win, pdf="normal")
-  classes = round(rand(rows=N, cols=1, min=1, max=K, pdf="uniform"))
-  Y = table(seq(1, N), classes)  # one-hot encoding
-}
-

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/hyperparam_tuning.dml
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/hyperparam_tuning.dml b/projects/breast_cancer/hyperparam_tuning.dml
index 4f054c3..c5e0382 100644
--- a/projects/breast_cancer/hyperparam_tuning.dml
+++ b/projects/breast_cancer/hyperparam_tuning.dml
@@ -23,7 +23,7 @@
  * Hyperparameter Tuning Script For LeNet-like CNN Model
  */
 # Imports
-source("cnn.dml") as clf
+source("breastcancer/convnet.dml") as clf
 
 # Read data
 # X = read("data/X_0.01_sample_binary")

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/nn
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/nn b/projects/breast_cancer/nn
deleted file mode 120000
index 9c0c967..0000000
--- a/projects/breast_cancer/nn
+++ /dev/null
@@ -1 +0,0 @@
-../../scripts/staging/SystemML-NN/nn
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/preprocess.py
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/preprocess.py b/projects/breast_cancer/preprocess.py
index 95b9f36..167fa61 100644
--- a/projects/breast_cancer/preprocess.py
+++ b/projects/breast_cancer/preprocess.py
@@ -69,7 +69,7 @@ num_partitions = 20000
 add_row_indices = True
 train_frac = 0.8
 split_seed = 24
-folder = "/home/MDM/breast_cancer/data"
+folder = "data"  # Linux-filesystem directory to read raw data
 save_folder = "data"  # Hadoop-supported directory in which to save DataFrames
 df_path = os.path.join(save_folder, "samples_{}_{}{}.parquet".format(
     "labels" if training else "testing", sample_size, "_grayscale" if grayscale else ""))

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/softmax_clf.dml
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/softmax_clf.dml b/projects/breast_cancer/softmax_clf.dml
deleted file mode 100644
index 35fd545..0000000
--- a/projects/breast_cancer/softmax_clf.dml
+++ /dev/null
@@ -1,207 +0,0 @@
-#-------------------------------------------------------------
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#-------------------------------------------------------------
-
-/*
- * Breast Cancer Softmax Model
- */
-# Imports
-source("nn/layers/affine.dml") as affine
-source("nn/layers/cross_entropy_loss.dml") as cross_entropy_loss
-source("nn/layers/softmax.dml") as softmax
-#source("nn/optim/adam.dml") as adam
-source("nn/optim/sgd_nesterov.dml") as sgd_nesterov
-
-train = function(matrix[double] X, matrix[double] Y,
-                 matrix[double] X_val, matrix[double] Y_val,
-                 double lr, double mu, double decay,
-                 int batch_size, int epochs, int log_interval)
-    return (matrix[double] W, matrix[double] b) {
-  /*
-   * Trains a softmax classifier.
-   *
-   * The input matrix, X, has N examples, each with D features.
-   * The targets, Y, have K classes, and are one-hot encoded.
-   *
-   * Inputs:
-   *  - X: Input data matrix, of shape (N, D).
-   *  - Y: Target matrix, of shape (N, K).
-   *  - X_val: Input validation data matrix, of shape (N, C*Hin*Win).
-   *  - Y_val: Target validation matrix, of shape (N, K).
-   *  - lr: Learning rate.
-   *  - mu: Momentum value.
-   *      Typical values are in the range of [0.5, 0.99], usually
-   *      started at the lower end and annealed towards the higher end.
-   *  - decay: Learning rate decay rate.
-   *  - batch_size: Size of mini-batches to train on.
-   *  - epochs: Total number of full training loops over the full data set.
-   *  - log_interval: Interval, in iterations, between log outputs.
-   *
-   * Outputs:
-   *  - W: Weights (parameters) matrix, of shape (D, K).
-   *  - b: Biases vector, of shape (1, K).
-   */
-  N = nrow(Y)  # num examples
-  D = ncol(X)  # num features
-  K = ncol(Y)  # num classes
-
-  # Create softmax classifier:
-  # affine -> softmax
-  [W, b] = affine::init(D, K)
-  W = W / sqrt(2.0/(D)) * sqrt(1/(D))
-
-  # Initialize SGD w/ Nesterov momentum optimizer
-  vW = sgd_nesterov::init(W)  # optimizer momentum state for W
-  vb = sgd_nesterov::init(b)  # optimizer momentum state for b
-  #[mW, vW] = adam::init(W)  # optimizer 1st & 2nd moment state for W
-  #[mb, vb] = adam::init(b)  # optimizer 1st & 2nd moment state for b
-
-  # Starting validation loss & accuracy
-  probs_val = predict(X_val, W, b)
-  loss_val = cross_entropy_loss::forward(probs_val, Y_val)
-  accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(Y_val))
-  # Output results
-  print("Start: Val Loss: " + loss_val + ", Val Accuracy: " + accuracy_val)
-
-  # Optimize
-  print("Starting optimization")
-  iters = ceil(N / batch_size)
-  for (e in 1:epochs) {
-    for(i in 1:iters) {
-      # Get next batch
-      beg = ((i-1) * batch_size) %% N + 1
-      end = min(N, beg + batch_size - 1)
-      #print("Epoch: " + e + ", Iter: " + i + ", X[" + beg + ":" + end + ",]")
-      X_batch = X[beg:end,]
-      Y_batch = Y[beg:end,]
-
-      # Compute forward pass
-      ## affine & softmax:
-      out = affine::forward(X_batch, W, b)
-      probs = softmax::forward(out)
-
-      # Compute backward pass
-      ## loss:
-      dprobs = cross_entropy_loss::backward(probs, Y_batch)
-      ## affine & softmax:
-      dout = softmax::backward(dprobs, out)
-      [dX_batch, dW, db] = affine::backward(dout, X_batch, W, b)
-
-      # Optimize with SGD w/ Nesterov momentum
-      [W, vW] = sgd_nesterov::update(W, dW, lr, mu, vW)
-      [b, vb] = sgd_nesterov::update(b, db, lr, mu, vb)
-      #[W, mW, vW] = adam::update(W, dW, lr, 0.9, 0.999, 1e-8, e*i-1, mW, vW)
-      #[b, mb, vb] = adam::update(b, db, lr, 0.9, 0.999, 1e-8, e*i-1, mb, vb)
-
-      # Compute loss & accuracy for training & validation data every `log_interval` iterations.
-      if (i %% log_interval == 0) {
-        #print("Eval time! - i: " + i)
-        # Compute training loss & accuracy
-        loss = cross_entropy_loss::forward(probs, Y_batch)
-        accuracy = mean(rowIndexMax(probs) == rowIndexMax(Y_batch))
-
-        # Compute validation loss & accuracy
-        probs_val = predict(X_val, W, b)
-        loss_val = cross_entropy_loss::forward(probs_val, Y_val)
-        accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(Y_val))
-
-        # Output results
-        print("Epoch: " + e + "/" + epochs + ", Iter: " + i + "/" + iters
-              + ", Train Loss: " + loss + ", Train Accuracy: " + accuracy + ", Val Loss: "
-              + loss_val + ", Val Accuracy: " + accuracy_val + ", lr: " + lr + ", mu " + mu)
-      }
-    }
-    # Anneal momentum towards 0.999
-    mu = mu + (0.999 - mu)/(1+epochs-e)
-    # Decay learning rate
-    lr = lr * decay
-  }
-}
-
-predict = function(matrix[double] X, matrix[double] W, matrix[double] b)
-    return (matrix[double] probs) {
-  /*
-   * Computes the class probability predictions of a softmax classifier.
-   *
-   * The input matrix, X, has N examples, each with D features.
-   *
-   * Inputs:
-   *  - X: Input data matrix, of shape (N, D).
-   *  - W: Weights (parameters) matrix, of shape (D, K).
-   *  - b: Biases vector, of shape (1, K).
-   *
-   * Outputs:
-   *  - probs: Class probabilities, of shape (N, K).
-   */
-  N = nrow(X)  # num examples
-  K = ncol(W)  # num classes
-
-  # Compute forward pass
-  ## affine & softmax:
-  out = affine::forward(X, W, b)
-  probs = softmax::forward(out)
-}
-
-eval = function(matrix[double] probs, matrix[double] Y)
-    return (double loss, double accuracy) {
-  /*
-   * Evaluates a softmax classifier.
-   *
-   * The probs matrix contains the class probability predictions
-   * of K classes over N examples.  The targets, Y, have K classes,
-   * and are one-hot encoded.
-   *
-   * Inputs:
-   *  - probs: Class probabilities, of shape (N, K).
-   *  - Y: Target matrix, of shape (N, K).
-   *
-   * Outputs:
-   *  - loss: Scalar loss, of shape (1).
-   *  - accuracy: Scalar accuracy, of shape (1).
-   */
-  # Compute loss & accuracy
-  loss = cross_entropy_loss::forward(probs, Y)
-  correct_pred = rowIndexMax(probs) == rowIndexMax(Y)
-  accuracy = mean(correct_pred)
-}
-
-generate_dummy_data = function()
-    return (matrix[double] X, matrix[double] Y, int C, int Hin, int Win) {
-  /*
-   * Generate a dummy dataset similar to the breast cancer dataset.
-   *
-   * Outputs:
-   *  - X: Input data matrix, of shape (N, D).
-   *  - Y: Target matrix, of shape (N, K).
-   *  - C: Number of input channels (dimensionality of input depth).
-   *  - Hin: Input height.
-   *  - Win: Input width.
-   */
-  # Generate dummy input data
-  N = 1024  # num examples
-  C = 3  # num input channels
-  Hin = 256  # input height
-  Win = 256  # input width
-  T = 10  # num targets
-  X = rand(rows=N, cols=C*Hin*Win, pdf="normal")
-  classes = round(rand(rows=N, cols=1, min=1, max=T, pdf="uniform"))
-  Y = table(seq(1, N), classes)  # one-hot encoding
-}
-


[4/5] systemml git commit: [SYSTEMML-1185][SYSTEMML-1766] Merge experimental breast cancer updates

Posted by du...@apache.org.
http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/Preprocessing-Save-JPEGs.ipynb
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/Preprocessing-Save-JPEGs.ipynb b/projects/breast_cancer/Preprocessing-Save-JPEGs.ipynb
new file mode 100644
index 0000000..7e893f7
--- /dev/null
+++ b/projects/breast_cancer/Preprocessing-Save-JPEGs.ipynb
@@ -0,0 +1,610 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Imports"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "%load_ext autoreload\n",
+    "%autoreload 2\n",
+    "%matplotlib inline\n",
+    "\n",
+    "import math\n",
+    "import os\n",
+    "\n",
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "from PIL import Image\n",
+    "import tensorflow as tf\n",
+    "import pyspark.sql.functions as F\n",
+    "\n",
+    "from breastcancer import input_data\n",
+    "\n",
+    "plt.rcParams['figure.figsize'] = (10, 6)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# from pyspark.sql import SparkSession\n",
+    "# spark = (SparkSession.builder.appName(\"KerasResNet50\").getOrCreate())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Settings"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "size = 256\n",
+    "channels = 3\n",
+    "features = size * size * channels\n",
+    "classes = 3\n",
+    "p = 1\n",
+    "val_p = 1\n",
+    "use_caching = False\n",
+    "normalize_class_distribution = False\n",
+    "seed = 123"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Read in train & val data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Read and sample from full DataFrames\n",
+    "# TODO: Pull filenames out and simply pass them in as arguments.\n",
+    "# NOTE: ***Currently hacked read_* with updated data filenames.***\n",
+    "train_df = input_data.read_train_data(spark, size, channels, p, normalize_class_distribution, seed)\n",
+    "val_df = input_data.read_val_data(spark, size, channels, val_p, normalize_class_distribution, seed)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# # Save DataFrames (Optional)\n",
+    "# mode = \"error\"\n",
+    "# tr_sample_filename = os.path.join(\"data\", \"train_{}_sample_{}.parquet\".format(p, size))\n",
+    "# val_sample_filename = os.path.join(\"data\", \"val_{}_sample_{}.parquet\".format(val_p, size))\n",
+    "# train_df.write.mode(mode).save(tr_sample_filename, format=\"parquet\")\n",
+    "# val_df.write.mode(mode).save(val_sample_filename, format=\"parquet\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "if use_caching:\n",
+    "  train_df.cache()\n",
+    "  val_df.cache()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Explore class distributions.\n",
+    "for df in [train_df, val_df]:\n",
+    "  df.select(\"tumor_score\").groupBy(\"tumor_score\").count().show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "tc = train_df.count()\n",
+    "vc = val_df.count()\n",
+    "print(tc, vc)  # updated norm vs: 1801835 498183; original: 3560187 910918"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Sanity check that there are no duplicates.\n",
+    "if p < 1:\n",
+    "  assert train_df.dropDuplicates().count() == tc\n",
+    "if val_p < 1:\n",
+    "  assert val_df.dropDuplicates().count() == vc"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Normalize Staining"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def normalize_staining(x, beta=0.15, alpha=1, light_intensity=240):\n",
+    "  \"\"\"\n",
+    "  Normalize the staining of H&E histology slides.\n",
+    "  \n",
+    "  This function normalizes the staining of H&E histology slides.\n",
+    "  \n",
+    "  References:\n",
+    "    - Macenko, Marc, et al. \"A method for normalizing histology slides for\n",
+    "    quantitative analysis.\" Biomedical Imaging: From Nano to Macro, 2009.\n",
+    "    ISBI'09. IEEE International Symposium on. IEEE, 2009.\n",
+    "      - http://wwwx.cs.unc.edu/~mn/sites/default/files/macenko2009.pdf\n",
+    "    - https://github.com/mitkovetta/staining-normalization/blob/master/normalizeStaining.m\n",
+    "  \"\"\"\n",
+    "  # Setup.\n",
+    "  x = np.asarray(x)\n",
+    "  h, w, c = x.shape\n",
+    "  x = x.reshape(-1, c).astype(np.float64)  # shape (H*W, C)\n",
+    "  \n",
+    "  # Reference stain vectors and stain saturations.  We will normalize all slides\n",
+    "  # to these references.  To create these, grab the stain vectors and stain\n",
+    "  # saturations from a desirable slide.\n",
+    "  ## Values in reference implementation for use with eigendecomposition approach.\n",
+    "  stain_ref = np.array([0.5626, 0.2159, 0.7201, 0.8012, 0.4062, 0.5581]).reshape(3,2)\n",
+    "  max_sat_ref = np.array([1.9705, 1.0308]).reshape(2,1)\n",
+    "  ## Values for use with SVD approach.  These were computed by (1) running the\n",
+    "  ## the eigendecomposition approach to normalize an image, (2) running the\n",
+    "  ## SVD approach on the normalized image, and (3) recording the stain vectors\n",
+    "  ## and max saturations for this (ideal) normalized image.\n",
+    "#   stain_ref = np.array([0.20730702, 0.56170196, 0.80308092, 0.72012455, 0.55864554, 0.4073224]).reshape(3,2)\n",
+    "#   max_sat_ref = np.array([0.99818645, 1.96029115]).reshape(2,1)\n",
+    "  \n",
+    "  # Convert RGB to OD.\n",
+    "  OD = -np.log((x+1)/light_intensity)  # shape (H*W, C)\n",
+    "#   OD = -np.log(x/255 + 1e-8)\n",
+    "  \n",
+    "  # Remove data with OD intensity less than beta.\n",
+    "  # I.e. remove transparent pixels.\n",
+    "  # Note: This needs to be checked per channel, rather than\n",
+    "  # taking an average over all channels for a given pixel.\n",
+    "  #OD_thresh = OD[np.logical_not(np.any(OD < beta, 1)), :]\n",
+    "  OD_thresh = OD[np.all(OD >= beta, 1), :]  # shape (K, C)\n",
+    "  \n",
+    "  # Calculate eigenvectors.\n",
+    "  eigvals, eigvecs = np.linalg.eig(np.cov(OD_thresh.T))  # np.cov results in inf/nans\n",
+    "#   U, s, V = np.linalg.svd(OD_thresh, full_matrices=False)\n",
+    "  \n",
+    "  # Extract two largest eigenvectors.\n",
+    "  # Note: We swap the sign of the eigvecs here to be consistent\n",
+    "  # with other implementations.  Both +/- eigvecs are valid, with\n",
+    "  # the same eigenvalue, so this is okay.\n",
+    "  top_eigvecs = eigvecs[:, np.argsort(eigvals)[-2:]] * -1\n",
+    "#   top_eigvecs = V[0:2, :].T * -1  # shape (C, 2)\n",
+    "  \n",
+    "  # Project thresholded optical density values onto plane spanned by\n",
+    "  # 2 largest eigenvectors.\n",
+    "  proj = np.dot(OD_thresh, top_eigvecs)  # shape (K, 2)\n",
+    "  \n",
+    "  # Calculate angle of each point wrt the first plane direction.\n",
+    "  # Note: the parameters are `np.arctan2(y, x)`\n",
+    "  angles = np.arctan2(proj[:, 1], proj[:, 0])  # shape (K,)\n",
+    "  \n",
+    "  # Find robust extremes (a and 100-a percentiles) of the angle.\n",
+    "  min_angle = np.percentile(angles, alpha)\n",
+    "  max_angle = np.percentile(angles, 100-alpha)\n",
+    "  \n",
+    "  # Convert min/max vectors (extremes) back to OD space.\n",
+    "#   extreme_angles = np.array(\n",
+    "#     [np.cos(min_angle), np.cos(max_angle), np.sin(min_angle), np.sin(max_angle)]\n",
+    "#   ).reshape(2,2)\n",
+    "#   stains = np.dot(top_eigvecs, extreme_angles)  # shape (C, 2)\n",
+    "  min_vec = np.dot(top_eigvecs, np.array([np.cos(min_angle), np.sin(min_angle)]).reshape(2,1))\n",
+    "  max_vec = np.dot(top_eigvecs, np.array([np.cos(max_angle), np.sin(max_angle)]).reshape(2,1))\n",
+    "  \n",
+    "  # Merge vectors with hematoxylin first, and eosin second, as a heuristic.\n",
+    "  if min_vec[0] > max_vec[0]:\n",
+    "    stains = np.hstack((min_vec, max_vec))\n",
+    "  else:\n",
+    "    stains = np.hstack((max_vec, min_vec))\n",
+    "\n",
+    "  # Calculate saturations of each stain.\n",
+    "  # Note: Here, we solve\n",
+    "  #    OD = VS\n",
+    "  #     S = V^{-1}OD\n",
+    "  # where `OD` is the matrix of optical density values of our image,\n",
+    "  # `V` is the matrix of stain vectors, and `S` is the matrix of stain\n",
+    "  # saturations.  Since this is an overdetermined system, we use the\n",
+    "  # least squares solver, rather than a direct solve.\n",
+    "  sats, _, _, _ = np.linalg.lstsq(stains, OD.T)\n",
+    "  \n",
+    "  # Normalize stain saturations.\n",
+    "  max_sat = np.percentile(sats, 99, axis=1, keepdims=True)\n",
+    "  sats = sats / max_sat * max_sat_ref\n",
+    "  \n",
+    "  # Recreate image.\n",
+    "  # Note: If the image is immediately converted to uint8 with `.astype(np.uint8)`, it will\n",
+    "  # not return the correct values due to the initial values being outside of [0,255].\n",
+    "  # To fix this, we round to the nearest integer, and then clip to [0,255], which is the\n",
+    "  # same behavior as Matlab.\n",
+    "  x_norm = np.exp(np.dot(-stain_ref, sats)) * light_intensity #- 1\n",
+    "#   x_norm = np.exp(np.dot(-stain_ref, sats)) * 255 - 1e-8\n",
+    "  x_norm = np.clip(np.round(x_norm), 0, 255).astype(np.uint8)\n",
+    "  x_norm = x_norm.T.reshape(h,w,c)\n",
+    "  \n",
+    "  # Debug.\n",
+    "#   print(\"OD shape: \", OD.shape)\n",
+    "#   print(\"OD_thresh shape: \", OD_thresh.shape)\n",
+    "#   print(\"eigvals: \", eigvals)\n",
+    "#   print(\"sorted eigvals: \", np.argsort(eigvals))\n",
+    "#   print(\"top_eigvecs shape: \", top_eigvecs.shape)\n",
+    "#   print(\"top_eigvecs: \", top_eigvecs)\n",
+    "#   print(\"top 2 eigval indices: \", np.argsort(eigvals)[-2:])\n",
+    "#   print(\"proj shape: \", proj.shape)\n",
+    "#   print(\"proj mean: \", np.mean(proj, axis=0))\n",
+    "#   print(\"angles shape: \", angles.shape)\n",
+    "#   print(\"angles mean: \", np.mean(angles))\n",
+    "#   print(\"min/max angles: \", min_angle, max_angle)\n",
+    "#   print(\"min_vec shape: \", min_vec.shape)\n",
+    "#   print(\"min_vec mean: \", np.mean(min_vec))\n",
+    "#   print(\"max_vec mean: \", np.mean(max_vec))\n",
+    "#   print(\"stains shape: \", stains.shape)\n",
+    "#   print(\"stains: \", stains)\n",
+    "#   print(\"sats shape: \", sats.shape)\n",
+    "#   print(\"sats mean: \", np.mean(sats, axis=1))\n",
+    "#   print(\"max_sat shape: \", max_sat.shape)\n",
+    "#   print(\"max_sat: \", max_sat)\n",
+    "#   print(\"x_norm shape: \", x_norm.shape)\n",
+    "#   print(\"x_norm mean: \", np.mean(x_norm, axis=(0,1)))\n",
+    "#   print(\"x_norm min: \", np.min(x_norm, axis=(0,1)))\n",
+    "#   print(\"x_norm max: \", np.max(x_norm, axis=(0,1)))\n",
+    "#   print(x_norm.dtype)\n",
+    "#   print()\n",
+    "# #   x = x.reshape(h,w,c).astype(np.uint8)\n",
+    "  \n",
+    "  return x_norm"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Compute image channel means"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# tr_means = input_data.compute_channel_means(train_df.rdd, channels, size)\n",
+    "# val_means = input_data.compute_channel_means(val_df.rdd, channels, size)\n",
+    "# print(tr_means.shape)\n",
+    "# print(tr_means, val_means)\n",
+    "# # Train: [ 194.27633667  145.3067627   181.27861023]\n",
+    "# # Val: [ 192.92971802  142.83534241  180.18870544]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def array_to_img(x, channels, size):\n",
+    "  x = x.reshape((channels,size,size)).transpose((1,2,0))  # shape (H,W,C)\n",
+    "  img = Image.fromarray(x.astype(np.uint8), 'RGB')\n",
+    "  return img\n",
+    "\n",
+    "def img_to_array(img):\n",
+    "  x = np.asarray(img).astype(np.float64)  # shape (H,W,C)\n",
+    "  x = x.transpose(2,0,1).ravel()  # shape (C*H*W)\n",
+    "  return x"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def filter_empty(row, beta=0.15, light_intensity=240):\n",
+    "  x = row.sample.values\n",
+    "#   x = array_to_img(x, channels, size)\n",
+    "  x = x.reshape((channels,size,size)).transpose((1,2,0))  # shape (H,W,C)\n",
+    "  h, w, c = x.shape\n",
+    "  x = x.reshape(-1, c)  # shape (H*W, C)\n",
+    "  OD = -np.log((x+1)/light_intensity)  # shape (H*W, C)\n",
+    "  # Remove data with OD intensity less than beta.\n",
+    "  # I.e. remove transparent pixels.\n",
+    "  OD_thresh = OD[np.all(OD >= beta, 1), :]\n",
+    "  return OD_thresh.size > 2*c"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Filter ~empty samples.\n",
+    "train_rdd = train_df.rdd.filter(filter_empty)\n",
+    "val_rdd = val_df.rdd.filter(filter_empty)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Sanity checks\n",
+    "\n",
+    "# first = train_df.first()\n",
+    "# s = first.sample.values\n",
+    "# i = array_to_img(s, channels, size)\n",
+    "# s2 = img_to_array(i)\n",
+    "# assert np.allclose(s, s2)\n",
+    "\n",
+    "# def assert_finite(row):\n",
+    "#   x = row.sample.values\n",
+    "#   x = x.reshape((channels,size,size)).transpose((1,2,0)) \n",
+    "#   h, w, c = x.shape\n",
+    "#   x = x.reshape(-1, c).astype(np.float64)\n",
+    "#   OD = -np.log((x+1)/240)\n",
+    "#   OD_thresh = OD[np.all(OD >= 0.15, 1), :]\n",
+    "#   assert np.all(np.isfinite(OD_thresh.T))\n",
+    "# train_df.rdd.foreach(assert_finite)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def compute_channel_means(rdd, channels, size):\n",
+    "  \"\"\"Compute the means of each color channel across the dataset.\"\"\"\n",
+    "  def helper(x):\n",
+    "    x = x.sample.values\n",
+    "#     x = array_to_img(x, channels, size)\n",
+    "    x = x.reshape((channels,size,size)).transpose((1,2,0))  # shape (H,W,C)\n",
+    "    x = normalize_staining(x)\n",
+    "    x = np.asarray(x).astype(np.float64)  # shape (H,W,C)\n",
+    "    mu = np.mean(x, axis=(0,1))\n",
+    "    return mu\n",
+    "\n",
+    "  means = rdd.map(helper).collect()\n",
+    "  means = np.array(means)\n",
+    "  means = np.mean(means, axis=0)\n",
+    "  return means"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true,
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "tr_means = compute_channel_means(train_rdd, channels, size)\n",
+    "val_means = compute_channel_means(val_rdd, channels, size)\n",
+    "print(tr_means.shape)\n",
+    "print(tr_means, val_means)\n",
+    "# Means: [194.27633667  145.3067627  181.27861023]\n",
+    "# Means with norm: train [189.54944625  152.73427159  176.89543273] val [187.45282379  150.25695602  175.23754894]\n",
+    "# Means with norm on updated data:\n",
+    "#    [ 177.27269518  136.06809866  165.07305029] [ 176.21991047  134.39199187  163.81433421]\n",
+    "# Means with norm on updated data v3:\n",
+    "#    [ 183.36777842  138.81743141  166.07406199] [ 182.41870536  137.15523608  164.81227273]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Save every image as a JPEG"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def helper(row, channels, size, save_dir):\n",
+    "  tumor_score = row.tumor_score\n",
+    "  sample = row.sample.values\n",
+    "#   img = array_to_img(sample, channels, size)\n",
+    "  x = sample.reshape((channels,size,size)).transpose((1,2,0))  # shape (H,W,C)\n",
+    "  x = normalize_staining(x)\n",
+    "  img = Image.fromarray(x.astype(np.uint8), 'RGB')\n",
+    "  filename = '{index}_{slide_num}_{hash}.jpeg'.format(\n",
+    "      index=row[\"__INDEX\"], slide_num=row.slide_num, hash=np.random.randint(1e4))\n",
+    "  class_dir = os.path.join(save_dir, str(tumor_score))\n",
+    "  path = os.path.join(class_dir, filename)\n",
+    "  img.save(path)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "tr_save_dir = \"images/{stage}/{p}\".format(stage=\"train_updated_norm_v3\", p=p)\n",
+    "val_save_dir = \"images/{stage}/{p}\".format(stage=\"val_updated_norm_v3\", p=val_p)\n",
+    "print(tr_save_dir, val_save_dir)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "%%bash -s \"$tr_save_dir\" \"$val_save_dir\"\n",
+    "for i in 1 2 3\n",
+    "do\n",
+    "  sudo mkdir -p $1/$i\n",
+    "  sudo mkdir -p $2/$i\n",
+    "done\n",
+    "sudo chmod 777 -R $1\n",
+    "sudo chmod 777 -R $2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Note: Use this if the DataFrame doesn't have an __INDEX column yet.\n",
+    "# train_df = train_df.withColumn(\"__INDEX\", F.monotonically_increasing_id())\n",
+    "# val_df = val_df.withColumn(\"__INDEX\", F.monotonically_increasing_id())"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "train_df.rdd.filter(filter_empty).foreach(lambda row: helper(row, channels, size, tr_save_dir))\n",
+    "val_df.rdd.filter(filter_empty).foreach(lambda row: helper(row, channels, size, val_save_dir))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "---"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def show_random_image(save_dir):\n",
+    "  c = np.random.randint(1, 4)\n",
+    "  class_dir = os.path.join(save_dir, str(c))\n",
+    "  files = os.listdir(class_dir)\n",
+    "  i = np.random.randint(0, len(files))\n",
+    "  fname = os.path.join(class_dir, files[i])\n",
+    "  print(fname)\n",
+    "  img = Image.open(fname)\n",
+    "  plt.imshow(img)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "show_random_image(tr_save_dir)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 + Spark 2.x + SystemML",
+   "language": "python",
+   "name": "pyspark3_2.x"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/Preprocessing.ipynb
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/Preprocessing.ipynb b/projects/breast_cancer/Preprocessing.ipynb
index 9c6850b..f7cd104 100644
--- a/projects/breast_cancer/Preprocessing.ipynb
+++ b/projects/breast_cancer/Preprocessing.ipynb
@@ -2,10 +2,7 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "# Predicting Breast Cancer Proliferation Scores with Apache Spark and Apache SystemML\n",
     "## Preprocessing\n",
@@ -14,10 +11,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "# Setup"
    ]
@@ -26,9 +20,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -58,10 +50,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "# Execute Preprocessing & Save"
    ]
@@ -70,9 +59,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": true,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -86,9 +73,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -105,23 +90,23 @@
     "add_row_indices = True\n",
     "train_frac = 0.8\n",
     "split_seed = 24\n",
-    "folder = \"/home/MDM/breast_cancer/data\"\n",
+    "folder = \"data\"  # Linux-filesystem directory to read raw data\n",
     "save_folder = \"data\"  # Hadoop-supported directory in which to save DataFrames\n",
     "df_path = os.path.join(save_folder, \"samples_{}_{}{}.parquet\".format(\n",
     "    \"labels\" if training else \"testing\", sample_size, \"_grayscale\" if grayscale else \"\"))\n",
     "train_df_path = os.path.join(save_folder, \"train_{}{}.parquet\".format(sample_size,\n",
     "    \"_grayscale\" if grayscale else \"\"))\n",
     "val_df_path = os.path.join(save_folder, \"val_{}{}.parquet\".format(sample_size,\n",
-    "    \"_grayscale\" if grayscale else \"\"))"
+    "    \"_grayscale\" if grayscale else \"\"))\n",
+    "\n",
+    "df_path, train_df_path, val_df_path"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -135,9 +120,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -149,9 +132,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": true,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -163,9 +144,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -178,9 +157,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -192,9 +169,7 @@
   {
    "cell_type": "markdown",
    "metadata": {
-    "collapsed": true,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "source": [
     "---"
@@ -202,20 +177,14 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "# Sample Data"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "### TODO: Wrap this in a function with appropriate default arguments"
    ]
@@ -224,9 +193,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -239,9 +206,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -249,6 +214,7 @@
     "p=0.01\n",
     "train_sample = train.drop(\"__INDEX\").sampleBy(\"tumor_score\", fractions={1: p, 2: p, 3: p}, seed=42)\n",
     "val_sample = val.drop(\"__INDEX\").sampleBy(\"tumor_score\", fractions={1: p, 2: p, 3: p}, seed=42)\n",
+    "\n",
     "train_sample, val_sample"
    ]
   },
@@ -256,9 +222,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -270,7 +234,7 @@
     "              .map(lambda r: (r[1] + 1, *r[0]))\n",
     "              .toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))\n",
     "train_sample = train_sample.select(train_sample[\"__INDEX\"].astype(\"int\"),\n",
-    "                                   train_sample.slide_num.astype(\"int\"), \n",
+    "                                   train_sample.slide_num.astype(\"int\"),\n",
     "                                   train_sample.tumor_score.astype(\"int\"),\n",
     "                                   train_sample.molecular_score,\n",
     "                                   train_sample[\"sample\"])\n",
@@ -281,7 +245,7 @@
     "            .map(lambda r: (r[1] + 1, *r[0]))\n",
     "            .toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))\n",
     "val_sample = val_sample.select(val_sample[\"__INDEX\"].astype(\"int\"),\n",
-    "                               val_sample.slide_num.astype(\"int\"), \n",
+    "                               val_sample.slide_num.astype(\"int\"),\n",
     "                               val_sample.tumor_score.astype(\"int\"),\n",
     "                               val_sample.molecular_score,\n",
     "                               val_sample[\"sample\"])\n",
@@ -293,23 +257,24 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": true,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
     "# Save train and validation DataFrames.\n",
-    "tr_sample_filename = \"train_{}_sample_{}{}.parquet\".format(p, sample_size, \"_grayscale\" if grayscale else \"\")\n",
-    "val_sample_filename = \"val_{}_sample_{}{}.parquet\".format(p, sample_size, \"_grayscale\" if grayscale else \"\")\n",
-    "train_sample_path = os.path.join(\"save_folder\", tr_sample_filename)\n",
-    "val_sample_path = os.path.join(\"save_folder\", val_sample_filename)\n",
+    "tr_sample_filename = \"train_{}_sample_{}{}.parquet\".format(p, sample_size,\n",
+    "    \"_grayscale\" if grayscale else \"\")\n",
+    "val_sample_filename = \"val_{}_sample_{}{}.parquet\".format(p, sample_size,\n",
+    "    \"_grayscale\" if grayscale else \"\")\n",
+    "train_sample_path = os.path.join(save_folder, tr_sample_filename)\n",
+    "val_sample_path = os.path.join(save_folder, val_sample_filename)\n",
     "save(train_sample, train_sample_path, sample_size, grayscale)\n",
     "save(val_sample, val_sample_path, sample_size, grayscale)"
    ]
   }
  ],
  "metadata": {
+  "anaconda-cloud": {},
   "kernelspec": {
    "display_name": "Python 3 + Spark 2.x + SystemML",
    "language": "python",
@@ -325,7 +290,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.6.0"
+   "version": "3.6.1"
   }
  },
  "nbformat": 4,

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/README.md
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/README.md b/projects/breast_cancer/README.md
index 179eb0f..b7402e8 100644
--- a/projects/breast_cancer/README.md
+++ b/projects/breast_cancer/README.md
@@ -19,33 +19,35 @@ limitations under the License.
 
 # Predicting Breast Cancer Proliferation Scores with Apache Spark and Apache SystemML
 
-Note: This project is still a **work in progress**.
+Note: This project is still a **work in progress**.  There is also an [experimental branch](https://github.com/dusenberrymw/systemml/tree/breast_cancer_experimental2/projects/breast_cancer) with additional files and experiments.
 
 ## Overview
 The [Tumor Proliferation Assessment Challenge 2016 (TUPAC16)](http://tupac.tue-image.nl/) is a "Grand Challenge" that was created for the [2016 Medical Image Computing and Computer Assisted Intervention (MICCAI 2016)](http://miccai2016.org/en/) conference.  In this challenge, the goal is to develop state-of-the-art algorithms for automatic prediction of tumor proliferation scores from whole-slide histopathology images of breast tumors.
 
 ## Background
-Breast cancer is the leading cause of cancerous death in women in less-developed countries, and is the second leading cause of cancerous deaths in developed countries, accounting for 29% of all cancers in women within the U.S. [1]. Survival rates increase as early detection increases, giving incentive for pathologists and the medical world at large to develop improved methods for even earlier detection [2].  There are many forms of breast cancer including Ductal Carcinoma in Situ (DCIS), Invasive Ductal Carcinoma (IDC), Tubular Carcinoma of the Breast, Medullary Carcinoma of the Breast, Invasive Lobular Carcinoma, Inflammatory Breast Cancer and several others [3]. Within all of these forms of breast cancer, the rate in which breast cancer cells grow (proliferation), is a strong indicator of a patient’s prognosis. Although there are many means of determining the presence of breast cancer, tumor proliferation speed has been proven to help pathologists determine the treatment for the
  patient. The most common technique for determining the proliferation speed is through mitotic count (mitotic index) estimates, in which a pathologist counts the dividing cell nuclei in hematoxylin and eosin (H&E) stained slide preparations to determine the number of mitotic bodies.  Given this, the pathologist produces a proliferation score of either 1, 2, or 3, ranging from better to worse prognosis [4]. Unfortunately, this approach is known to have reproducibility problems due to the variability in counting, as well as the difficulty in distinguishing between different grades.
+Breast cancer is the leading cause of cancerous death in women in less-developed countries, and is the second leading cause of cancerous deaths in developed countries, accounting for 29% of all cancers in women within the U.S. [1]. Survival rates increase as early detection increases, giving incentive for pathologists and the medical world at large to develop improved methods for even earlier detection [2].  There are many forms of breast cancer including Ductal Carcinoma in Situ (DCIS), Invasive Ductal Carcinoma (IDC), Tubular Carcinoma of the Breast, Medullary Carcinoma of the Breast, Invasive Lobular Carcinoma, Inflammatory Breast Cancer and several others [3]. Within all of these forms of breast cancer, the rate in which breast cancer cells grow (proliferation), is a strong indicator of a patient’s prognosis. Although there are many means of determining the presence of breast cancer, tumor proliferation speed has been proven to help pathologists determine the best treatment fo
 r the patient. The most common technique for determining the proliferation speed is through mitotic count (mitotic index) estimates, in which a pathologist counts the dividing cell nuclei in hematoxylin and eosin (H&E) stained slide preparations to determine the number of mitotic bodies.  Given this, the pathologist produces a proliferation score of either 1, 2, or 3, ranging from better to worse prognosis [4]. Unfortunately, this approach is known to have reproducibility problems due to the variability in counting, as well as the difficulty in distinguishing between different grades.
 
-References:
-[1] http://emedicine.medscape.com/article/1947145-overview#a3
-[2] http://emedicine.medscape.com/article/1947145-overview#a7
-[3] http://emedicine.medscape.com/article/1954658-overview
-[4] http://emedicine.medscape.com/article/1947145-workup#c12
+References: <br />
+[1] http://emedicine.medscape.com/article/1947145-overview#a3 <br />
+[2] http://emedicine.medscape.com/article/1947145-overview#a7 <br />
+[3] http://emedicine.medscape.com/article/1954658-overview <br />
+[4] http://emedicine.medscape.com/article/1947145-workup#c12 <br />
 
 ## Goal & Approach
-In an effort to automate the process of classification, this project aims to develop a large-scale deep learning approach for predicting tumor scores directly from the pixels of whole-slide histopathology images.  Our proposed approach is based on a recent research paper from Stanford [1].  Starting with 500 extremely high-resolution tumor slide images with accompanying score labels, we aim to make use of Apache Spark in a preprocessing step to cut and filter the images into smaller square samples, generating 4.7 million samples for a total of ~7TB of data [2].  We then utilize Apache SystemML on top of Spark to develop and train a custom, large-scale, deep convolutional neural network on these samples, making use of the familiar linear algebra syntax and automatically-distributed execution of SystemML [3].  Our model takes as input the pixel values of the individual samples, and is trained to predict the correct tumor score classification for each one.  In addition to distributed l
 inear algebra, we aim to exploit task-parallelism via parallel for-loops for hyperparameter optimization, as well as hardware acceleration for faster training via a GPU-backed runtime.  Ultimately, we aim to develop a model that is sufficiently stronger than existing approaches for the task of breast cancer tumor proliferation score classification.
+In an effort to automate the process of classification, this project aims to develop a large-scale deep learning approach for predicting tumor scores directly from the pixels of whole-slide histopathology images.  Our proposed approach is based on a recent research paper from Stanford [1].  Starting with 500 extremely high-resolution tumor slide images with accompanying score labels, we aim to make use of Apache Spark in a preprocessing step to cut and filter the images into smaller square samples, generating 4.7 million samples for a total of ~7TB of data [2].  We then utilize Apache SystemML on top of Spark to develop and train a custom, large-scale, deep convolutional neural network on these samples, making use of the familiar linear algebra syntax and automatically-distributed execution of SystemML [3].  Our model takes as input the pixel values of the individual samples, and is trained to predict the correct tumor score classification for each one.  In addition to distributed l
 inear algebra, we aim to exploit task-parallelism via parallel for-loops for hyperparameter optimization, as well as hardware acceleration for faster training via a GPU-backed runtime.  We also explore a hybrid setup of using Keras for model training (currently transfer learning by fine-tuning a modified ResNet50 model) [4], and SystemML for distributed scoring of exported models.  Ultimately, we aim to develop a model that is sufficiently stronger than existing approaches for the task of breast cancer tumor proliferation score classification.
 
-References:
-[1] https://web.stanford.edu/group/rubinlab/pubs/2243353.pdf
-[2] See [`Preprocessing.ipynb`](Preprocessing.ipynb), and [`breastcancer/preprocessing.py`](breastcancer/preprocessing.py).
-[3] See [`MachineLearning.ipynb`](MachineLearning.ipynb), [`softmax_clf.dml`](softmax_clf.dml), and [`convnet.dml`](convnet.dml).
+References: <br />
+[1] https://web.stanford.edu/group/rubinlab/pubs/2243353.pdf <br />
+[2] [`Preprocessing.ipynb`](Preprocessing.ipynb), [`preprocess.py`](preprocess.py), [`breastcancer/preprocessing.py`](breastcancer/preprocessing.py) <br />
+[3] [`MachineLearning.ipynb`](MachineLearning.ipynb), [`softmax_clf.dml`](breastcancer/softmax_clf.dml), [`convnet.dml`](breastcancer/convnet.dml) <br />
+[4] [`MachineLearning-Keras-ResNet50.ipynb`](MachineLearning-Keras-ResNet50.ipynb)
 
 ![Approach](https://apache.github.io/systemml/img/projects/breast_cancer/approach.svg)
 
 ---
 
 ## Setup (*All nodes* unless other specified):
+* Spark 2.x (ideally bleeding-edge)
 * System Packages:
   * `sudo yum update`
   * `sudo yum install gcc ruby`
@@ -60,11 +62,14 @@ References:
   * `sudo yum install openslide`
 * Python packages:
   * `pip3 install -U matplotlib numpy pandas scipy jupyter ipython scikit-learn scikit-image flask openslide-python`
-* SystemML (only driver):
+* SystemML (bleeding-edge; only driver):
   * `git clone https://github.com/apache/systemml.git`
   * `cd systemml`
   * `mvn clean package`
   * `pip3 install -e src/main/python`
+* Keras (bleeding-edge; only driver):
+  * `pip3 install git+https://github.com/fchollet/keras.git`
+  * `pip3 install tensorflow-gpu` (or `pip3 install tensorflow` for CPU-only)
 * Add the following to the `data` folder (same location on *all* nodes):
   * `training_image_data` folder with the training slides.
   * `testing_image_data` folder with the testing slides.
@@ -72,12 +77,13 @@ References:
 * Layout:
   ```
   - MachineLearning.ipynb
+  - MachineLearning-Keras-ResNet50.ipynb
   - Preprocessing.ipynb
   - breastcancer/
+    - convnet.dml
+    - softmax_clf.dml
     - preprocessing.py
     - visualization.py
-  - convnet.dml
-  - nn/
   - ...
   - data/
     - training_ground_truth.csv
@@ -117,14 +123,14 @@ References:
     spark.executor.memory 50g
     ```
 
-  * Machine Learning:
+  * Machine Learning (SystemML):
     ```
     # Use all executor memory for JVM
     spark.executor.memory 100g
     ```
 
 * `cd` to this `breast_cancer` folder.
-* Start Jupyter + PySpark with the following command (could also use Yarn in client mode with `--master yarn --deploy-mode`):
+* Start Jupyter + PySpark with the following command (could also use Yarn in client mode with `--master yarn --deploy-mode client`):
   ```
   PYSPARK_PYTHON=python3 PYSPARK_DRIVER_PYTHON=jupyter PYSPARK_DRIVER_PYTHON_OPTS="notebook" pyspark --master spark://MASTER_URL:7077 --driver-class-path $SYSTEMML_HOME/target/SystemML.jar --jars $SYSTEMML_HOME/target/SystemML.jar
   ```


[2/5] systemml git commit: [SYSTEMML-1185][SYSTEMML-1766] Merge experimental breast cancer updates

Posted by du...@apache.org.
http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/bin/clean_spark.sh
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/bin/clean_spark.sh b/projects/breast_cancer/bin/clean_spark.sh
new file mode 100755
index 0000000..d92ce87
--- /dev/null
+++ b/projects/breast_cancer/bin/clean_spark.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
+rm -rf metastore_db/
+rm -rf derby.log
+rm -rf spark-warehouse/
+rm -rf scratch_space/

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/bin/monitor_gpu.sh
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/bin/monitor_gpu.sh b/projects/breast_cancer/bin/monitor_gpu.sh
new file mode 100755
index 0000000..b432e3f
--- /dev/null
+++ b/projects/breast_cancer/bin/monitor_gpu.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
+watch -n 0.5 nvidia-smi

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/bin/remove_old_processes.sh
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/bin/remove_old_processes.sh b/projects/breast_cancer/bin/remove_old_processes.sh
new file mode 100755
index 0000000..2a7e903
--- /dev/null
+++ b/projects/breast_cancer/bin/remove_old_processes.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
+ps -ef | grep `whoami` | grep "[p]ython" | awk '{print $2}' | xargs kill -9
+ps -ef | grep `whoami` | grep "[j]ava" | awk '{print $2}' | xargs kill -9

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/bin/run_tensorboard.sh
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/bin/run_tensorboard.sh b/projects/breast_cancer/bin/run_tensorboard.sh
new file mode 100755
index 0000000..8445858
--- /dev/null
+++ b/projects/breast_cancer/bin/run_tensorboard.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
+tensorboard --logdir=experiments --reload_interval 5

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/breastcancer/convnet.dml
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/breastcancer/convnet.dml b/projects/breast_cancer/breastcancer/convnet.dml
new file mode 100644
index 0000000..6cbea39
--- /dev/null
+++ b/projects/breast_cancer/breastcancer/convnet.dml
@@ -0,0 +1,495 @@
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
+/*
+ * Breast Cancer LeNet-like ConvNet Model
+ */
+# Imports
+source("nn/layers/affine.dml") as affine
+source("nn/layers/conv2d_builtin.dml") as conv2d
+source("nn/layers/cross_entropy_loss.dml") as cross_entropy_loss
+source("nn/layers/dropout.dml") as dropout
+source("nn/layers/l2_reg.dml") as l2_reg
+source("nn/layers/max_pool2d_builtin.dml") as max_pool2d
+source("nn/layers/relu.dml") as relu
+source("nn/layers/softmax.dml") as softmax
+#source("nn/optim/adam.dml") as adam
+source("nn/optim/sgd_nesterov.dml") as sgd_nesterov
+
+train = function(matrix[double] X, matrix[double] Y,
+                 matrix[double] X_val, matrix[double] Y_val,
+                 int C, int Hin, int Win,
+                 double lr, double mu, double decay, double lambda,
+                 int batch_size, int epochs, int log_interval,
+                 string checkpoint_dir)
+    return (matrix[double] Wc1, matrix[double] bc1,
+            matrix[double] Wc2, matrix[double] bc2,
+            matrix[double] Wc3, matrix[double] bc3,
+            matrix[double] Wa1, matrix[double] ba1,
+            matrix[double] Wa2, matrix[double] ba2) {
+  /*
+   * Trains a convolutional net using a "LeNet"-like architecture.
+   *
+   * The input matrix, X, has N examples, each represented as a 3D
+   * volume unrolled into a single vector.  The targets, Y, have K
+   * classes, and are one-hot encoded.
+   *
+   * Inputs:
+   *  - X: Input data matrix, of shape (N, C*Hin*Win).
+   *  - Y: Target matrix, of shape (N, K).
+   *  - X_val: Input validation data matrix, of shape (N, C*Hin*Win).
+   *  - Y_val: Target validation matrix, of shape (N, K).
+   *  - C: Number of input channels (dimensionality of input depth).
+   *  - Hin: Input height.
+   *  - Win: Input width.
+   *  - lr: Learning rate.
+   *  - mu: Momentum value.
+   *      Typical values are in the range of [0.5, 0.99], usually
+   *      started at the lower end and annealed towards the higher end.
+   *  - decay: Learning rate decay rate.
+   *  - lambda: Regularization strength.
+   *  - batch_size: Size of mini-batches to train on.
+   *  - epochs: Total number of full training loops over the full data set.
+   *  - log_interval: Interval, in iterations, between log outputs.
+   *  - checkpoint_dir: Directory to store model checkpoints.
+   *
+   * Outputs:
+   *  - Wc1: 1st layer weights (parameters) matrix, of shape (F1, C*Hf*Wf).
+   *  - bc1: 1st layer biases vector, of shape (F1, 1).
+   *  - Wc2: 2nd layer weights (parameters) matrix, of shape (F2, F1*Hf*Wf).
+   *  - bc2: 2nd layer biases vector, of shape (F2, 1).
+   *  - Wc3: 3rd layer weights (parameters) matrix, of shape (F2*(Hin/4)*(Win/4), N3).
+   *  - bc3: 3rd layer biases vector, of shape (1, N3).
+   *  - Wa2: 4th layer weights (parameters) matrix, of shape (N3, K).
+   *  - ba2: 4th layer biases vector, of shape (1, K).
+   */
+  N = nrow(X)
+  K = ncol(Y)
+
+  # Create network:
+  # conv1 -> relu1 -> pool1 -> conv2 -> relu2 -> pool2 -> conv3 -> relu3 -> pool3
+  #  -> affine1 -> relu1 -> dropout1 -> affine2 -> softmax
+  Hf = 3  # filter height
+  Wf = 3  # filter width
+  stride = 1
+  pad = 1  # For same dimensions, (Hf - stride) / 2
+  F1 = 32  # num conv filters in conv1
+  F2 = 32  # num conv filters in conv2
+  F3 = 32  # num conv filters in conv3
+  N1 = 512  # num nodes in affine1
+  # Note: affine2 has K nodes, which is equal to the number of target dimensions (num classes)
+  [Wc1, bc1] = conv2d::init(F1, C, Hf, Wf)  # inputs: (N, C*Hin*Win)
+  [Wc2, bc2] = conv2d::init(F2, F1, Hf, Wf)  # inputs: (N, F1*(Hin/2)*(Win/2))
+  [Wc3, bc3] = conv2d::init(F3, F2, Hf, Wf)  # inputs: (N, F2*(Hin/2^2)*(Win/2^2))
+  [Wa1, ba1] = affine::init(F3*(Hin/2^3)*(Win/2^3), N1)  # inputs: (N, F3*(Hin/2^3)*(Win/2^3))
+  [Wa2, ba2] = affine::init(N1, K)  # inputs: (N, N1)
+  Wa2 = Wa2 / sqrt(2)  # different initialization, since being fed into softmax, instead of relu
+
+  # TODO: Compare optimizers once training is faster.
+  # Initialize SGD w/ Nesterov momentum optimizer
+  vWc1 = sgd_nesterov::init(Wc1); vbc1 = sgd_nesterov::init(bc1)
+  vWc2 = sgd_nesterov::init(Wc2); vbc2 = sgd_nesterov::init(bc2)
+  vWc3 = sgd_nesterov::init(Wc3); vbc3 = sgd_nesterov::init(bc3)
+  vWa1 = sgd_nesterov::init(Wa1); vba1 = sgd_nesterov::init(ba1)
+  vWa2 = sgd_nesterov::init(Wa2); vba2 = sgd_nesterov::init(ba2)
+  #[mWc1, vWc1] = adam::init(Wc1)  # optimizer 1st & 2nd moment state for Wc1
+  #[mbc1, vbc1] = adam::init(bc1)  # optimizer 1st & 2nd moment state for bc1
+  #[mWc2, vWc2] = adam::init(Wc2)  # optimizer 1st & 2nd moment state for Wc2
+  #[mbc2, vbc2] = adam::init(bc2)  # optimizer 1st & 2nd moment state for bc2
+  #[mWc3, vWc3] = adam::init(Wc3)  # optimizer 1st & 2nd moment state for Wc3
+  #[mbc3, vbc3] = adam::init(bc3)  # optimizer 1st & 2nd moment state for bc3
+  #[mWa1, vWa1] = adam::init(Wa1)  # optimizer 1st & 2nd moment state for Wa1
+  #[mba1, vba1] = adam::init(ba1)  # optimizer 1st & 2nd moment state for ba1
+  #[mWa2, vWa2] = adam::init(Wa2)  # optimizer 1st & 2nd moment state for Wa2
+  #[mba2, vba2] = adam::init(ba2)  # optimizer 1st & 2nd moment state for ba2
+  #beta1 = 0.9
+  #beta2 = 0.999
+  #eps = 1e-8
+
+  # TODO: Enable starting val metrics once fast, distributed predictions are available.
+  # Starting validation loss & accuracy
+  #probs_val = predict(X_val, C, Hin, Win, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)
+  #loss_val = cross_entropy_loss::forward(probs_val, Y_val)
+  #accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(Y_val))
+  ## Output results
+  #print("Start: Val Loss: " + loss_val + ", Val Accuracy: " + accuracy_val)
+
+  # Optimize
+  print("Starting optimization")
+  iters = ceil(N / batch_size)
+  for (e in 1:epochs) {
+    for(i in 1:iters) {
+      # Get next batch
+      beg = ((i-1) * batch_size) %% N + 1
+      end = min(N, beg + batch_size - 1)
+      X_batch = X[beg:end,]
+      y_batch = Y[beg:end,]
+
+      # Compute forward pass
+      ## conv layer 1: conv1 -> relu1 -> pool1
+      [outc1, Houtc1, Woutc1] = conv2d::forward(X_batch, Wc1, bc1, C, Hin, Win, Hf, Wf,
+                                                stride, stride, pad, pad)
+      outc1r = relu::forward(outc1)
+      [outc1p, Houtc1p, Woutc1p] = max_pool2d::forward(outc1r, F1, Houtc1, Woutc1, Hf=2, Wf=2,
+                                                       strideh=2, stridew=2, 0, 0)
+      ## conv layer 2: conv2 -> relu2 -> pool2
+      [outc2, Houtc2, Woutc2] = conv2d::forward(outc1p, Wc2, bc2, F1, Houtc1p, Woutc1p, Hf, Wf,
+                                                stride, stride, pad, pad)
+      outc2r = relu::forward(outc2)
+      [outc2p, Houtc2p, Woutc2p] = max_pool2d::forward(outc2r, F2, Houtc2, Woutc2, Hf=2, Wf=2,
+                                                       strideh=2, stridew=2, 0, 0)
+      ## conv layer 3: conv3 -> relu3 -> pool3
+      [outc3, Houtc3, Woutc3] = conv2d::forward(outc2p, Wc3, bc3, F2, Houtc2p, Woutc2p, Hf, Wf,
+                                                stride, stride, pad, pad)
+      outc3r = relu::forward(outc3)
+      [outc3p, Houtc3p, Woutc3p] = max_pool2d::forward(outc3r, F3, Houtc3, Woutc3, Hf=2, Wf=2,
+                                                       strideh=2, stridew=2, 0, 0)
+      ## affine layer 1:  affine1 -> relu1 -> dropout1
+      outa1 = affine::forward(outc3p, Wa1, ba1)
+      outa1r = relu::forward(outa1)
+      [outa1d, maskad1] = dropout::forward(outa1r, 0.5, -1)
+      ## affine layer 2:  affine2 -> softmax
+      outa2 = affine::forward(outa1d, Wa2, ba2)
+      probs = softmax::forward(outa2)
+
+      # Compute data backward pass
+      ## loss:
+      dprobs = cross_entropy_loss::backward(probs, y_batch)
+      ## affine layer 2:  affine2 -> softmax
+      douta2 = softmax::backward(dprobs, outa2)
+      [douta1d, dWa2, dba2] = affine::backward(douta2, outa1d, Wa2, ba2)
+      ## layer 3:  affine3 -> relu3 -> dropout
+      ## affine layer 1:  affine1 -> relu1 -> dropout
+      douta1r = dropout::backward(douta1d, outa1r, 0.5, maskad1)
+      douta1 = relu::backward(douta1r, outa1)
+      [doutc3p, dWa1, dba1] = affine::backward(douta1, outc3p, Wa1, ba1)
+      ## conv layer 3: conv3 -> relu3 -> pool3
+      doutc3r = max_pool2d::backward(doutc3p, Houtc3p, Woutc3p, outc3r, F3, Houtc3, Woutc3,
+                                     Hf=2, Wf=2, strideh=2, stridew=2, 0, 0)
+      doutc3 = relu::backward(doutc3r, outc3)
+      [doutc2p, dWc3, dbc3] = conv2d::backward(doutc3, Houtc3, Woutc3, outc2p, Wc3, bc2, F2,
+                                               Houtc2p, Woutc2p, Hf, Wf, stride, stride, pad, pad)
+      ## conv layer 2: conv2 -> relu2 -> pool2
+      doutc2r = max_pool2d::backward(doutc2p, Houtc2p, Woutc2p, outc2r, F2, Houtc2, Woutc2,
+                                     Hf=2, Wf=2, strideh=2, stridew=2, 0, 0)
+      doutc2 = relu::backward(doutc2r, outc2)
+      [doutc1p, dWc2, dbc2] = conv2d::backward(doutc2, Houtc2, Woutc2, outc1p, Wc2, bc2, F1,
+                                               Houtc1p, Woutc1p, Hf, Wf, stride, stride, pad, pad)
+      ## conv layer 1: conv1 -> relu1 -> pool1
+      doutc1r = max_pool2d::backward(doutc1p, Houtc1p, Woutc1p, outc1r, F1, Houtc1, Woutc1,
+                                     Hf=2, Wf=2, strideh=2, stridew=2, 0, 0)
+      doutc1 = relu::backward(doutc1r, outc1)
+      [dX_batch, dWc1, dbc1] = conv2d::backward(doutc1, Houtc1, Woutc1, X_batch, Wc1, bc1, C,
+                                                Hin, Win, Hf, Wf, stride, stride, pad, pad)
+
+      # Compute regularization backward pass
+      dWc1_reg = l2_reg::backward(Wc1, lambda)
+      dWc2_reg = l2_reg::backward(Wc2, lambda)
+      dWc3_reg = l2_reg::backward(Wc3, lambda)
+      dWa1_reg = l2_reg::backward(Wa1, lambda)
+      dWa2_reg = l2_reg::backward(Wa2, lambda)
+      dWc1 = dWc1 + dWc1_reg
+      dWc2 = dWc2 + dWc2_reg
+      dWc3 = dWc3 + dWc3_reg
+      dWa1 = dWa1 + dWa1_reg
+      dWa2 = dWa2 + dWa2_reg
+
+      # Optimize with SGD w/ Nesterov momentum
+      [Wc1, vWc1] = sgd_nesterov::update(Wc1, dWc1, lr, mu, vWc1)
+      [bc1, vbc1] = sgd_nesterov::update(bc1, dbc1, lr, mu, vbc1)
+      [Wc2, vWc2] = sgd_nesterov::update(Wc2, dWc2, lr, mu, vWc2)
+      [bc2, vbc2] = sgd_nesterov::update(bc2, dbc2, lr, mu, vbc2)
+      [Wc3, vWc3] = sgd_nesterov::update(Wc3, dWc3, lr, mu, vWc3)
+      [bc3, vbc3] = sgd_nesterov::update(bc3, dbc3, lr, mu, vbc3)
+      [Wa1, vWa1] = sgd_nesterov::update(Wa1, dWa1, lr, mu, vWa1)
+      [ba1, vba1] = sgd_nesterov::update(ba1, dba1, lr, mu, vba1)
+      [Wa2, vWa2] = sgd_nesterov::update(Wa2, dWa2, lr, mu, vWa2)
+      [ba2, vba2] = sgd_nesterov::update(ba2, dba2, lr, mu, vba2)
+      #t = e*i - 1
+      #[Wc1, mWc1, vWc1] = adam::update(Wc1, dWc1, lr, beta1, beta2, eps, t, mWc1, vWc1)
+      #[bc1, mbc1, vbc1] = adam::update(bc1, dbc1, lr, beta1, beta2, eps, t, mbc1, vbc1)
+      #[Wc2, mWc2, vWc2] = adam::update(Wc2, dWc2, lr, beta1, beta2, eps, t, mWc2, vWc2)
+      #[bc2, mbc2, vbc2] = adam::update(bc2, dbc2, lr, beta1, beta2, eps, t, mbc2, vbc2)
+      #[Wc3, mWc3, vWc3] = adam::update(Wc3, dWc3, lr, beta1, beta2, eps, t, mWc3, vWc3)
+      #[bc3, mbc3, vbc3] = adam::update(bc3, dbc3, lr, beta1, beta2, eps, t, mbc3, vbc3)
+      #[Wa1, mWa1, vWa1] = adam::update(Wa1, dWa1, lr, beta1, beta2, eps, t, mWa1, vWa1)
+      #[ba1, mba1, vba1] = adam::update(ba1, dba1, lr, beta1, beta2, eps, t, mba1, vba1)
+      #[Wa2, mWa2, vWa2] = adam::update(Wa2, dWa2, lr, beta1, beta2, eps, t, mWa2, vWa2)
+      #[ba2, mba2, vba2] = adam::update(ba2, dba2, lr, beta1, beta2, eps, t, mba2, vba2)
+
+      # Compute loss & accuracy for training & validation data every `log_interval` iterations.
+      if (i %% log_interval == 0) {
+        # Compute training loss & accuracy
+        loss_data = cross_entropy_loss::forward(probs, y_batch)
+        loss_reg_Wc1 = l2_reg::forward(Wc1, lambda)
+        loss_reg_Wc2 = l2_reg::forward(Wc2, lambda)
+        loss_reg_Wc3 = l2_reg::forward(Wc3, lambda)
+        loss_reg_Wa1 = l2_reg::forward(Wa1, lambda)
+        loss_reg_Wa2 = l2_reg::forward(Wa2, lambda)
+        loss = loss_data + loss_reg_Wc1 + loss_reg_Wc2 + loss_reg_Wc3 + loss_reg_Wa1 + loss_reg_Wa2
+        accuracy = mean(rowIndexMax(probs) == rowIndexMax(y_batch))
+
+        # TODO: Consider enabling val metrics here once fast, distributed predictions are available.
+        ## Compute validation loss & accuracy
+        #probs_val = predict(X_val, C, Hin, Win, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)
+        #loss_val = cross_entropy_loss::forward(probs_val, Y_val)
+        #accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(Y_val))
+
+        ## Output results
+        #print("Epoch: " + e + ", Iter: " + i + ", Train Loss: " + loss + ", Train Accuracy: "
+        #      + accuracy + ", Val Loss: " + loss_val + ", Val Accuracy: " + accuracy_val
+        #      + ", lr: " + lr + ", mu " + mu)
+        # Output results
+        print("Epoch: " + e + "/" + epochs + ", Iter: " + i + "/" + iters
+              + ", Train Loss: " + loss + ", Train Accuracy: " + accuracy)
+      }
+    }
+
+    # Compute validation loss & accuracy for validation data every epoch
+    probs_val = predict(X_val, C, Hin, Win, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)
+    loss_val = cross_entropy_loss::forward(probs_val, Y_val)
+    accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(Y_val))
+
+    # Output results
+    print("Epoch: " + e + "/" + epochs + ", Val Loss: " + loss_val
+          + ", Val Accuracy: " + accuracy_val + ", lr: " + lr + ", mu " + mu)
+
+    # Checkpoint model
+    dir = checkpoint_dir + e + "/"
+    dummy = checkpoint(dir, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)
+    str = "lr: " + lr + ", mu: " + mu + ", decay: " + decay + ", lambda: " + lambda
+          + ", batch_size: " + batch_size
+    name = dir + accuracy_val
+    write(str, name)
+
+    # Anneal momentum towards 0.999
+    mu = mu + (0.999 - mu)/(1+epochs-e)
+    # Decay learning rate
+    lr = lr * decay
+  }
+}
+
+checkpoint = function(string dir,
+                      matrix[double] Wc1, matrix[double] bc1,
+                      matrix[double] Wc2, matrix[double] bc2,
+                      matrix[double] Wc3, matrix[double] bc3,
+                      matrix[double] Wa1, matrix[double] ba1,
+                      matrix[double] Wa2, matrix[double] ba2) {
+  /*
+   * Saves the model parameters to files under the given directory,
+   * one file per parameter matrix, in SystemML binary format.
+   *
+   * Inputs:
+   *  - dir: Directory in which to save model parameters.
+   *  - Wc1: 1st conv layer weights (parameters) matrix, of shape (F1, C*Hf*Wf).
+   *  - bc1: 1st conv layer biases vector, of shape (F1, 1).
+   *  - Wc2: 2nd conv layer weights (parameters) matrix, of shape (F2, F1*Hf*Wf).
+   *  - bc2: 2nd conv layer biases vector, of shape (F2, 1).
+   *  - Wc3: 3rd conv layer weights (parameters) matrix, of shape (F3, F2*Hf*Wf).
+   *  - bc3: 3rd conv layer biases vector, of shape (F3, 1).
+   *  - Wa1: 1st affine layer weights (parameters) matrix, of shape (F3*(Hin/2^3)*(Win/2^3), N1).
+   *  - ba1: 1st affine layer biases vector, of shape (1, N1).
+   *  - Wa2: 2nd affine layer weights (parameters) matrix, of shape (N1, K).
+   *  - ba2: 2nd affine layer biases vector, of shape (1, K).
+   *
+   * Outputs:
+   *  - None (the parameter matrices are written to files under dir as a side effect).
+   */
+  write(Wc1, dir + "Wc1", format="binary")
+  write(bc1, dir + "bc1", format="binary")
+  write(Wc2, dir + "Wc2", format="binary")
+  write(bc2, dir + "bc2", format="binary")
+  write(Wc3, dir + "Wc3", format="binary")
+  write(bc3, dir + "bc3", format="binary")
+  write(Wa1, dir + "Wa1", format="binary")
+  write(ba1, dir + "ba1", format="binary")
+  write(Wa2, dir + "Wa2", format="binary")
+  write(ba2, dir + "ba2", format="binary")
+}
+
+predict = function(matrix[double] X, int C, int Hin, int Win,
+                   matrix[double] Wc1, matrix[double] bc1,
+                   matrix[double] Wc2, matrix[double] bc2,
+                   matrix[double] Wc3, matrix[double] bc3,
+                   matrix[double] Wa1, matrix[double] ba1,
+                   matrix[double] Wa2, matrix[double] ba2)
+    return (matrix[double] probs) {
+  /*
+   * Computes the class probability predictions of a convolutional
+   * net using the "LeNet" architecture.
+   *
+   * The input matrix, X, has N examples, each represented as a 3D
+   * volume unrolled into a single vector.
+   *
+   * Inputs:
+   *  - X: Input data matrix, of shape (N, C*Hin*Win).
+   *  - C: Number of input channels (dimensionality of input depth).
+   *  - Hin: Input height.
+   *  - Win: Input width.
+   *  - Wc1: 1st conv layer weights (parameters) matrix, of shape (F1, C*Hf*Wf).
+   *  - bc1: 1st conv layer biases vector, of shape (F1, 1).
+   *  - Wc2: 2nd conv layer weights (parameters) matrix, of shape (F2, F1*Hf*Wf).
+   *  - bc2: 2nd conv layer biases vector, of shape (F2, 1).
+   *  - Wc3: 3rd conv layer weights (parameters) matrix, of shape (F3, F2*Hf*Wf).
+   *  - bc3: 3rd conv layer biases vector, of shape (F3, 1).
+   *  - Wa1: 1st affine layer weights (parameters) matrix, of shape (F3*(Hin/2^3)*(Win/2^3), N1).
+   *  - ba1: 1st affine layer biases vector, of shape (1, N1).
+   *  - Wa2: 2nd affine layer weights (parameters) matrix, of shape (N1, K).
+   *  - ba2: 2nd affine layer biases vector, of shape (1, K).
+   *
+   * Outputs:
+   *  - probs: Class probabilities, of shape (N, K).
+   */
+  N = nrow(X)
+
+  # Network:
+  # conv1 -> relu1 -> pool1 -> conv2 -> relu2 -> pool2 -> conv3 -> relu3 -> pool3
+  #  -> affine1 -> relu1 -> affine2 -> softmax
+  Hf = 3  # filter height
+  Wf = 3  # filter width
+  stride = 1
+  pad = 1  # For same dimensions, (Hf - stride) / 2
+
+  # Layer sizes are recovered from the parameter matrices, so this function works
+  # for any model checkpoint with this architecture.
+  F1 = nrow(Wc1)  # num conv filters in conv1
+  F2 = nrow(Wc2)  # num conv filters in conv2
+  F3 = nrow(Wc3)  # num conv filters in conv3
+  N1 = ncol(Wa1)  # num nodes in affine1
+  K = ncol(Wa2)  # num nodes in affine2, equal to number of target dimensions (num classes)
+
+  # TODO: Implement fast, distributed conv & max pooling operators so that predictions
+  # can be computed in a full-batch, distributed manner.  Alternatively, improve `parfor`
+  # so that it can be efficiently used for parallel predictions.
+  ## Compute forward pass
+  ### conv layer 1: conv1 -> relu1 -> pool1
+  #[outc1, Houtc1, Woutc1] = conv2d::forward(X, Wc1, bc1, C, Hin, Win, Hf, Wf, stride, stride,
+  #                                          pad, pad)
+  #outc1r = relu::forward(outc1)
+  #[outc1p, Houtc1p, Woutc1p] = max_pool2d::forward(outc1r, F1, Houtc1, Woutc1, Hf=2, Wf=2,
+  #                                                 strideh=2, stridew=2, 0, 0)
+  ### conv layer 2: conv2 -> relu2 -> pool2
+  #[outc2, Houtc2, Woutc2] = conv2d::forward(outc1p, Wc2, bc2, F1, Houtc1p, Woutc1p, Hf, Wf,
+  #                                          stride, stride, pad, pad)
+  #outc2r = relu::forward(outc2)
+  #[outc2p, Houtc2p, Woutc2p] = max_pool2d::forward(outc2r, F2, Houtc2, Woutc2, Hf=2, Wf=2,
+  #                                                 strideh=2, stridew=2, 0, 0)
+  ### conv layer 3: conv3 -> relu3 -> pool3
+  #[outc3, Houtc3, Woutc3] = conv2d::forward(outc2p, Wc3, bc3, F2, Houtc2p, Woutc2p, Hf, Wf,
+  #                                          stride, stride, pad, pad)
+  #outc3r = relu::forward(outc3)
+  #[outc3p, Houtc3p, Woutc3p] = max_pool2d::forward(outc3r, F3, Houtc3, Woutc3, Hf=2, Wf=2,
+  #                                                 strideh=2, stridew=2, 0, 0)
+  ### affine layer 1:  affine1 -> relu1 -> dropout
+  #outa1 = affine::forward(outc3p, Wa1, ba1)
+  #outa1r = relu::forward(outa1)
+  ##[outa1d, maskad1] = dropout::forward(outa1r, 0.5, -1)
+  ### affine layer 2:  affine2 -> softmax
+  #outa2 = affine::forward(outa1r, Wa2, ba2)
+  #probs = softmax::forward(outa2)
+
+  # Compute predictions over mini-batches
+  probs = matrix(0, rows=N, cols=K)
+  batch_size = 50  # fixed prediction mini-batch size (predictions are batch-independent)
+  iters = ceil(N / batch_size)
+  for(i in 1:iters) {
+  # TODO: `parfor` should work here, possibly as an alternative to distributed predictions.
+  #parfor(i in 1:iters, check=0, mode=REMOTE_SPARK, resultmerge=REMOTE_SPARK) {
+    # Get next batch; the final batch may be smaller than batch_size.
+    beg = ((i-1) * batch_size) %% N + 1
+    end = min(N, beg + batch_size - 1)
+    X_batch = X[beg:end,]
+
+    # Compute forward pass
+    ## conv layer 1: conv1 -> relu1 -> pool1
+    [outc1, Houtc1, Woutc1] = conv2d::forward(X_batch, Wc1, bc1, C, Hin, Win, Hf, Wf,
+                                              stride, stride, pad, pad)
+    outc1r = relu::forward(outc1)
+    [outc1p, Houtc1p, Woutc1p] = max_pool2d::forward(outc1r, F1, Houtc1, Woutc1, Hf=2, Wf=2,
+                                                     strideh=2, stridew=2, 0, 0)
+    ## conv layer 2: conv2 -> relu2 -> pool2
+    [outc2, Houtc2, Woutc2] = conv2d::forward(outc1p, Wc2, bc2, F1, Houtc1p, Woutc1p, Hf, Wf,
+                                              stride, stride, pad, pad)
+    outc2r = relu::forward(outc2)
+    [outc2p, Houtc2p, Woutc2p] = max_pool2d::forward(outc2r, F2, Houtc2, Woutc2, Hf=2, Wf=2,
+                                                     strideh=2, stridew=2, 0, 0)
+    ## conv layer 3: conv3 -> relu3 -> pool3
+    [outc3, Houtc3, Woutc3] = conv2d::forward(outc2p, Wc3, bc3, F2, Houtc2p, Woutc2p, Hf, Wf,
+                                              stride, stride, pad, pad)
+    outc3r = relu::forward(outc3)
+    [outc3p, Houtc3p, Woutc3p] = max_pool2d::forward(outc3r, F3, Houtc3, Woutc3, Hf=2, Wf=2,
+                                                     strideh=2, stridew=2, 0, 0)
+    ## affine layer 1:  affine1 -> relu1 -> dropout
+    outa1 = affine::forward(outc3p, Wa1, ba1)
+    outa1r = relu::forward(outa1)
+    # Dropout is intentionally disabled at prediction time.
+    #[outa1d, maskad1] = dropout::forward(outa1r, 0.5, -1)
+    ## affine layer 2:  affine2 -> softmax
+    outa2 = affine::forward(outa1r, Wa2, ba2)
+    probs_batch = softmax::forward(outa2)
+
+    # Store predictions
+    probs[beg:end,] = probs_batch
+  }
+}
+
+eval = function(matrix[double] probs, matrix[double] Y)
+    return (double loss, double accuracy) {
+  /*
+   * Evaluates a convolutional net using the "LeNet" architecture.
+   *
+   * The probs matrix contains the class probability predictions
+   * of K classes over N examples.  The targets, Y, have K classes,
+   * and are one-hot encoded.
+   *
+   * Inputs:
+   *  - probs: Class probabilities, of shape (N, K).
+   *  - Y: Target matrix, of shape (N, K).
+   *
+   * Outputs:
+   *  - loss: Scalar loss, of shape (1).
+   *  - accuracy: Scalar accuracy, of shape (1).
+   */
+  # Compute loss & accuracy
+  loss = cross_entropy_loss::forward(probs, Y)
+  # A prediction is correct when the argmax of probs matches the argmax of the one-hot target.
+  correct_pred = rowIndexMax(probs) == rowIndexMax(Y)
+  accuracy = mean(correct_pred)
+}
+
+generate_dummy_data = function()
+    return (matrix[double] X, matrix[double] Y, int C, int Hin, int Win) {
+  /*
+   * Generate a dummy dataset similar to the breast cancer dataset.
+   *
+   * Outputs:
+   *  - X: Input data matrix, of shape (N, D), with D = C*Hin*Win.
+   *  - Y: One-hot encoded target matrix, of shape (N, K).
+   *  - C: Number of input channels (dimensionality of input depth).
+   *  - Hin: Input height.
+   *  - Win: Input width.
+   */
+  # Generate dummy input data
+  N = 1024  # num examples
+  C = 3  # num input channels
+  Hin = 256  # input height
+  Win = 256  # input width
+  K = 3  # num target classes
+  X = rand(rows=N, cols=C*Hin*Win, pdf="normal")
+  classes = round(rand(rows=N, cols=1, min=1, max=K, pdf="uniform"))
+  # One-hot encoding.  Note: The output dimensions (N, K) are passed explicitly so
+  # that Y always has K columns, even if some class happens not to be sampled.
+  Y = table(seq(1, N), classes, N, K)
+}
+

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/breastcancer/convnet_distrib_sgd.dml
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/breastcancer/convnet_distrib_sgd.dml b/projects/breast_cancer/breastcancer/convnet_distrib_sgd.dml
new file mode 100644
index 0000000..0c5869e
--- /dev/null
+++ b/projects/breast_cancer/breastcancer/convnet_distrib_sgd.dml
@@ -0,0 +1,592 @@
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
+/*
+ * Breast Cancer LeNet-like ConvNet Model
+ */
+# Imports
+source("nn/layers/affine.dml") as affine
+source("nn/layers/conv2d_builtin.dml") as conv2d
+source("nn/layers/cross_entropy_loss.dml") as cross_entropy_loss
+source("nn/layers/dropout.dml") as dropout
+source("nn/layers/l2_reg.dml") as l2_reg
+source("nn/layers/max_pool2d_builtin.dml") as max_pool2d
+source("nn/layers/relu.dml") as relu
+source("nn/layers/softmax.dml") as softmax
+#source("nn/optim/adam.dml") as adam
+source("nn/optim/sgd_nesterov.dml") as sgd_nesterov
+
+train = function(matrix[double] X, matrix[double] Y,
+                 matrix[double] X_val, matrix[double] Y_val,
+                 int C, int Hin, int Win,
+                 double lr, double mu, double decay, double lambda,
+                 int batch_size, int parallel_batches, int epochs,
+                 int log_interval, string checkpoint_dir)
+    return (matrix[double] Wc1, matrix[double] bc1,
+            matrix[double] Wc2, matrix[double] bc2,
+            matrix[double] Wc3, matrix[double] bc3,
+            matrix[double] Wa1, matrix[double] ba1,
+            matrix[double] Wa2, matrix[double] ba2) {
+  /*
+   * Trains a convolutional net using a "LeNet"-like architecture.
+   *
+   * The input matrix, X, has N examples, each represented as a 3D
+   * volume unrolled into a single vector.  The targets, Y, have K
+   * classes, and are one-hot encoded.
+   *
+   * Inputs:
+   *  - X: Input data matrix, of shape (N, C*Hin*Win).
+   *  - Y: Target matrix, of shape (N, K).
+   *  - X_val: Input validation data matrix, of shape (N, C*Hin*Win).
+   *  - Y_val: Target validation matrix, of shape (N, K).
+   *  - C: Number of input channels (dimensionality of input depth).
+   *  - Hin: Input height.
+   *  - Win: Input width.
+   *  - lr: Learning rate.
+   *  - mu: Momentum value.
+   *      Typical values are in the range of [0.5, 0.99], usually
+   *      started at the lower end and annealed towards the higher end.
+   *  - decay: Learning rate decay rate.
+   *  - lambda: Regularization strength.
+   *  - batch_size: Size of mini-batches to train on.
+   *  - parallel_batches: Number of parallel batches to run for
+   *      distributed SGD.
+   *  - epochs: Total number of full training loops over the full data
+   *      set.
+   *  - log_interval: Interval, in iterations, between log outputs.
+   *  - checkpoint_dir: Directory to store model checkpoints.
+   *
+   * Outputs:
+   *  - Wc1: 1st conv layer weights (parameters) matrix, of
+   *      shape (F1, C*Hf*Wf).
+   *  - bc1: 1st conv layer biases vector, of shape (F1, 1).
+   *  - Wc2: 2nd conv layer weights (parameters) matrix, of
+   *      shape (F2, F1*Hf*Wf).
+   *  - bc2: 2nd conv layer biases vector, of shape (F2, 1).
+   *  - Wc3: 3rd conv layer weights (parameters) matrix, of
+   *      shape (F3, F2*Hf*Wf).
+   *  - bc3: 3rd conv layer biases vector, of shape (F3, 1).
+   *  - Wa1: 1st affine layer weights (parameters) matrix, of
+   *      shape (F3*(Hin/2^3)*(Win/2^3), N1).
+   *  - ba1: 1st affine layer biases vector, of shape (1, N1).
+   *  - Wa2: 2nd affine layer weights (parameters) matrix, of shape (N1, K).
+   *  - ba2: 2nd affine layer biases vector, of shape (1, K).
+   */
+  N = nrow(X)
+  K = ncol(Y)
+
+  # Create network:
+  # conv1 -> relu1 -> pool1 -> conv2 -> relu2 -> pool2 -> conv3 -> relu3 -> pool3
+  #  -> affine1 -> relu1 -> dropout1 -> affine2 -> softmax
+  Hf = 3  # filter height
+  Wf = 3  # filter width
+  stride = 1
+  pad = 1  # For same dimensions, (Hf - stride) / 2
+  F1 = 32  # num conv filters in conv1
+  F2 = 32  # num conv filters in conv2
+  F3 = 32  # num conv filters in conv3
+  N1 = 256  # num nodes in affine1
+  # Note: affine2 has K nodes, which is equal to the number of target dimensions (num classes)
+  [Wc1, bc1] = conv2d::init(F1, C, Hf, Wf)  # inputs: (N, C*Hin*Win)
+  [Wc2, bc2] = conv2d::init(F2, F1, Hf, Wf)  # inputs: (N, F1*(Hin/2)*(Win/2))
+  [Wc3, bc3] = conv2d::init(F3, F2, Hf, Wf)  # inputs: (N, F2*(Hin/2^2)*(Win/2^2))
+  [Wa1, ba1] = affine::init(F3*(Hin/2^3)*(Win/2^3), N1)  # inputs: (N, F3*(Hin/2^3)*(Win/2^3))
+  [Wa2, ba2] = affine::init(N1, K)  # inputs: (N, N1)
+  Wa2 = Wa2 / sqrt(2)  # different initialization, since being fed into softmax, instead of relu
+
+  # TODO: Compare optimizers once training is faster.
+  # Initialize SGD w/ Nesterov momentum optimizer
+  vWc1 = sgd_nesterov::init(Wc1); vbc1 = sgd_nesterov::init(bc1)
+  vWc2 = sgd_nesterov::init(Wc2); vbc2 = sgd_nesterov::init(bc2)
+  vWc3 = sgd_nesterov::init(Wc3); vbc3 = sgd_nesterov::init(bc3)
+  vWa1 = sgd_nesterov::init(Wa1); vba1 = sgd_nesterov::init(ba1)
+  vWa2 = sgd_nesterov::init(Wa2); vba2 = sgd_nesterov::init(ba2)
+  #[mWc1, vWc1] = adam::init(Wc1)  # optimizer 1st & 2nd moment state for Wc1
+  #[mbc1, vbc1] = adam::init(bc1)  # optimizer 1st & 2nd moment state for bc1
+  #[mWc2, vWc2] = adam::init(Wc2)  # optimizer 1st & 2nd moment state for Wc2
+  #[mbc2, vbc2] = adam::init(bc2)  # optimizer 1st & 2nd moment state for bc2
+  #[mWc3, vWc3] = adam::init(Wc3)  # optimizer 1st & 2nd moment state for Wc3
+  #[mbc3, vbc3] = adam::init(bc3)  # optimizer 1st & 2nd moment state for bc3
+  #[mWa1, vWa1] = adam::init(Wa1)  # optimizer 1st & 2nd moment state for Wa1
+  #[mba1, vba1] = adam::init(ba1)  # optimizer 1st & 2nd moment state for ba1
+  #[mWa2, vWa2] = adam::init(Wa2)  # optimizer 1st & 2nd moment state for Wa2
+  #[mba2, vba2] = adam::init(ba2)  # optimizer 1st & 2nd moment state for ba2
+  #beta1 = 0.9
+  #beta2 = 0.999
+  #eps = 1e-8
+
+  # TODO: Enable starting val metrics once fast, distributed predictions are available.
+  # Starting validation loss & accuracy
+  #probs_val = predict(X_val, C, Hin, Win, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)
+  #loss_val = cross_entropy_loss::forward(probs_val, Y_val)
+  #accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(Y_val))
+  ## Output results
+  #print("Start: Val Loss: " + loss_val + ", Val Accuracy: " + accuracy_val)
+
+  # Optimize
+  print("Starting optimization")
+  group_batch_size = parallel_batches*batch_size
+  groups = ceil(N/group_batch_size)
+  print("Total Epochs: "+epochs+", Batch size: "+batch_size+
+        ", Degree of parallelism: "+parallel_batches+", Group batch size: "+group_batch_size+
+        ", Num groups: "+groups)
+  # Loop over the dataset multiple times
+  for (e in 1:epochs) {
+    # Grab groups of mini-batches
+    for (g in 1:groups) {
+      # Get next group of mini-batches
+      # NOTE: At the end of the dataset, the last mini-batch in this group could be smaller than
+      # the other groups.
+      #group_beg = ((g-1) * group_batch_size) %% N + 1
+      #group_end = min(N, group_beg + group_batch_size - 1)
+      #X_group_batch = X[group_beg:group_end,]
+      #Y_group_batch = Y[group_beg:group_end,]
+      # NOTE(review): group_beg/group_end are currently hardcoded to the first group, so every
+      # iteration trains on the same rows.  Presumably a temporary workaround related to the
+      # parfor size-propagation issue noted below -- confirm before use beyond experimentation.
+      group_beg = 1
+      group_end = group_batch_size
+      X_group_batch = X[group_beg:group_end,]
+      Y_group_batch = Y[group_beg:group_end,]
+
+      # Create data structure to store gradients computed in parallel
+      dWc1_agg = matrix(0, rows=parallel_batches, cols=nrow(Wc1)*ncol(Wc1))
+      dWc2_agg = matrix(0, rows=parallel_batches, cols=nrow(Wc2)*ncol(Wc2))
+      dWc3_agg = matrix(0, rows=parallel_batches, cols=nrow(Wc3)*ncol(Wc3))
+      dWa1_agg = matrix(0, rows=parallel_batches, cols=nrow(Wa1)*ncol(Wa1))
+      dWa2_agg = matrix(0, rows=parallel_batches, cols=nrow(Wa2)*ncol(Wa2))
+      dbc1_agg = matrix(0, rows=parallel_batches, cols=nrow(bc1)*ncol(bc1))
+      dbc2_agg = matrix(0, rows=parallel_batches, cols=nrow(bc2)*ncol(bc2))
+      dbc3_agg = matrix(0, rows=parallel_batches, cols=nrow(bc3)*ncol(bc3))
+      dba1_agg = matrix(0, rows=parallel_batches, cols=nrow(ba1)*ncol(ba1))
+      dba2_agg = matrix(0, rows=parallel_batches, cols=nrow(ba2)*ncol(ba2))
+
+      # Run graph on each mini-batch in this group in parallel (ideally on multiple GPUs)
+      # NOTE: The parfor is causing the sizes to not be propagated into the body both here and
+      # in `predict`.  It is not caused by the batch extraction below.  It is the parfor.
+      parfor (j in 1:parallel_batches, mode=REMOTE_SPARK, opt=CONSTRAINED) {
+      #parfor (j in 1:parallel_batches) {
+        # Get a mini-batch in this group
+        beg = ((j-1) * batch_size) %% nrow(X_group_batch) + 1
+        end = min(nrow(X_group_batch), beg + batch_size - 1)
+        X_batch = X_group_batch[beg:end,]
+        Y_batch = Y_group_batch[beg:end,]
+        #beg = 1
+        #end = batch_size
+        #X_batch = X_group_batch[beg:end,]
+        #Y_batch = Y_group_batch[beg:end,]
+
+        # Compute forward pass
+        ## conv layer 1: conv1 -> relu1 -> pool1
+        [outc1, Houtc1, Woutc1] = conv2d::forward(X_batch, Wc1, bc1, C, Hin, Win, Hf, Wf,
+                                                  stride, stride, pad, pad)
+        outc1r = relu::forward(outc1)
+        [outc1p, Houtc1p, Woutc1p] = max_pool2d::forward(outc1r, F1, Houtc1, Woutc1, Hf=2, Wf=2,
+                                                         strideh=2, stridew=2, 0, 0)
+        ## conv layer 2: conv2 -> relu2 -> pool2
+        [outc2, Houtc2, Woutc2] = conv2d::forward(outc1p, Wc2, bc2, F1, Houtc1p, Woutc1p, Hf, Wf,
+                                                  stride, stride, pad, pad)
+        outc2r = relu::forward(outc2)
+        [outc2p, Houtc2p, Woutc2p] = max_pool2d::forward(outc2r, F2, Houtc2, Woutc2, Hf=2, Wf=2,
+                                                         strideh=2, stridew=2, 0, 0)
+        ## conv layer 3: conv3 -> relu3 -> pool3
+        [outc3, Houtc3, Woutc3] = conv2d::forward(outc2p, Wc3, bc3, F2, Houtc2p, Woutc2p, Hf, Wf,
+                                                  stride, stride, pad, pad)
+        outc3r = relu::forward(outc3)
+        [outc3p, Houtc3p, Woutc3p] = max_pool2d::forward(outc3r, F3, Houtc3, Woutc3, Hf=2, Wf=2,
+                                                         strideh=2, stridew=2, 0, 0)
+        ## affine layer 1:  affine1 -> relu1 -> dropout1
+        outa1 = affine::forward(outc3p, Wa1, ba1)
+        outa1r = relu::forward(outa1)
+        [outa1d, maskad1] = dropout::forward(outa1r, 0.5, -1)
+        ## affine layer 2:  affine2 -> softmax
+        outa2 = affine::forward(outa1d, Wa2, ba2)
+        probs = softmax::forward(outa2)
+
+        # Compute data backward pass
+        ## loss:
+        dprobs = cross_entropy_loss::backward(probs, Y_batch)
+        ## affine layer 2:  affine2 -> softmax
+        douta2 = softmax::backward(dprobs, outa2)
+        [douta1d, dWa2, dba2] = affine::backward(douta2, outa1d, Wa2, ba2)
+        ## affine layer 1:  affine1 -> relu1 -> dropout
+        douta1r = dropout::backward(douta1d, outa1r, 0.5, maskad1)
+        douta1 = relu::backward(douta1r, outa1)
+        [doutc3p, dWa1, dba1] = affine::backward(douta1, outc3p, Wa1, ba1)
+        ## conv layer 3: conv3 -> relu3 -> pool3
+        doutc3r = max_pool2d::backward(doutc3p, Houtc3p, Woutc3p, outc3r, F3, Houtc3, Woutc3,
+                                       Hf=2, Wf=2, strideh=2, stridew=2, 0, 0)
+        doutc3 = relu::backward(doutc3r, outc3)
+        # Fix: pass bc3 (not bc2) as the bias for conv layer 3, matching the forward call above.
+        [doutc2p, dWc3, dbc3] = conv2d::backward(doutc3, Houtc3, Woutc3, outc2p, Wc3, bc3, F2,
+                                                 Houtc2p, Woutc2p, Hf, Wf, stride, stride, pad, pad)
+        ## conv layer 2: conv2 -> relu2 -> pool2
+        doutc2r = max_pool2d::backward(doutc2p, Houtc2p, Woutc2p, outc2r, F2, Houtc2, Woutc2,
+                                       Hf=2, Wf=2, strideh=2, stridew=2, 0, 0)
+        doutc2 = relu::backward(doutc2r, outc2)
+        [doutc1p, dWc2, dbc2] = conv2d::backward(doutc2, Houtc2, Woutc2, outc1p, Wc2, bc2, F1,
+                                                 Houtc1p, Woutc1p, Hf, Wf, stride, stride, pad, pad)
+        ## conv layer 1: conv1 -> relu1 -> pool1
+        doutc1r = max_pool2d::backward(doutc1p, Houtc1p, Woutc1p, outc1r, F1, Houtc1, Woutc1,
+                                       Hf=2, Wf=2, strideh=2, stridew=2, 0, 0)
+        doutc1 = relu::backward(doutc1r, outc1)
+        [dX_batch, dWc1, dbc1] = conv2d::backward(doutc1, Houtc1, Woutc1, X_batch, Wc1, bc1, C,
+                                                  Hin, Win, Hf, Wf, stride, stride, pad, pad)
+
+        # Compute regularization backward pass on weights
+        dWc1_reg = l2_reg::backward(Wc1, lambda)
+        dWc2_reg = l2_reg::backward(Wc2, lambda)
+        dWc3_reg = l2_reg::backward(Wc3, lambda)
+        dWa1_reg = l2_reg::backward(Wa1, lambda)
+        dWa2_reg = l2_reg::backward(Wa2, lambda)
+        dWc1 = dWc1 + dWc1_reg
+        dWc2 = dWc2 + dWc2_reg
+        dWc3 = dWc3 + dWc3_reg
+        dWa1 = dWa1 + dWa1_reg
+        dWa2 = dWa2 + dWa2_reg
+
+        # Flatten and store gradients for this parallel execution
+        # Note: We multiply by a weighting to allow for proper gradient averaging during the
+        # aggregation even with uneven batch sizes.
+        weighting = nrow(X_batch) / nrow(X_group_batch)
+        dWc1_agg[j,] = matrix(dWc1, rows=1, cols=nrow(Wc1)*ncol(Wc1)) * weighting
+        dWc2_agg[j,] = matrix(dWc2, rows=1, cols=nrow(Wc2)*ncol(Wc2)) * weighting
+        dWc3_agg[j,] = matrix(dWc3, rows=1, cols=nrow(Wc3)*ncol(Wc3)) * weighting
+        dWa1_agg[j,] = matrix(dWa1, rows=1, cols=nrow(Wa1)*ncol(Wa1)) * weighting
+        dWa2_agg[j,] = matrix(dWa2, rows=1, cols=nrow(Wa2)*ncol(Wa2)) * weighting
+        dbc1_agg[j,] = matrix(dbc1, rows=1, cols=nrow(bc1)*ncol(bc1)) * weighting
+        dbc2_agg[j,] = matrix(dbc2, rows=1, cols=nrow(bc2)*ncol(bc2)) * weighting
+        dbc3_agg[j,] = matrix(dbc3, rows=1, cols=nrow(bc3)*ncol(bc3)) * weighting
+        dba1_agg[j,] = matrix(dba1, rows=1, cols=nrow(ba1)*ncol(ba1)) * weighting
+        dba2_agg[j,] = matrix(dba2, rows=1, cols=nrow(ba2)*ncol(ba2)) * weighting
+      }
+
+      # Aggregate gradients
+      # Note: The gradients are already pre-multiplied by a weight so that addition here
+      # results in gradient averaging even with different possible mini-batch sizes. I.e.,
+      # the final mini-batch at the end of the dataset could be smaller than the other mini-batches.
+      dWc1 = matrix(colSums(dWc1_agg), rows=nrow(Wc1), cols=ncol(Wc1))
+      dWc2 = matrix(colSums(dWc2_agg), rows=nrow(Wc2), cols=ncol(Wc2))
+      dWc3 = matrix(colSums(dWc3_agg), rows=nrow(Wc3), cols=ncol(Wc3))
+      dWa1 = matrix(colSums(dWa1_agg), rows=nrow(Wa1), cols=ncol(Wa1))
+      dWa2 = matrix(colSums(dWa2_agg), rows=nrow(Wa2), cols=ncol(Wa2))
+      dbc1 = matrix(colSums(dbc1_agg), rows=nrow(bc1), cols=ncol(bc1))
+      dbc2 = matrix(colSums(dbc2_agg), rows=nrow(bc2), cols=ncol(bc2))
+      dbc3 = matrix(colSums(dbc3_agg), rows=nrow(bc3), cols=ncol(bc3))
+      dba1 = matrix(colSums(dba1_agg), rows=nrow(ba1), cols=ncol(ba1))
+      dba2 = matrix(colSums(dba2_agg), rows=nrow(ba2), cols=ncol(ba2))
+
+      # Optimize with SGD w/ Nesterov momentum
+      [Wc1, vWc1] = sgd_nesterov::update(Wc1, dWc1, lr, mu, vWc1)
+      [Wc2, vWc2] = sgd_nesterov::update(Wc2, dWc2, lr, mu, vWc2)
+      [Wc3, vWc3] = sgd_nesterov::update(Wc3, dWc3, lr, mu, vWc3)
+      [Wa1, vWa1] = sgd_nesterov::update(Wa1, dWa1, lr, mu, vWa1)
+      [Wa2, vWa2] = sgd_nesterov::update(Wa2, dWa2, lr, mu, vWa2)
+      [bc1, vbc1] = sgd_nesterov::update(bc1, dbc1, lr, mu, vbc1)
+      [bc2, vbc2] = sgd_nesterov::update(bc2, dbc2, lr, mu, vbc2)
+      [bc3, vbc3] = sgd_nesterov::update(bc3, dbc3, lr, mu, vbc3)
+      [ba1, vba1] = sgd_nesterov::update(ba1, dba1, lr, mu, vba1)
+      [ba2, vba2] = sgd_nesterov::update(ba2, dba2, lr, mu, vba2)
+      #t = (e-1)*groups + g - 1  # global iteration count for adam bias correction
+      #[Wc1, mWc1, vWc1] = adam::update(Wc1, dWc1, lr, beta1, beta2, eps, t, mWc1, vWc1)
+      #[bc1, mbc1, vbc1] = adam::update(bc1, dbc1, lr, beta1, beta2, eps, t, mbc1, vbc1)
+      #[Wc2, mWc2, vWc2] = adam::update(Wc2, dWc2, lr, beta1, beta2, eps, t, mWc2, vWc2)
+      #[bc2, mbc2, vbc2] = adam::update(bc2, dbc2, lr, beta1, beta2, eps, t, mbc2, vbc2)
+      #[Wc3, mWc3, vWc3] = adam::update(Wc3, dWc3, lr, beta1, beta2, eps, t, mWc3, vWc3)
+      #[bc3, mbc3, vbc3] = adam::update(bc3, dbc3, lr, beta1, beta2, eps, t, mbc3, vbc3)
+      #[Wa1, mWa1, vWa1] = adam::update(Wa1, dWa1, lr, beta1, beta2, eps, t, mWa1, vWa1)
+      #[ba1, mba1, vba1] = adam::update(ba1, dba1, lr, beta1, beta2, eps, t, mba1, vba1)
+      #[Wa2, mWa2, vWa2] = adam::update(Wa2, dWa2, lr, beta1, beta2, eps, t, mWa2, vWa2)
+      #[ba2, mba2, vba2] = adam::update(ba2, dba2, lr, beta1, beta2, eps, t, mba2, vba2)
+
+      # Compute loss & accuracy for training data every `log_interval` iterations.
+      if (g %% log_interval == 0) {
+        print("Logging training loss & accuracy for group " + g + ":")
+        # Get a mini-batch in this group
+        #j = 0
+        #beg = ((j-1) * batch_size) %% nrow(X_group_batch) + 1
+        #end = min(nrow(X_group_batch), beg + batch_size - 1)
+        #X_batch = X_group_batch[beg:end,]
+        #Y_batch = Y_group_batch[beg:end,]
+
+        # Compute training loss & accuracy using final
+        #probs = predict(X_batch, C, Hin, Win, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)
+        #loss_data = cross_entropy_loss::forward(probs, Y_batch)
+        probs = predict(X_group_batch, C, Hin, Win, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1,
+                        Wa2, ba2, batch_size)
+        loss_data = cross_entropy_loss::forward(probs, Y_group_batch)
+        loss_reg_Wc1 = l2_reg::forward(Wc1, lambda)
+        loss_reg_Wc2 = l2_reg::forward(Wc2, lambda)
+        loss_reg_Wc3 = l2_reg::forward(Wc3, lambda)
+        loss_reg_Wa1 = l2_reg::forward(Wa1, lambda)
+        loss_reg_Wa2 = l2_reg::forward(Wa2, lambda)
+        loss = loss_data + loss_reg_Wc1 + loss_reg_Wc2 + loss_reg_Wc3 + loss_reg_Wa1 + loss_reg_Wa2
+        #accuracy = mean(rowIndexMax(probs) == rowIndexMax(Y_batch))
+        accuracy = mean(rowIndexMax(probs) == rowIndexMax(Y_group_batch))
+
+        # TODO: Consider enabling val metrics here once fast, distributed predictions are available.
+        ## Compute validation loss & accuracy
+        #probs_val = predict(X_val, C, Hin, Win, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)
+        #loss_val = cross_entropy_loss::forward(probs_val, Y_val)
+        #accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(Y_val))
+
+        # Output results
+        print("Epoch: " + e + ", Group: " + g + ", Train Loss: " + loss + ", Train Accuracy: "
+              + accuracy) # + ", Val Loss: " + loss_val + ", Val Accuracy: " + accuracy_val)
+      }
+    }
+
+    # Compute validation loss & accuracy for validation data every epoch
+    print("Logging validation loss & accuracy.")
+    probs_val = predict(X_val, C, Hin, Win, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2,
+                        batch_size)
+    loss_val = cross_entropy_loss::forward(probs_val, Y_val)
+    accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(Y_val))
+
+    # Output results
+    print("Epoch: " + e + "/" + epochs + ", Val Loss: " + loss_val
+          + ", Val Accuracy: " + accuracy_val + ", lr: " + lr + ", mu " + mu)
+
+    # Checkpoint model
+    dir = checkpoint_dir + e + "/"
+    dummy = checkpoint(dir, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)
+    str = "lr: " + lr + ", mu: " + mu + ", decay: " + decay + ", lambda: " + lambda
+          + ", batch_size: " + batch_size
+    name = dir + accuracy_val
+    write(str, name)
+
+    # Anneal momentum towards 0.999
+    mu = mu + (0.999 - mu)/(1+epochs-e)
+    # Decay learning rate
+    lr = lr * decay
+  }
+}
+
+checkpoint = function(string dir,
+                      matrix[double] Wc1, matrix[double] bc1,
+                      matrix[double] Wc2, matrix[double] bc2,
+                      matrix[double] Wc3, matrix[double] bc3,
+                      matrix[double] Wa1, matrix[double] ba1,
+                      matrix[double] Wa2, matrix[double] ba2) {
+  /*
+   * Saves the model parameters to files under the given directory,
+   * one file per parameter matrix, in SystemML binary format.
+   *
+   * Inputs:
+   *  - dir: Directory in which to save model parameters.
+   *  - Wc1: 1st conv layer weights (parameters) matrix, of shape (F1, C*Hf*Wf).
+   *  - bc1: 1st conv layer biases vector, of shape (F1, 1).
+   *  - Wc2: 2nd conv layer weights (parameters) matrix, of shape (F2, F1*Hf*Wf).
+   *  - bc2: 2nd conv layer biases vector, of shape (F2, 1).
+   *  - Wc3: 3rd conv layer weights (parameters) matrix, of shape (F3, F2*Hf*Wf).
+   *  - bc3: 3rd conv layer biases vector, of shape (F3, 1).
+   *  - Wa1: 1st affine layer weights (parameters) matrix, of shape (F3*(Hin/2^3)*(Win/2^3), N1).
+   *  - ba1: 1st affine layer biases vector, of shape (1, N1).
+   *  - Wa2: 2nd affine layer weights (parameters) matrix, of shape (N1, K).
+   *  - ba2: 2nd affine layer biases vector, of shape (1, K).
+   *
+   * Outputs:
+   *  - None (the parameter matrices are written to files under dir as a side effect).
+   */
+  write(Wc1, dir + "Wc1", format="binary")
+  write(bc1, dir + "bc1", format="binary")
+  write(Wc2, dir + "Wc2", format="binary")
+  write(bc2, dir + "bc2", format="binary")
+  write(Wc3, dir + "Wc3", format="binary")
+  write(bc3, dir + "bc3", format="binary")
+  write(Wa1, dir + "Wa1", format="binary")
+  write(ba1, dir + "ba1", format="binary")
+  write(Wa2, dir + "Wa2", format="binary")
+  write(ba2, dir + "ba2", format="binary")
+}
+
+predict = function(matrix[double] X, int C, int Hin, int Win,
+                   matrix[double] Wc1, matrix[double] bc1,
+                   matrix[double] Wc2, matrix[double] bc2,
+                   matrix[double] Wc3, matrix[double] bc3,
+                   matrix[double] Wa1, matrix[double] ba1,
+                   matrix[double] Wa2, matrix[double] ba2,
+                   int batch_size)
+    return (matrix[double] probs) {
+  /*
+   * Computes the class probability predictions of a convolutional
+   * net using the "LeNet" architecture.
+   *
+   * The input matrix, X, has N examples, each represented as a 3D
+   * volume unrolled into a single vector.
+   *
+   * Inputs:
+   *  - X: Input data matrix, of shape (N, C*Hin*Win).
+   *  - C: Number of input channels (dimensionality of input depth).
+   *  - Hin: Input height.
+   *  - Win: Input width.
+   *  - Wc1: 1st conv layer weights (parameters) matrix, of shape (F1, C*Hf*Wf).
+   *  - bc1: 1st conv layer biases vector, of shape (F1, 1).
+   *  - Wc2: 2nd conv layer weights (parameters) matrix, of shape (F2, F1*Hf*Wf).
+   *  - bc2: 2nd conv layer biases vector, of shape (F2, 1).
+   *  - Wc3: 3rd conv layer weights (parameters) matrix, of shape (F3, F2*Hf*Wf).
+   *  - bc3: 3rd conv layer biases vector, of shape (F3, 1).
+   *  - Wa1: 1st affine layer weights (parameters) matrix, of shape (F3*(Hin/2^3)*(Win/2^3), N1),
+   *      since each of the three 2x2 max-pooling layers halves both spatial dims.
+   *  - ba1: 1st affine layer biases vector, of shape (1, N1).
+   *  - Wa2: 2nd affine layer weights (parameters) matrix, of shape (N1, K).
+   *  - ba2: 2nd affine layer biases vector, of shape (1, K).
+   *  - batch_size: Size of mini-batches (currently unused here; predictions
+   *      are computed one example at a time in the parfor loop below).
+   *
+   * Outputs:
+   *  - probs: Class probabilities, of shape (N, K).
+   */
+  N = nrow(X)
+
+  # Network:
+  # conv1 -> relu1 -> pool1 -> conv2 -> relu2 -> pool2 -> conv3 -> relu3 -> pool3
+  #  -> affine1 -> relu1 -> affine2 -> softmax
+  Hf = 3  # filter height
+  Wf = 3  # filter width
+  stride = 1
+  pad = 1  # For same dimensions, (Hf - stride) / 2
+
+  F1 = nrow(Wc1)  # num conv filters in conv1
+  F2 = nrow(Wc2)  # num conv filters in conv2
+  F3 = nrow(Wc3)  # num conv filters in conv3
+  N1 = ncol(Wa1)  # num nodes in affine1
+  K = ncol(Wa2)  # num nodes in affine2, equal to number of target dimensions (num classes)
+
+  # TODO: Implement fast, distributed conv & max pooling operators so that predictions
+  # can be computed in a full-batch, distributed manner.  Alternatively, improve `parfor`
+  # so that it can be efficiently used for parallel predictions.
+  ## Compute forward pass
+  ### conv layer 1: conv1 -> relu1 -> pool1
+  #[outc1, Houtc1, Woutc1] = conv2d::forward(X, Wc1, bc1, C, Hin, Win, Hf, Wf, stride, stride,
+  #                                          pad, pad)
+  #outc1r = relu::forward(outc1)
+  #[outc1p, Houtc1p, Woutc1p] = max_pool2d::forward(outc1r, F1, Houtc1, Woutc1, Hf=2, Wf=2,
+  #                                                 strideh=2, stridew=2, 0, 0)
+  ### conv layer 2: conv2 -> relu2 -> pool2
+  #[outc2, Houtc2, Woutc2] = conv2d::forward(outc1p, Wc2, bc2, F1, Houtc1p, Woutc1p, Hf, Wf,
+  #                                          stride, stride, pad, pad)
+  #outc2r = relu::forward(outc2)
+  #[outc2p, Houtc2p, Woutc2p] = max_pool2d::forward(outc2r, F2, Houtc2, Woutc2, Hf=2, Wf=2,
+  #                                                 strideh=2, stridew=2, 0, 0)
+  ### conv layer 3: conv3 -> relu3 -> pool3
+  #[outc3, Houtc3, Woutc3] = conv2d::forward(outc2p, Wc3, bc3, F2, Houtc2p, Woutc2p, Hf, Wf,
+  #                                          stride, stride, pad, pad)
+  #outc3r = relu::forward(outc3)
+  #[outc3p, Houtc3p, Woutc3p] = max_pool2d::forward(outc3r, F3, Houtc3, Woutc3, Hf=2, Wf=2,
+  #                                                 strideh=2, stridew=2, 0, 0)
+  ### affine layer 1:  affine1 -> relu1 -> dropout
+  #outa1 = affine::forward(outc3p, Wa1, ba1)
+  #outa1r = relu::forward(outa1)
+  ##[outa1d, maskad1] = dropout::forward(outa1r, 0.5, -1)
+  ### affine layer 2:  affine2 -> softmax
+  #outa2 = affine::forward(outa1r, Wa2, ba2)
+  #probs = softmax::forward(outa2)
+
+  # Compute predictions over mini-batches
+  probs = matrix(0, rows=N, cols=K)
+  #batch_size = 32
+  #iters = ceil(N / batch_size)
+  # TODO: `parfor` should work here, possibly as an alternative to distributed predictions.
+  #for (i in 1:iters) {
+  #parfor (i in 1:iters, check=0, mode=REMOTE_SPARK, opt=CONSTRAINED) {
+  #parfor (i in 1:iters, check=0) {  # complains about `probs` as an inter-loop dependency
+  # Predict each example independently on Spark; `check=0` disables the loop
+  # dependency check, which is safe because each iteration writes only its own
+  # disjoint row of `probs`.
+  parfor (i in 1:N, check=0, mode=REMOTE_SPARK, opt=CONSTRAINED) {
+  #parfor (i in 1:N) {
+    ## Get next batch
+    #beg = ((i-1) * batch_size) %% N + 1
+    #end = min(N, beg + batch_size - 1)
+    #X_batch = X[beg:end,]
+    #X_batch = X[i,]
+
+    # Compute forward pass
+    ## conv layer 1: conv1 -> relu1 -> pool1
+    [outc1, Houtc1, Woutc1] = conv2d::forward(X[i,], Wc1, bc1, C, Hin, Win, Hf, Wf,
+                                              stride, stride, pad, pad)
+    outc1r = relu::forward(outc1)
+    [outc1p, Houtc1p, Woutc1p] = max_pool2d::forward(outc1r, F1, Houtc1, Woutc1, Hf=2, Wf=2,
+                                                     strideh=2, stridew=2, 0, 0)
+    ## conv layer 2: conv2 -> relu2 -> pool2
+    [outc2, Houtc2, Woutc2] = conv2d::forward(outc1p, Wc2, bc2, F1, Houtc1p, Woutc1p, Hf, Wf,
+                                              stride, stride, pad, pad)
+    outc2r = relu::forward(outc2)
+    [outc2p, Houtc2p, Woutc2p] = max_pool2d::forward(outc2r, F2, Houtc2, Woutc2, Hf=2, Wf=2,
+                                                     strideh=2, stridew=2, 0, 0)
+    ## conv layer 3: conv3 -> relu3 -> pool3
+    [outc3, Houtc3, Woutc3] = conv2d::forward(outc2p, Wc3, bc3, F2, Houtc2p, Woutc2p, Hf, Wf,
+                                              stride, stride, pad, pad)
+    outc3r = relu::forward(outc3)
+    [outc3p, Houtc3p, Woutc3p] = max_pool2d::forward(outc3r, F3, Houtc3, Woutc3, Hf=2, Wf=2,
+                                                     strideh=2, stridew=2, 0, 0)
+    ## affine layer 1:  affine1 -> relu1 -> dropout
+    outa1 = affine::forward(outc3p, Wa1, ba1)
+    outa1r = relu::forward(outa1)
+    #[outa1d, maskad1] = dropout::forward(outa1r, 0.5, -1)
+    ## affine layer 2:  affine2 -> softmax
+    outa2 = affine::forward(outa1r, Wa2, ba2)
+    probs_batch = softmax::forward(outa2)
+
+    # Store predictions
+    #probs[beg:end,] = probs_batch
+    probs[i,] = probs_batch
+  }
+}
+
+eval = function(matrix[double] probs, matrix[double] Y)
+    return (double loss, double accuracy) {
+  /*
+   * Evaluates a convolutional net using the "LeNet" architecture.
+   *
+   * The probs matrix contains the class probability predictions
+   * of K classes over N examples.  The targets, Y, have K classes,
+   * and are one-hot encoded.
+   *
+   * Inputs:
+   *  - probs: Class probabilities, of shape (N, K).
+   *  - Y: Target matrix, of shape (N, K).
+   *
+   * Outputs:
+   *  - loss: Scalar loss, of shape (1).
+   *  - accuracy: Scalar accuracy, of shape (1).
+   */
+  # Compute loss & accuracy
+  loss = cross_entropy_loss::forward(probs, Y)
+  # An example is correct when the argmax of the predictions matches the argmax
+  # of the one-hot target; accuracy is the fraction of correct examples.
+  correct_pred = rowIndexMax(probs) == rowIndexMax(Y)
+  accuracy = mean(correct_pred)
+}
+
+generate_dummy_data = function(int N)
+    return (matrix[double] X, matrix[double] Y, int C, int Hin, int Win) {
+  /*
+   * Creates a synthetic dataset with the same dimensions as the
+   * breast cancer dataset.
+   *
+   * Inputs:
+   *  - N: Number of examples to generate.
+   *
+   * Outputs:
+   *  - X: Input data matrix, of shape (N, C*Hin*Win).
+   *  - Y: One-hot encoded target matrix, of shape (N, K).
+   *  - C: Number of input channels (dimensionality of input depth).
+   *  - Hin: Input height.
+   *  - Win: Input width.
+   */
+  C = 3      # number of input channels
+  Hin = 256  # height of each input image
+  Win = 256  # width of each input image
+  K = 3      # number of target classes
+
+  # Draw random pixel data, plus a random class id in [1, K] per example.
+  X = rand(rows=N, cols=C*Hin*Win, pdf="normal")
+  class_ids = round(rand(rows=N, cols=1, min=1, max=K, pdf="uniform"))
+  Y = table(seq(1, N), class_ids, N, K)  # convert class ids to one-hot encoding
+}
+

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/breastcancer/input_data.py
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/breastcancer/input_data.py b/projects/breast_cancer/breastcancer/input_data.py
new file mode 100644
index 0000000..6e65a3e
--- /dev/null
+++ b/projects/breast_cancer/breastcancer/input_data.py
@@ -0,0 +1,229 @@
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
+"""
+Data utilities for the TUPAC16 breast cancer project.
+
+This is early, experimental code.
+
+TODO: Cleanup & add proper comments to all functions.
+"""
+import multiprocessing as mp
+import os
+import threading
+
+import numpy as np
+import py4j
+
+
+# Utils for reading data
+
+def compute_channel_means(rdd, channels, size):
+  """Compute the means of each color channel across the dataset."""
+  # TODO: Replace this with pyspark.ml.feature.VectorSlicer
+  # to cut vector into separate channel vectors, then grab the mean
+  # of those new columns, all using DataFrame functions, rather than
+  # RDD functions.
+  # from pyspark.ml.linalg import VectorUDT
+  # from pyspark.sql.functions import udf
+  # from pyspark.sql import functions as F
+  # as_ml = udf(lambda v: v.asML() if v is not None else None, VectorUDT())
+  # slicers[0].transform(train_df.withColumn("sample", as_ml("sample"))).select(F.avg("ch0"))
+  # slicers = [VectorSlicer(inputCol="sample", outputCol="ch{}".format(c), indices=range(c*pixels, c*pixels + pixels)) for c in range(CHANNELS)]
+  def helper(x):
+    x = x.sample.values
+    x = np.array(x)
+    x = (x.reshape((-1,channels,size,size))  # shape (N,C,H,W)
+              .transpose((0,2,3,1))  # shape (N,H,W,C)
+              .astype(np.float32))
+    mu = np.mean(x, axis=(0,1,2))
+    return mu
+
+  means = rdd.map(helper).collect()
+  means = np.array(means)
+  means = np.mean(means, axis=0)
+  return means
+
+
+def gen_class_weights(df):
+  """Generate class weights to even out the class distribution during training."""
+  class_counts_df = df.select("tumor_score").groupBy("tumor_score").count()
+  class_counts = {row["tumor_score"]:row["count"] for row in class_counts_df.collect()}
+  max_count = max(class_counts.values())
+  class_weights = {k-1:max_count/v for k,v in class_counts.items()}
+  return class_weights
+
+
+def read_data(spark_session, filename_template, sample_size, channels, sample_prob, normalize_class_distribution, seed):
+  """Read and return a Spark DataFrame of examples.
+
+  Used for both the training and validation sets.  When `sample_prob` < 1,
+  a pre-sampled Parquet file is tried first; if unavailable, the full
+  DataFrame is loaded and a stratified sample is drawn from it.
+  """
+  # TODO: Clean this function up!!!
+  assert channels in (1, 3)
+  grayscale = False if channels == 3 else True
+
+  # Sample (Optional)
+  if sample_prob < 1:
+    try:
+      # Ex: `train_0.01_sample_256.parquet`
+      sampled_filename_template = filename_template.format("{}_sample_".format(sample_prob), sample_size, "_grayscale" if grayscale else "")
+      filename = os.path.join("data", sampled_filename_template)
+      df = spark_session.read.load(filename)
+    except:  # Pre-sampled DataFrame not available -- NOTE(review): bare except also hides genuine read errors; consider catching the specific Spark exception.
+      filename = os.path.join("data", filename_template.format("", sample_size, "_grayscale" if grayscale else ""))
+      df = spark_session.read.load(filename)
+      p = sample_prob  # sample percentage
+      if normalize_class_distribution:
+        # stratified sample with even class proportions
+        n = df.count()  # num examples
+        K = 3  # num classes
+        s = p * n  # num examples in p% sample, as a fraction
+        s_k = s / K  # num examples per class in evenly-distributed p% sample, as fraction
+        class_counts_df = df.select("tumor_score").groupBy("tumor_score").count()
+        class_counts = {row["tumor_score"]:row["count"] for row in class_counts_df.collect()}
+        # Per-class sampling fraction yielding ~s_k examples from each class.
+        ps = {k:s_k/v for k,v in class_counts.items()}
+        df = df.sampleBy("tumor_score", fractions=ps, seed=seed)
+      else:
+        # stratified sample maintaining the original class proportions
+        df = df.sampleBy("tumor_score", fractions={1: p, 2: p, 3: p}, seed=seed)
+      # TODO: Determine if coalesce actually provides a perf benefit on Spark 2.x
+      #train_df.cache(), val_df.cache()  # cache here, or coalesce will hang
+      # tc = train_df.count()
+      # vc = val_df.count()
+      #
+      # # Reduce num partitions to ideal size (~128 MB/partition, determined empirically)
+      # current_tr_parts = train_df.rdd.getNumPartitions()
+      # current_val_parts = train_df.rdd.getNumPartitions()
+      # ex_mb = sample_size * sample_size * channels * 8 / 1024 / 1024  # size of one example in MB
+      # ideal_part_size_mb = 128  # 128 MB partitions sizes are empirically ideal
+      # ideal_exs_per_part = round(ideal_part_size_mb / ex_mb)
+      # tr_parts = round(tc / ideal_exs_per_part)
+      # val_parts = round(vc / ideal_exs_per_part)
+      # if current_tr_parts > tr_parts:
+      #   train_df = train_df.coalesce(tr_parts)
+      # if current_val_parts > val_parts:
+      #   val_df = val_df.coalesce(val_parts)
+      # train_df.cache(), val_df.cache()
+  else:
+    # Read in data
+    filename = os.path.join("data", filename_template.format("", sample_size, "_grayscale" if grayscale else ""))
+    df = spark_session.read.load(filename)
+
+  return df
+
+
+def read_train_data(spark_session, sample_size, channels, sample_prob=1, normalize_class_distribution=False, seed=42):
+  """Read training Spark DataFrame."""
+  filename = "train_{}{}{}_updated.parquet"
+  train_df = read_data(spark_session, filename, sample_size, channels, sample_prob, normalize_class_distribution, seed)
+  return train_df
+
+
+def read_val_data(spark_session, sample_size, channels, sample_prob=1, normalize_class_distribution=False, seed=42):
+  """Read validation Spark DataFrame."""
+  filename = "val_{}{}{}_updated.parquet"
+  train_df = read_data(spark_session, filename, sample_size, channels, sample_prob, normalize_class_distribution, seed)
+  return train_df
+
+
+# Utils for creating asynchronous queuing batch generators
+# TODO: Add comments to these functions
+
+def fill_partition_num_queue(partition_num_queue, num_partitions, stop_event):
+  """Repeatedly enqueue the partition indices [0, num_partitions) until stopped.
+
+  Intended to run in a daemon process (see `create_batch_generator`); `put`
+  blocks when the queue is full, which throttles this producer.
+  """
+  # Don't block process exit waiting for buffered queue items to be flushed.
+  partition_num_queue.cancel_join_thread()
+  while not stop_event.is_set():
+    for i in range(num_partitions):
+      partition_num_queue.put(i)
+  # NOTE(review): the stop event is only checked between full passes over the
+  # partition indices, so shutdown can lag by up to one pass.
+
+
+def fill_partition_queue(partition_queue, partition_num_queue, rdd, stop_event):
+  partition_queue.cancel_join_thread()
+  while not stop_event.is_set():
+    # py4j has some issues with imports with first starting.
+    try:
+      partition_num = partition_num_queue.get()
+      partition = rdd.context.runJob(rdd, lambda x: x, [partition_num])
+      partition_queue.put(partition)
+    except (AttributeError, py4j.protocol.Py4JError, Exception) as err:
+      print("error: {}".format(err))
+
+
+def fill_row_queue(row_queue, partition_queue, stop_event):
+  """Unpack queued partitions into individual rows until stopped.
+
+  Intended to run in a daemon process (see `create_batch_generator`): takes a
+  partition (a list of rows) from `partition_queue` and pushes each row onto
+  `row_queue` for the batch generator to consume.
+  """
+  # Don't block process exit waiting for buffered queue items to be flushed.
+  row_queue.cancel_join_thread()
+  while not stop_event.is_set():
+    rows = partition_queue.get()
+    for row in rows:
+      row_queue.put(row)
+
+
+def gen_batch(row_queue, batch_size):
+  while True:
+    features = []
+    labels = []
+    for i in range(batch_size):
+      row = row_queue.get()
+      features.append(row.sample.values)
+      labels.append(row.tumor_score)
+    x_batch = np.array(features).astype(np.uint8)
+    y_batch = np.array(labels).astype(np.uint8)
+    yield x_batch, y_batch
+
+
+def create_batch_generator(
+    rdd, batch_size=32, num_partition_threads=32, num_row_processes=16,
+    partition_num_queue_size=128, partition_queue_size=16, row_queue_size=2048):
+  """
+  Create a multiprocess batch generator.
+
+  This creates a generator that uses processes and threads to create a
+  pipeline that asynchronously fetches data from Spark, filling a set
+  of queues, while yielding batches.  The goal here is to amortize the
+  time needed to fetch data from Spark so that downstream consumers
+  are saturated.
+
+  Returns a tuple of (batch generator, list of processes/threads, list of
+  queues, stop event); pass the processes and the stop event to `stop` to
+  shut the pipeline down.
+  """
+  #rdd.cache()
+  # Bounded queues: partition indices -> fetched partitions -> individual rows.
+  partition_num_queue = mp.Queue(partition_num_queue_size)
+  partition_queue = mp.Queue(partition_queue_size)
+  row_queue = mp.Queue(row_queue_size)
+
+  num_partitions = rdd.getNumPartitions()
+  stop_event = mp.Event()
+
+  # NOTE(review): partition fetching uses threads while the other stages use
+  # processes -- presumably because the Spark driver connection is only usable
+  # from this process; confirm before changing.
+  partition_num_process = mp.Process(target=fill_partition_num_queue, args=(partition_num_queue, num_partitions, stop_event), daemon=True)
+  partition_threads = [threading.Thread(target=fill_partition_queue, args=(partition_queue, partition_num_queue, rdd, stop_event), daemon=True) for _ in range(num_partition_threads)]
+  row_processes = [mp.Process(target=fill_row_queue, args=(row_queue, partition_queue, stop_event), daemon=True) for _ in range(num_row_processes)]
+
+  ps = [partition_num_process] + row_processes + partition_threads
+  queues = [partition_num_queue, partition_queue, row_queue]
+
+  for p in ps:
+    p.start()
+
+  generator = gen_batch(row_queue, batch_size)
+  return generator, ps, queues, stop_event
+
+
+def stop(processes, stop_event):
+  """Stop queuing processes."""
+  stop_event.set()
+  for p in processes:
+    if isinstance(p, mp.Process):
+      p.terminate()
+  mp.active_children()  # Use to join the killed processes above.
+

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/breastcancer/softmax_clf.dml
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/breastcancer/softmax_clf.dml b/projects/breast_cancer/breastcancer/softmax_clf.dml
new file mode 100644
index 0000000..35fd545
--- /dev/null
+++ b/projects/breast_cancer/breastcancer/softmax_clf.dml
@@ -0,0 +1,207 @@
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
+/*
+ * Breast Cancer Softmax Model
+ */
+# Imports
+source("nn/layers/affine.dml") as affine
+source("nn/layers/cross_entropy_loss.dml") as cross_entropy_loss
+source("nn/layers/softmax.dml") as softmax
+#source("nn/optim/adam.dml") as adam
+source("nn/optim/sgd_nesterov.dml") as sgd_nesterov
+
+train = function(matrix[double] X, matrix[double] Y,
+                 matrix[double] X_val, matrix[double] Y_val,
+                 double lr, double mu, double decay,
+                 int batch_size, int epochs, int log_interval)
+    return (matrix[double] W, matrix[double] b) {
+  /*
+   * Trains a softmax classifier.
+   *
+   * The input matrix, X, has N examples, each with D features.
+   * The targets, Y, have K classes, and are one-hot encoded.
+   *
+   * Inputs:
+   *  - X: Input data matrix, of shape (N, D).
+   *  - Y: Target matrix, of shape (N, K).
+   *  - X_val: Input validation data matrix, of shape (N_val, D).
+   *  - Y_val: Target validation matrix, of shape (N_val, K).
+   *  - lr: Learning rate.
+   *  - mu: Momentum value.
+   *      Typical values are in the range of [0.5, 0.99], usually
+   *      started at the lower end and annealed towards the higher end.
+   *  - decay: Learning rate decay rate.
+   *  - batch_size: Size of mini-batches to train on.
+   *  - epochs: Total number of full training loops over the full data set.
+   *  - log_interval: Interval, in iterations, between log outputs.
+   *
+   * Outputs:
+   *  - W: Weights (parameters) matrix, of shape (D, K).
+   *  - b: Biases vector, of shape (1, K).
+   */
+  N = nrow(Y)  # num examples
+  D = ncol(X)  # num features
+  K = ncol(Y)  # num classes
+
+  # Create softmax classifier:
+  # affine -> softmax
+  [W, b] = affine::init(D, K)
+  # Rescale W from the affine::init scale (presumably sqrt(2/D), He init) down
+  # to sqrt(1/D) -- NOTE(review): confirm this matches affine::init's scheme.
+  W = W / sqrt(2.0/(D)) * sqrt(1/(D))
+
+  # Initialize SGD w/ Nesterov momentum optimizer
+  vW = sgd_nesterov::init(W)  # optimizer momentum state for W
+  vb = sgd_nesterov::init(b)  # optimizer momentum state for b
+  #[mW, vW] = adam::init(W)  # optimizer 1st & 2nd moment state for W
+  #[mb, vb] = adam::init(b)  # optimizer 1st & 2nd moment state for b
+
+  # Starting validation loss & accuracy
+  probs_val = predict(X_val, W, b)
+  loss_val = cross_entropy_loss::forward(probs_val, Y_val)
+  accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(Y_val))
+  # Output results
+  print("Start: Val Loss: " + loss_val + ", Val Accuracy: " + accuracy_val)
+
+  # Optimize
+  print("Starting optimization")
+  iters = ceil(N / batch_size)
+  for (e in 1:epochs) {
+    for(i in 1:iters) {
+      # Get next batch
+      beg = ((i-1) * batch_size) %% N + 1
+      end = min(N, beg + batch_size - 1)
+      #print("Epoch: " + e + ", Iter: " + i + ", X[" + beg + ":" + end + ",]")
+      X_batch = X[beg:end,]
+      Y_batch = Y[beg:end,]
+
+      # Compute forward pass
+      ## affine & softmax:
+      out = affine::forward(X_batch, W, b)
+      probs = softmax::forward(out)
+
+      # Compute backward pass
+      ## loss:
+      dprobs = cross_entropy_loss::backward(probs, Y_batch)
+      ## affine & softmax:
+      dout = softmax::backward(dprobs, out)
+      [dX_batch, dW, db] = affine::backward(dout, X_batch, W, b)
+
+      # Optimize with SGD w/ Nesterov momentum
+      [W, vW] = sgd_nesterov::update(W, dW, lr, mu, vW)
+      [b, vb] = sgd_nesterov::update(b, db, lr, mu, vb)
+      #[W, mW, vW] = adam::update(W, dW, lr, 0.9, 0.999, 1e-8, e*i-1, mW, vW)
+      #[b, mb, vb] = adam::update(b, db, lr, 0.9, 0.999, 1e-8, e*i-1, mb, vb)
+
+      # Compute loss & accuracy for training & validation data every `log_interval` iterations.
+      if (i %% log_interval == 0) {
+        #print("Eval time! - i: " + i)
+        # Compute training loss & accuracy
+        loss = cross_entropy_loss::forward(probs, Y_batch)
+        accuracy = mean(rowIndexMax(probs) == rowIndexMax(Y_batch))
+
+        # Compute validation loss & accuracy
+        probs_val = predict(X_val, W, b)
+        loss_val = cross_entropy_loss::forward(probs_val, Y_val)
+        accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(Y_val))
+
+        # Output results
+        print("Epoch: " + e + "/" + epochs + ", Iter: " + i + "/" + iters
+              + ", Train Loss: " + loss + ", Train Accuracy: " + accuracy + ", Val Loss: "
+              + loss_val + ", Val Accuracy: " + accuracy_val + ", lr: " + lr + ", mu " + mu)
+      }
+    }
+    # Anneal momentum towards 0.999
+    mu = mu + (0.999 - mu)/(1+epochs-e)
+    # Decay learning rate
+    lr = lr * decay
+  }
+}
+
+predict = function(matrix[double] X, matrix[double] W, matrix[double] b)
+    return (matrix[double] probs) {
+  /*
+   * Computes class probability predictions for a softmax classifier.
+   *
+   * The input matrix, X, has N examples, each with D features.
+   *
+   * Inputs:
+   *  - X: Input data matrix, of shape (N, D).
+   *  - W: Weights (parameters) matrix, of shape (D, K).
+   *  - b: Biases vector, of shape (1, K).
+   *
+   * Outputs:
+   *  - probs: Class probabilities, of shape (N, K).
+   */
+  N = nrow(X)  # num examples
+  K = ncol(W)  # num classes
+
+  # Forward pass: affine scores followed by softmax normalization.
+  scores = affine::forward(X, W, b)
+  probs = softmax::forward(scores)
+}
+
+eval = function(matrix[double] probs, matrix[double] Y)
+    return (double loss, double accuracy) {
+  /*
+   * Evaluates softmax predictions against one-hot encoded targets.
+   *
+   * The probs matrix contains the class probability predictions
+   * of K classes over N examples.
+   *
+   * Inputs:
+   *  - probs: Class probabilities, of shape (N, K).
+   *  - Y: Target matrix, of shape (N, K).
+   *
+   * Outputs:
+   *  - loss: Scalar cross-entropy loss.
+   *  - accuracy: Scalar classification accuracy (fraction of examples whose
+   *      argmax prediction matches the argmax target).
+   */
+  loss = cross_entropy_loss::forward(probs, Y)
+  accuracy = mean(rowIndexMax(probs) == rowIndexMax(Y))
+}
+
+generate_dummy_data = function()
+    return (matrix[double] X, matrix[double] Y, int C, int Hin, int Win) {
+  /*
+   * Generate a dummy dataset similar to the breast cancer dataset.
+   *
+   * Outputs:
+   *  - X: Input data matrix, of shape (N, C*Hin*Win).
+   *  - Y: One-hot encoded target matrix, of shape (N, T).
+   *  - C: Number of input channels (dimensionality of input depth).
+   *  - Hin: Input height.
+   *  - Win: Input width.
+   */
+  # Generate dummy input data
+  N = 1024  # num examples
+  C = 3  # num input channels
+  Hin = 256  # input height
+  Win = 256  # input width
+  T = 10  # num targets
+  X = rand(rows=N, cols=C*Hin*Win, pdf="normal")
+  classes = round(rand(rows=N, cols=1, min=1, max=T, pdf="uniform"))
+  # Pass explicit output dimensions so that Y always has exactly T columns;
+  # without them, the width of Y would depend on the largest sampled class id.
+  # This also matches the convnet version of this function.
+  Y = table(seq(1, N), classes, N, T)  # one-hot encoding
+}
+


[5/5] systemml git commit: [SYSTEMML-1185][SYSTEMML-1766] Merge experimental breast cancer updates

Posted by du...@apache.org.
[SYSTEMML-1185][SYSTEMML-1766] Merge experimental breast cancer updates

This merges additional code from the experimental breast cancer branch
to the main repo, including Keras experiments, updates to the
preprocessing, shell script helpers, file reorganization, and
documentation improvements.

Closes #573.


Project: http://git-wip-us.apache.org/repos/asf/systemml/repo
Commit: http://git-wip-us.apache.org/repos/asf/systemml/commit/532da1bc
Tree: http://git-wip-us.apache.org/repos/asf/systemml/tree/532da1bc
Diff: http://git-wip-us.apache.org/repos/asf/systemml/diff/532da1bc

Branch: refs/heads/master
Commit: 532da1bc51fed65cd6c329b1c99c1926fe4cf2cd
Parents: 62b64b3
Author: Mike Dusenberry <mw...@us.ibm.com>
Authored: Mon Jul 17 17:18:46 2017 -0700
Committer: Mike Dusenberry <mw...@us.ibm.com>
Committed: Mon Jul 17 17:18:46 2017 -0700

----------------------------------------------------------------------
 pom.xml                                         |   5 +-
 .../MachineLearning-Keras-Eval.ipynb            | 859 +++++++++++++++++++
 .../MachineLearning-Keras-ResNet50.ipynb        | 717 ++++++++++++++++
 projects/breast_cancer/MachineLearning.ipynb    | 338 +++++---
 .../Preprocessing-Save-JPEGs.ipynb              | 610 +++++++++++++
 projects/breast_cancer/Preprocessing.ipynb      | 101 +--
 projects/breast_cancer/README.md                |  40 +-
 projects/breast_cancer/approach.svg             |   4 +
 projects/breast_cancer/bin/clean_spark.sh       |  26 +
 projects/breast_cancer/bin/monitor_gpu.sh       |  23 +
 .../breast_cancer/bin/remove_old_processes.sh   |  24 +
 projects/breast_cancer/bin/run_tensorboard.sh   |  23 +
 projects/breast_cancer/breastcancer/convnet.dml | 495 +++++++++++
 .../breastcancer/convnet_distrib_sgd.dml        | 592 +++++++++++++
 .../breast_cancer/breastcancer/input_data.py    | 229 +++++
 .../breast_cancer/breastcancer/softmax_clf.dml  | 207 +++++
 projects/breast_cancer/convnet.dml              | 495 -----------
 projects/breast_cancer/hyperparam_tuning.dml    |   2 +-
 projects/breast_cancer/nn                       |   1 -
 projects/breast_cancer/preprocess.py            |   2 +-
 projects/breast_cancer/softmax_clf.dml          | 207 -----
 21 files changed, 4072 insertions(+), 928 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index e3bf831..ee29fe2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -303,7 +303,7 @@
 							<outputDirectory>${basedir}/target/lib/hadoop/bin</outputDirectory>
 						</configuration>
 					</execution>
-					
+
 					<execution>
 						<id>copy-resources-filtered</id>
 						<phase>compile</phase>
@@ -344,7 +344,7 @@
 					</execution>
 				</executions>
 			</plugin>
-			
+
 			<plugin>
 			    <groupId>com.github.os72</groupId>
 			    <artifactId>protoc-jar-maven-plugin</artifactId>
@@ -870,6 +870,7 @@
 								<exclude>**/*.keep</exclude>
 								<exclude>**/target/**</exclude>
 								<exclude>**/README.md</exclude>
+ 								<exclude>**/*.svg</exclude>
 								<!-- Jupyter Notebooks -->
 								<exclude>**/*.ipynb</exclude>
 								<!-- Generated antlr files -->

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/MachineLearning-Keras-Eval.ipynb
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/MachineLearning-Keras-Eval.ipynb b/projects/breast_cancer/MachineLearning-Keras-Eval.ipynb
new file mode 100644
index 0000000..c9d3e49
--- /dev/null
+++ b/projects/breast_cancer/MachineLearning-Keras-Eval.ipynb
@@ -0,0 +1,859 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Imports"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%load_ext autoreload\n",
+    "%autoreload 2\n",
+    "%matplotlib inline\n",
+    "\n",
+    "import math\n",
+    "import multiprocessing as mp\n",
+    "import os\n",
+    "\n",
+    "import keras\n",
+    "import keras.backend as K\n",
+    "from keras.applications.resnet50 import ResNet50\n",
+    "from keras.callbacks import ModelCheckpoint, TensorBoard\n",
+    "from keras.initializers import VarianceScaling\n",
+    "from keras.layers import Dense, Dropout, Flatten, GlobalAveragePooling2D, Input, Lambda, merge\n",
+    "from keras.models import Model, load_model\n",
+    "from keras.optimizers import SGD\n",
+    "# from keras.preprocessing.image import ImageDataGenerator\n",
+    "from keras.regularizers import l2\n",
+    "from keras.utils import to_categorical\n",
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "import pandas as pd\n",
+    "from PIL import Image\n",
+    "import tensorflow as tf\n",
+    "\n",
+    "# After move to Keras 2.0 API, need to check if this can still be used.\n",
+    "from preprocessing.image_eval import ImageDataGenerator  # multiprocessing ImageDataGenerator\n",
+    "\n",
+    "plt.rcParams['figure.figsize'] = (10, 10)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Settings"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# NOTE: Need to update the following for each model\n",
+    "# 1. train & val data dirs\n",
+    "# 2. train & val data percentages\n",
+    "# 3. experiment directory\n",
+    "# 4. model file\n",
+    "# 5. preprocessing channel means"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#os.environ['CUDA_VISIBLE_DEVICES'] = \"\"\n",
+    "size = 224\n",
+    "channels = 3\n",
+    "classes = 3\n",
+    "p = 0.01  # 0.01\n",
+    "val_p = 0.01  #0.01\n",
+    "num_gpus = 4\n",
+    "batch_size = 32 * num_gpus  # for 2 GPUs, 32/GPU has 1.2x systems speedup over 16/GPU\n",
+    "train_dir = \"train_updated_norm_v3\"\n",
+    "val_dir = \"val_updated_norm_v3\"\n",
+    "run = 13\n",
+    "# exp_dir = \"experiments/keras/resnet50-1%-4-gpu-128-batch-size-updated-norm-v3-data-1%-val-sanity/4\"\n",
+    "experiment_template = \"resnet50-{p}%-{num_gpus}-gpu-{batch_size}-batch-size-{train_dir}-data-{val_p}%-val-sanity/{run}\"\n",
+    "experiment = experiment_template.format(p=int(p*100), val_p=int(val_p*100), num_gpus=num_gpus,\n",
+    "                                        batch_size=batch_size, train_dir=train_dir, run=run)\n",
+    "model_file = \"0.38936_acc_0.27847_loss_model.hdf5\"\n",
+    "exp_dir = os.path.join(\"experiments\", \"keras\", experiment)\n",
+    "# experiment_name = model_file.replace(\"/\", \"_\")[:-5]\n",
+    "print(exp_dir)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# os.makedirs(os.path.join(\"results\", experiment_name), exist_ok=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Load model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model = load_model(os.path.join(exp_dir, model_file))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(model.summary())\n",
+    "print(model.get_layer(\"resnet50\").summary())"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Visualize Model\n",
+    "from IPython.display import SVG\n",
+    "from keras.utils.vis_utils import model_to_dot\n",
+    "SVG(model_to_dot(model.get_layer(\"resnet50\")).create(prog='dot', format='svg'))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Note: previous `model` is already compiled and ready to go.\n",
+    "# However, it may have been built for multi-GPU training, so it\n",
+    "# would still require multiple parallel inputs at eval time.\n",
+    "# Even worse, the device settings will not be retained, so all\n",
+    "# towers would be run on one device.  To fix this, we can extract\n",
+    "# a single tower, rewrap in a multi-GPU block, and recompile.\n",
+    "\n",
+    "# Extract single tower\n",
+    "resnet50 = model.get_layer(\"resnet50\")\n",
+    "#model.save(\"resnet50-100%-4-gpu-128-batch-size-updated-norm-v3-data-1%-val-dropout_0_1.56-19_NO_GPU_TOWERS.hdf5\")\n",
+    "\n",
+    "# Multi-GPU exploitation via a linear combination of GPU loss functions.\n",
+    "ins = []\n",
+    "outs = []\n",
+    "for i in range(num_gpus):\n",
+    "  with tf.device(\"/gpu:{}\".format(i)):\n",
+    "    x = Input(shape=(size,size,channels))  # split of batch\n",
+    "    out = resnet50(x)  # run split on shared model\n",
+    "    ins.append(x)\n",
+    "    outs.append(out)\n",
+    "model = Model(inputs=ins, outputs=outs)  # multi-GPU, data-parallel model\n",
+    "\n",
+    "# Compile model.\n",
+    "metrics = ['accuracy']\n",
+    "model.compile(optimizer=\"sgd\", loss=\"categorical_crossentropy\",\n",
+    "              loss_weights=[1/num_gpus]*num_gpus, metrics=metrics)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# # Explore model\n",
+    "for x in model.inputs + model.outputs + model.metrics_tensors + model.targets:\n",
+    "  print(x.name, x.device)  # check that tensor devices exploit multi-GPU\n",
+    "\n",
+    "# print(model.summary())\n",
+    "\n",
+    "# print(resnet50.summary())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Create train & val data generators"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "train_save_dir = \"images/{stage}/{p}\".format(stage=train_dir, p=p)\n",
+    "val_save_dir = \"images/{stage}/{p}\".format(stage=val_dir, p=val_p)\n",
+    "print(train_save_dir, val_save_dir)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def preprocess_input(x):\n",
+    "  \"\"\"\n",
+    "  Preprocesses a tensor encoding a batch of images.\n",
+    "\n",
+    "  Adapted from keras/applications/imagenet_utils.py\n",
+    "\n",
+    "  # Arguments\n",
+    "      x: input Numpy tensor, 4D of shape (N, H, W, C).\n",
+    "  # Returns\n",
+    "      Preprocessed tensor.\n",
+    "  \"\"\"\n",
+    "  # Zero-center by subtracting mean pixel value per channel\n",
+    "  # based on means from a 50%, evenly-distributed sample.\n",
+    "  # Means: updated-data norm v3, norm, no-norm original\n",
+    "  x[:, :, :, 0] -= 183.36777842  #189.54944625  #194.27633667\n",
+    "  x[:, :, :, 1] -= 138.81743141  #152.73427159  #145.3067627\n",
+    "  x[:, :, :, 2] -= 166.07406199  #176.89543273  #181.27861023 \n",
+    "  x = x[:, :, :, ::-1]  # 'RGB'->'BGR'\n",
+    "  return x\n",
+    "\n",
+    "# Multi-GPU exploitation\n",
+    "def split(x, num_splits):\n",
+    "  \"\"\"Split batch into K equal-sized batches.\"\"\"\n",
+    "  # Split tensors evenly, even if it means throwing away a few examples.\n",
+    "  samples = math.floor(len(x) / num_splits)\n",
+    "  x_splits = [arr[:samples] for arr in np.array_split(x, num_splits)]\n",
+    "  return x_splits\n",
+    "\n",
+    "def gen_preprocessed_batch(batch_generator, num_gpus):\n",
+    "  \"\"\"Yield preprocessed batches of x,y data.\"\"\"\n",
+    "#   for xs, ys in batch_generator:\n",
+    "#     yield split(preprocess_input(xs), num_gpus), split(ys, num_gpus)\n",
+    "#     yield split(xs, num_gpus), split(ys, num_gpus)  for tf aug experiments\n",
+    "  for xs, ys, filenames in batch_generator:\n",
+    "    yield split(preprocess_input(xs), num_gpus), split(ys, num_gpus), split(filenames, num_gpus)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Create train & val image generators\n",
+    "try:\n",
+    "  # For interactive work, kill any existing pool.\n",
+    "  pool.terminate()\n",
+    "except:\n",
+    "  pass\n",
+    "pool = mp.Pool(processes=8)\n",
+    "train_datagen = ImageDataGenerator(pool=pool) #, horizontal_flip=True, vertical_flip=True,\n",
+    "#                                    rotation_range=180, shear_range=0.1, fill_mode='reflect')\n",
+    "val_datagen = ImageDataGenerator(pool=pool)\n",
+    "#train_datagen = ImageDataGenerator()\n",
+    "#val_datagen = ImageDataGenerator()\n",
+    "train_generator_orig = train_datagen.flow_from_directory(train_save_dir, batch_size=batch_size, target_size=(size, size))\n",
+    "val_generator_orig = val_datagen.flow_from_directory(val_save_dir, batch_size=batch_size, target_size=(size, size))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Create train & val preprocessed generators\n",
+    "train_generator = gen_preprocessed_batch(train_generator_orig, num_gpus)\n",
+    "val_generator = gen_preprocessed_batch(val_generator_orig, num_gpus)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Get number of samples"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "# Number of examples.\n",
+    "tc = train_generator_orig.nb_sample\n",
+    "vc = val_generator_orig.nb_sample\n",
+    "#tc = train_generator_orig.samples\n",
+    "#vc = val_generator_orig.samples\n",
+    "\n",
+    "# Number of batches for multi-GPU exploitation.\n",
+    "# Note: Multi-GPU exploitation for data parallelism splits mini-batches\n",
+    "# into a set of micro-batches to be run in parallel on each GPU, but\n",
+    "# Keras will view the set of micro-batches as a single batch with\n",
+    "# multiple sources of inputs (i.e. Keras will view a set of examples\n",
+    "# being run in parallel as a single example with multiple sources of\n",
+    "# inputs).\n",
+    "train_batches = int(math.ceil(tc/batch_size))\n",
+    "val_batches = int(math.ceil(vc/batch_size))\n",
+    "\n",
+    "# Class counts (just for information)\n",
+    "train_class_counts = np.bincount(train_generator_orig.classes)\n",
+    "val_class_counts = np.bincount(val_generator_orig.classes)\n",
+    "\n",
+    "print(tc, vc)\n",
+    "print(train_batches, val_batches)\n",
+    "print(train_class_counts / np.sum(train_class_counts), val_class_counts / np.sum(val_class_counts))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Generate class weights for training"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class_counts = np.bincount(train_generator_orig.classes)\n",
+    "class_weights = dict(zip(range(classes), min(class_counts) / class_counts))\n",
+    "print(class_counts)\n",
+    "print(class_weights)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Plot random images (Optional)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def show_random_image(save_dir):\n",
+    "  c = np.random.randint(1, 4)\n",
+    "  class_dir = os.path.join(save_dir, str(c))\n",
+    "  files = os.listdir(class_dir)\n",
+    "  i = np.random.randint(0, len(files))\n",
+    "  fname = os.path.join(class_dir, files[i])\n",
+    "  print(fname)\n",
+    "  img = Image.open(fname)\n",
+    "  plt.imshow(img)\n",
+    "\n",
+    "# show_random_image(train_save_dir)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def plot(gen):\n",
+    "  r, c = 6, 6\n",
+    "  fig, ax = plt.subplots(r, c)\n",
+    "  plt.setp(ax, xticks=[], yticks=[])\n",
+    "  plt.tight_layout()\n",
+    "  x, y, fname = next(gen)\n",
+    "  batch_size = x.shape[0]\n",
+    "  for i in range(r):\n",
+    "    for j in range(c):\n",
+    "      if i*c + j < batch_size:\n",
+    "        im = x[i*c + j].astype(np.uint8)\n",
+    "        if K.image_data_format() == 'channels_first':\n",
+    "          im = im.transpose(1,2,0)  # (C,H,W) -> (H,W,C)\n",
+    "        ax[i][j].imshow(im)\n",
+    "        ax[i][j].set_xlabel(y[i*c + j])\n",
+    "\n",
+    "plot(train_generator_orig)\n",
+    "plot(val_generator_orig)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Evaluate previous model checkpoint"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# NOTE: We could call the `model.evaluate*` methods,\n",
+    "# but that would not allow us to create contingency\n",
+    "# matrices.  Instead, we repeatedly loop over batches\n",
+    "# of data, collecting both the true labels and\n",
+    "# predictions.  Then, we can compute any metrics\n",
+    "# desired, including 3x3 contingency matrices."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# def extract_metrics(model, raw_metrics):\n",
+    "#   labeled_metrics = list(zip(model.metrics_names, raw_metrics))\n",
+    "#   losses = [v for k,v in labeled_metrics if k == \"loss\"]\n",
+    "#   accuracies = [v for k,v in labeled_metrics if k.endswith(\"acc\")]\n",
+    "#   loss = sum(losses) / num_gpus\n",
+    "#   acc = sum(accuracies) / num_gpus\n",
+    "#   metrics = {\"loss\": loss, \"acc\": acc}\n",
+    "#   return labeled_metrics, metrics\n",
+    "\n",
+    "# raw_metrics = model.evaluate_generator(val_generator, val_samples=32,\n",
+    "#                                        max_q_size=8, nb_worker=1, pickle_safe=False)\n",
+    "\n",
+    "# labeled_metrics, metrics = extract_metrics(model, raw_metrics)\n",
+    "# print(labeled_metrics)\n",
+    "# print(metrics)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Get predictions\n",
+    "for dataset in [(\"train\", p, tc, val_generator)]:  #, (\"val\", val_p, vc, val_generator)]:\n",
+    "  name, perc, count, gen = dataset\n",
+    "\n",
+    "  ys = []\n",
+    "  preds = []\n",
+    "  fnames = []\n",
+    "  batches = math.floor(count / batch_size)\n",
+    "  for i in range(batches):\n",
+    "    # Get batch.\n",
+    "#     x, y = next(gen)\n",
+    "    x, y, fname = next(gen)\n",
+    "\n",
+    "    # Get predictions\n",
+    "    pred = model.predict(x)\n",
+    "\n",
+    "    # Store y and predictions\n",
+    "    ys.extend(y)  # y is always a list of parallel batches, even if only 1 batch\n",
+    "    if isinstance(pred, list):\n",
+    "      preds.extend(pred)\n",
+    "    else:\n",
+    "      preds.append(pred)\n",
+    "    fnames.extend(fname)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "  # Create DataFrames\n",
+    "  y = np.concatenate(ys)\n",
+    "  pred = np.concatenate(preds)\n",
+    "  fname = np.concatenate(fnames)\n",
+    "  y_df = pd.DataFrame(y, columns=[1,2,3])\n",
+    "  pred_df = pd.DataFrame(pred, columns=[1,2,3])\n",
+    "  fname_df = pd.DataFrame(np.atleast_2d(fname).T, columns=[\"filenames\"])\n",
+    "\n",
+    "  # Create class, prediction, slide_num DataFrames\n",
+    "  y_class_df = y_df.idxmax(axis=1)\n",
+    "  pred_class_df = pred_df.idxmax(axis=1)\n",
+    "  y_class_df.name = \"actual\"\n",
+    "  pred_class_df.name = \"predicted\"\n",
+    "  slide_info_df = fname_df.filenames.str.extract('(?P<class>\\d)\\/\\d+_(?P<slide_num>\\d+)_\\d+.jpeg', expand=True)\n",
+    "  slide_info_df[\"class\"] = slide_info_df[\"class\"].astype(int)\n",
+    "  slide_info_df[\"slide_num\"] = slide_info_df[\"slide_num\"].astype(int)\n",
+    "  df = pd.concat([fname_df, slide_info_df, y_class_df, pred_class_df], axis=1)\n",
+    "  \n",
+    "  # sanity check\n",
+    "  assert np.allclose(df[\"class\"], df.actual)\n",
+    "  \n",
+    "  # Create Contingency matrix\n",
+    "  contingency_mat = pd.crosstab(df.actual, df.predicted)\n",
+    "\n",
+    "#   # Save DataFrames\n",
+    "#   y_df.to_csv(os.path.join(exp_dir, \"{model_ck}-{perc}%-{data}-y_df.csv\".format(model_ck=model_file[:-5], perc=100*perc, data=name)), header=True)\n",
+    "#   pred_df.to_csv(os.path.join(exp_dir, \"{model_ck}-{perc}%-{data}-pred_df.csv\".format(model_ck=model_file[:-5], perc=100*perc, data=name)), header=True)\n",
+    "#   df.to_csv(os.path.join(exp_dir, \"{model_ck}-{perc}%-{data}-df.csv\".format(model_ck=model_file[:-5], perc=100*perc, data=name)), header=True)\n",
+    "\n",
+    "#   # Save results\n",
+    "#   with open(os.path.join(exp_dir, \"{model_ck}-{perc}%-{data}-results.txt\".format(model_ck=model_file[:-5], perc=100*perc, data=name)), 'w') as f:\n",
+    "#     print(\"Dataset: {}\".format(name), file=f)\n",
+    "#     print(\"Number of samples: {}\".format(len(y_df)), file=f)\n",
+    "#     print(contingency_mat, file=f)\n",
+    "#     print(\"Accuracy: {}\".format(np.mean(np.equal(y_class, pred_class))), file=f)\n",
+    "  print(\"Number of samples: {}\".format(len(y_df)))\n",
+    "  print(contingency_mat)\n",
+    "  print(\"Accuracy: {}\".format(np.mean(np.equal(y_class_df, pred_class_df))))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "len(y_df), len(pred_df), len(fname_df), len(df)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "df"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "df2 = df.loc[:, [\"slide_num\", \"actual\", \"predicted\"]]\n",
+    "df2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "df3 = df2.groupby(\"slide_num\").mean()\n",
+    "df3[\"predicted_round\"] = df3.predicted.map(round)\n",
+    "df3"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "sum(df3.actual == df3.predicted_round) / len(df3)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "pd.crosstab(df3.actual, df3.predicted_round)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "gb = df2.groupby([\"slide_num\"])  #, \"predicted\"])\n",
+    "gb.describe()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "---"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Read in predictions + true DataFrames and extract metrics"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# # Read DataFrames\n",
+    "# y_df = pd.read_csv(os.path.join(exp_dir, \"{}-y_df.csv\".format(model_file[:-5])), index_col=0)\n",
+    "# pred_df = pd.read_csv(os.path.join(exp_dir, \"{}-pred_df.csv\".format(model_file[:-5])), index_col=0)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# # Create Contingency matrix\n",
+    "# y_class = y_df.idxmax(axis=1)\n",
+    "# pred_class = pred_df.idxmax(axis=1)\n",
+    "# y_class.name = \"Actual\"\n",
+    "# pred_class.name = \"Predicted\"\n",
+    "# contingency_mat = pd.crosstab(y_class, pred_class)\n",
+    "\n",
+    "# print(\"Number of samples: {}\".format(len(y_df)))\n",
+    "# print(contingency_mat)\n",
+    "# print(\"Accuracy: {}\".format(np.mean(np.equal(y_class, pred_class))))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true,
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "# # --- Alternate approach with NumPy arrays only\n",
+    "# y_c = np.argmax(y, axis=1) + 1\n",
+    "# pred_c = np.argmax(pred, axis=1) + 1\n",
+    "# y_actu = pd.Series(y_c, name=\"Actual\")\n",
+    "# y_pred = pd.Series(pred_c, name=\"Predicted\")\n",
+    "# contingency_mat = pd.crosstab(y_actu, y_pred)\n",
+    "\n",
+    "# print(\"Number of samples: {}\".format(len(y_c)))\n",
+    "# print(contingency_mat)\n",
+    "# print(\"Accuracy: {}\".format(np.mean(np.equal(y_c, pred_c))))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
+   "source": [
+    "---"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Sample images + predictions & write to disk"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# path_template = os.path.join(\"visualize\", \"{dataset}\", \"Pred_{pred}-Actual_{actual}\")\n",
+    "# for dataset in [\"train\", \"val\"]:\n",
+    "#   for i in range(3):\n",
+    "#     for j in range(3):\n",
+    "#       os.makedirs(path_template.format(dataset=dataset, pred=i+1, actual=j+1), exist_ok=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true,
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "# filename_template = os.path.join(path_template, \"{hash}.jpeg\")\n",
+    "# batches = 8\n",
+    "\n",
+    "# for dataset in [(\"train\", train_generator_orig), (\"val\", val_generator_orig)]:\n",
+    "#   name, gen = dataset\n",
+    "#   print(name)\n",
+    "  \n",
+    "#   for i in range(batches):\n",
+    "#     # Get batch.\n",
+    "#     x_orig, y_orig = next(gen)\n",
+    "#     x = preprocess_input(np.copy(x_orig))\n",
+    "#     y = y_orig\n",
+    "\n",
+    "#     # Get predictions\n",
+    "#     raw_preds = model.predict(x)\n",
+    "#     raw_metrics = model.evaluate(x, y)\n",
+    "#     labeled_metrics, metrics = extract_metrics(model, raw_metrics)\n",
+    "\n",
+    "#     # Create contingency matrix\n",
+    "#     y = np.argmax(y, axis=1)+1\n",
+    "#     preds = np.argmax(raw_preds, axis=1)+1\n",
+    "#     y_actu = pd.Series(y, name=\"Actual\")\n",
+    "#     y_pred = pd.Series(preds, name=\"Predicted\")\n",
+    "#     contingency_mat = pd.crosstab(y_actu, y_pred)\n",
+    "\n",
+    "# #     # Output images in directories based on misclassification.\n",
+    "# #     def plot(x, y):\n",
+    "# #       r, c = 6, 6\n",
+    "# #       fig, ax = plt.subplots(r, c)\n",
+    "# #       plt.setp(ax, xticks=[], yticks=[])\n",
+    "# #       plt.tight_layout()\n",
+    "# #       batch_size = x.shape[0]\n",
+    "# #       for i in range(r):\n",
+    "# #         for j in range(c):\n",
+    "# #           if i*c + j < batch_size:\n",
+    "# #             ax[i][j].imshow(x[i*c + j].astype(np.uint8))\n",
+    "# #             ax[i][j].set_xlabel(\"{preds}-{y}\".format(y=y[i*c + j], preds=preds[i*c + j]))\n",
+    "\n",
+    "# #     plot(x_orig, y)\n",
+    "# #     plt.show()\n",
+    "\n",
+    "#     for n in range(x_orig.shape[0]):\n",
+    "#       img = Image.fromarray(x_orig[n].astype(np.uint8), 'RGB')\n",
+    "#       filename = filename_template.format(dataset=name, pred=preds[n], actual=y[n], hash=np.random.randint(1e6))\n",
+    "#       img.save(filename)\n",
+    "\n",
+    "#     print(contingency_mat)\n",
+    "#     print(np.mean(y==preds))\n",
+    "#     print(labeled_metrics)\n",
+    "#     print(metrics)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "---"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Predict"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "x, label, _ = (next(train_generator_orig))\n",
+    "Image.fromarray((x[0]).astype(np.uint8))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "preds = resnet50.predict(preprocess_input(x[0].reshape(1, 224, 224, 3)))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(\"Actual: {}\".format(label[0]))\n",
+    "print(\"Pred:   {}\".format(preds[0]))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "---"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Cleanup"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Stop processes cleanly.  Otherwise, zombie processes will\n",
+    "# persist and hold onto GPU memory.\n",
+    "try:\n",
+    "    pool.terminate()\n",
+    "except:\n",
+    "    pass\n",
+    "for p in mp.active_children():\n",
+    "  p.terminate()\n",
+    "mp.active_children()"
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/MachineLearning-Keras-ResNet50.ipynb
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/MachineLearning-Keras-ResNet50.ipynb b/projects/breast_cancer/MachineLearning-Keras-ResNet50.ipynb
new file mode 100644
index 0000000..331b666
--- /dev/null
+++ b/projects/breast_cancer/MachineLearning-Keras-ResNet50.ipynb
@@ -0,0 +1,717 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Imports"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%load_ext autoreload\n",
+    "%autoreload 2\n",
+    "%matplotlib inline\n",
+    "\n",
+    "import math\n",
+    "import multiprocessing as mp\n",
+    "import os\n",
+    "\n",
+    "import keras\n",
+    "import keras.backend as K\n",
+    "from keras.applications.resnet50 import ResNet50\n",
+    "from keras.callbacks import ModelCheckpoint, TensorBoard\n",
+    "from keras.initializers import VarianceScaling\n",
+    "from keras.layers import Dense, Dropout, Flatten, GlobalAveragePooling2D, Input, Lambda, merge\n",
+    "from keras.models import Model, load_model\n",
+    "from keras.optimizers import SGD\n",
+    "from keras.preprocessing.image import ImageDataGenerator\n",
+    "from keras.regularizers import l2\n",
+    "from keras.utils import to_categorical\n",
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "import pandas as pd\n",
+    "from PIL import Image\n",
+    "import tensorflow as tf\n",
+    "\n",
+    "# After move to Keras 2.0 API, need to check if this can still be used.\n",
+    "# from preprocessing.image import ImageDataGenerator  # multiprocessing ImageDataGenerator\n",
+    "\n",
+    "plt.rcParams['figure.figsize'] = (10, 10)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Settings"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# os.environ['CUDA_VISIBLE_DEVICES'] = \"\"\n",
+    "size = 224\n",
+    "channels = 3\n",
+    "data_format = 'channels_last'  # channels_first is too slow, prob due to unnecessary conversions\n",
+    "classes = 3\n",
+    "p = 0.01\n",
+    "val_p = 0.01\n",
+    "num_gpus = 4\n",
+    "batch_size = 32 * num_gpus  # for 2 GPUs, 32/GPU has 1.2x systems speedup over 16/GPU\n",
+    "train_dir = \"train_updated_norm_v3\"\n",
+    "val_dir = \"val_updated_norm_v3\"\n",
+    "new_run = True\n",
+    "experiment_template = \"resnet50-{p}%-{num_gpus}-gpu-{batch_size}-batch-size-{train_dir}-data-{val_p}%-val-sanity\"\n",
+    "experiment = experiment_template.format(p=int(p*100), val_p=int(val_p*100), num_gpus=num_gpus,\n",
+    "                                        batch_size=batch_size, train_dir=train_dir)\n",
+    "print(experiment)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "K.set_image_data_format(data_format)\n",
+    "if data_format == 'channels_first':\n",
+    "  input_shape = (channels, size, size)\n",
+    "else:\n",
+    "  input_shape = (size, size, channels)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Setup experiment directory"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def get_run_dir(path, new_run):\n",
+    "  \"\"\"Create a directory for this training run.\"\"\"\n",
+    "  os.makedirs(path, exist_ok=True)\n",
+    "  num_experiments = len(os.listdir(path))\n",
+    "  if new_run:\n",
+    "    run = num_experiments  # run 0, 1, 2, ...\n",
+    "  else:\n",
+    "    run = min(0, num_experiments - 1)  # continue training\n",
+    "  run_dir = os.path.join(path, str(run))\n",
+    "  os.makedirs(run_dir, exist_ok=True)\n",
+    "  return run_dir\n",
+    "\n",
+    "def get_experiment_dir(experiment, new_run):\n",
+    "  \"\"\"Create an experiment directory for this experiment.\"\"\"\n",
+    "  base_dir = os.path.join(\"experiments\", \"keras\", experiment)\n",
+    "  exp_dir = get_run_dir(base_dir, new_run)\n",
+    "  return exp_dir\n",
+    "\n",
+    "exp_dir = get_experiment_dir(experiment, new_run=new_run)\n",
+    "print(exp_dir)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Create train & val data generators"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def preprocess_input(x):\n",
+    "  \"\"\"\n",
+    "  Preprocesses a tensor encoding a batch of images.\n",
+    "\n",
+    "  Adapted from keras/applications/imagenet_utils.py\n",
+    "\n",
+    "  # Arguments\n",
+    "      x: input Numpy tensor, 4D of shape (N, H, W, C).\n",
+    "  # Returns\n",
+    "      Preprocessed tensor.\n",
+    "  \"\"\"\n",
+    "  # Zero-center by subtracting mean pixel value per channel\n",
+    "  # based on means from a 50%, evenly-distributed sample.\n",
+    "  # Means: updated-data norm v3, norm, no-norm original\n",
+    "  x[:, :, :, 0] -= 183.36777842  #189.54944625  #194.27633667\n",
+    "  x[:, :, :, 1] -= 138.81743141  #152.73427159  #145.3067627\n",
+    "  x[:, :, :, 2] -= 166.07406199  #176.89543273  #181.27861023 \n",
+    "  x = x[:, :, :, ::-1]  # 'RGB'->'BGR' due to pretrained ResNet\n",
+    "  return x\n",
+    "\n",
+    "# Multi-GPU exploitation\n",
+    "def split(x, num_splits):\n",
+    "  \"\"\"Split batch into K equal-sized batches.\"\"\"\n",
+    "  # Split tensors evenly, even if it means throwing away a few examples.\n",
+    "  samples = math.floor(len(x) / num_splits)\n",
+    "  x_splits = [arr[:samples] for arr in np.array_split(x, num_splits)]\n",
+    "  return x_splits\n",
+    "\n",
+    "def gen_preprocessed_batch(batch_generator, num_gpus):\n",
+    "  \"\"\"Yield preprocessed batches of x,y data.\"\"\"\n",
+    "  for xs, ys in batch_generator:\n",
+    "    yield split(preprocess_input(xs), num_gpus), split(ys, num_gpus)\n",
+    "#     yield split(xs, num_gpus), split(ys, num_gpus)  # for tf aug experiments"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "K.image_data_format()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "train_save_dir = \"images/{stage}/{p}\".format(stage=train_dir, p=p)\n",
+    "val_save_dir = \"images/{stage}/{p}\".format(stage=val_dir, p=val_p)\n",
+    "print(train_save_dir, val_save_dir)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Create train & val image generators\n",
+    "#try:\n",
+    "#  # For interactive work, kill any existing pool.\n",
+    "#  pool.terminate()\n",
+    "#except:\n",
+    "#  pass\n",
+    "#pool = mp.Pool(processes=8)\n",
+    "#train_datagen = ImageDataGenerator(pool=pool, horizontal_flip=True, vertical_flip=True,\n",
+    "#                                   rotation_range=180, shear_range=0.1, fill_mode='reflect')\n",
+    "#val_datagen = ImageDataGenerator(pool=pool)\n",
+    "\n",
+    "train_datagen = ImageDataGenerator(horizontal_flip=True, vertical_flip=True)  #, samplewise_center=True)\n",
+    "                                   #rotation_range=180, shear_range=0.1, fill_mode='reflect')\n",
+    "val_datagen = ImageDataGenerator()\n",
+    "train_generator_orig = train_datagen.flow_from_directory(train_save_dir, batch_size=batch_size, target_size=(size, size))\n",
+    "val_generator_orig = val_datagen.flow_from_directory(val_save_dir, batch_size=batch_size, target_size=(size, size))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Create train & val preprocessed generators\n",
+    "train_generator = gen_preprocessed_batch(train_generator_orig, num_gpus)\n",
+    "val_generator = gen_preprocessed_batch(val_generator_orig, num_gpus)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Get number of batches"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "# Number of examples.\n",
+    "tc = train_generator_orig.samples\n",
+    "vc = val_generator_orig.samples\n",
+    "\n",
+    "# Number of batches for multi-GPU exploitation.\n",
+    "# Note: Multi-GPU exploitation for data parallelism splits mini-batches\n",
+    "# into a set of micro-batches to be run in parallel on each GPU, but\n",
+    "# Keras will view the set of micro-batches as a single batch with\n",
+    "# multiple sources of inputs (i.e. Keras will view a set of examples\n",
+    "# being run in parallel as a single example with multiple sources of\n",
+    "# inputs).\n",
+    "train_batches = int(math.ceil(tc/batch_size))\n",
+    "val_batches = int(math.ceil(vc/batch_size))\n",
+    "\n",
+    "# Class counts (just for information)\n",
+    "train_class_counts = np.bincount(train_generator_orig.classes)\n",
+    "val_class_counts = np.bincount(val_generator_orig.classes)\n",
+    "\n",
+    "print(tc, vc)\n",
+    "print(train_batches, val_batches)\n",
+    "print(train_class_counts / np.sum(train_class_counts), val_class_counts / np.sum(val_class_counts))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Generate class weights for training"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class_counts = np.bincount(train_generator_orig.classes)\n",
+    "class_weights = dict(zip(range(classes), min(class_counts) / class_counts))\n",
+    "print(class_counts)\n",
+    "print(class_weights)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Plot random images (Optional)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def show_random_image(save_dir):\n",
+    "  c = np.random.randint(1, 4)\n",
+    "  class_dir = os.path.join(save_dir, str(c))\n",
+    "  files = os.listdir(class_dir)\n",
+    "  i = np.random.randint(0, len(files))\n",
+    "  fname = os.path.join(class_dir, files[i])\n",
+    "  print(fname)\n",
+    "  img = Image.open(fname)\n",
+    "  plt.imshow(img)\n",
+    "\n",
+    "# show_random_image(train_save_dir)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def plot(gen):\n",
+    "  r, c = 6, 6\n",
+    "  fig, ax = plt.subplots(r, c)\n",
+    "  plt.setp(ax, xticks=[], yticks=[])\n",
+    "  plt.tight_layout()\n",
+    "  x, y = next(gen)\n",
+    "  batch_size = x.shape[0]\n",
+    "  for i in range(r):\n",
+    "    for j in range(c):\n",
+    "      if i*c + j < batch_size:\n",
+    "        im = x[i*c + j].astype(np.uint8)\n",
+    "        if K.image_data_format() == 'channels_first':\n",
+    "          im = im.transpose(1,2,0)  # (C,H,W) -> (H,W,C)\n",
+    "        ax[i][j].imshow(im)\n",
+    "        ax[i][j].set_xlabel(y[i*c + j])\n",
+    "\n",
+    "plot(train_generator_orig)\n",
+    "plot(val_generator_orig)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
+   "source": [
+    "# Training\n",
+    "1. Setup ResNet50 pretrained model with new input & output layers.\n",
+    "2. Train new output layers (all others frozen).\n",
+    "3. Fine tune [some subset of the] original layers.\n",
+    "4. Profit."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Setup training metrics & callbacks"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Setup training metrics & callbacks\n",
+    "# Careful, TensorBoard callback could OOM with large validation set\n",
+    "# TODO: Add input images to TensorBoard output (maybe as a separate callback)\n",
+    "# TODO: Monitor size of input queues with callbacks\n",
+    "model_filename = os.path.join(exp_dir, \"{val_loss:.2f}-{epoch:02d}.hdf5\")\n",
+    "checkpointer = ModelCheckpoint(model_filename)\n",
+    "tensorboard = TensorBoard(log_dir=exp_dir, write_graph=False)\n",
+    "callbacks = [checkpointer, tensorboard]\n",
+    "metrics = ['accuracy'] #, fmeasure, precision, recall]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Setup ResNet50 model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "## Color augmentation\n",
+    "## TODO: Visualize this in TensorBoard with custom callback every ~100 iterations\n",
+    "#def preprocess(x):\n",
+    "#  # import these inside this function so that future model loads\n",
+    "#  # will not complain about `tf` not being defined\n",
+    "#  import tensorflow as tf\n",
+    "#  import keras.backend as K\n",
+    "#  \n",
+    "#  def augment(img):\n",
+    "#    img = tf.image.random_brightness(img, max_delta=64/255)\n",
+    "#    img = tf.image.random_saturation(img, lower=0, upper=0.25)\n",
+    "#    img = tf.image.random_hue(img, max_delta=0.04)\n",
+    "#    img = tf.image.random_contrast(img, lower=0, upper=0.75)\n",
+    "#    return img\n",
+    "#  \n",
+    "#  # Fix dimensions for tf.image ops\n",
+    "#  if K.image_data_format() == 'channels_first':\n",
+    "#    x = tf.transpose(x, [0,2,3,1])  # (N,C,H,W) -> (N,H,W,C)\n",
+    "#    \n",
+    "#  # Augment during training.\n",
+    "#  x = K.in_train_phase(tf.map_fn(augment, x, swap_memory=True), x)\n",
+    "#  \n",
+    "#  # Zero-center by subtracting mean pixel value per channel\n",
+    "#  # based on means from a 50%, evenly-distributed sample.\n",
+    "#  # Means: updated-data norm v3, norm, no-norm original\n",
+    "#  x = x - [183.36777842, 138.81743141, 166.07406199]\n",
+    "#  x = tf.reverse(x, axis=[-1])\n",
+    "#  \n",
+    "#  if K.image_data_format() == 'channels_first':\n",
+    "#    x = tf.transpose(x, [0,3,1,2])  # (N,H,W,C) -> (N,C,H,W)\n",
+    "#  return x"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true,
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "K.clear_session()\n",
+    "\n",
+    "# Create model by replacing classifier of ResNet50 model with new\n",
+    "# classifier specific to the breast cancer problem.\n",
+    "with tf.device(\"/cpu\"):\n",
+    "  inputs = Input(shape=input_shape)\n",
+    "  x = inputs\n",
+    "  #x = Lambda(preprocess)(x)\n",
+    "  resnet50_base = ResNet50(include_top=False, input_shape=input_shape, input_tensor=x)  #weights=None)\n",
+    "  x = Flatten()(resnet50_base.output)  # could also use GlobalAveragePooling2D since output is (None, 1, 1, 2048)\n",
+    "  x = Dropout(0.5)(x)\n",
+    "  # init Dense weights with Gaussian scaled by sqrt(1/fan_in)\n",
+    "  preds = Dense(classes, kernel_initializer=VarianceScaling(), activation=\"softmax\")(x)\n",
+    "#   resnet50 = Model(input=resnet50_base.input, output=preds, name=\"resnet50\")\n",
+    "  resnet50 = Model(inputs=inputs, outputs=preds, name=\"resnet50\")\n",
+    "\n",
+    "# Multi-GPU exploitation via a linear combination of GPU loss functions.\n",
+    "ins = []\n",
+    "outs = []\n",
+    "for i in range(num_gpus):\n",
+    "  with tf.device(\"/gpu:{}\".format(i)):\n",
+    "    x = Input(shape=input_shape)  # split of batch\n",
+    "    out = resnet50(x)  # run split on shared model\n",
+    "    ins.append(x)\n",
+    "    outs.append(out)\n",
+    "model = Model(inputs=ins, outputs=outs)  # multi-GPU, data-parallel model\n",
+    "\n",
+    "# Freeze all pre-trained ResNet layers.\n",
+    "for layer in resnet50_base.layers:\n",
+    "  layer.trainable = False\n",
+    "\n",
+    "# Compile model.\n",
+    "#optim = SGD(lr=0.1, momentum=0.9, decay=0.99, nesterov=True)\n",
+    "#optim = keras.optimizers.RMSprop(lr=0.05)\n",
+    "optim = keras.optimizers.Adam(lr=0.001)\n",
+    "model.compile(optimizer=optim, loss=\"categorical_crossentropy\",\n",
+    "              loss_weights=[1/num_gpus]*num_gpus, metrics=metrics)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "# Explore model\n",
+    "# for x in model.inputs + model.outputs + model.metrics_tensors + model.targets:\n",
+    "#   print(x.name, x.device)  # check that tensor devices exploit multi-GPU\n",
+    "\n",
+    "# for i, layer in enumerate(resnet50.layers):\n",
+    "#   print(i, layer.name, layer.input_shape, layer.output_shape)\n",
+    "\n",
+    "# print(model.summary())\n",
+    "print(resnet50.summary())"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "# Visualize Model\n",
+    "from IPython.display import SVG\n",
+    "from keras.utils.vis_utils import model_to_dot\n",
+    "SVG(model_to_dot(resnet50).create(prog='dot', format='svg'))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Train new softmax classifier"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "# Dual-GPU speedup: ~1.7-1.8x\n",
+    "# Keras device placement improvements (metrics, losses) (no val or callbacks, full model):\n",
+    "#   batch_size=32,  2 gpus, 100 iters, no keras changes: 128s, 108s, 107s\n",
+    "#   batch_size=32,  2 gpus, 100 iters, w/ keras changes: 94s, 75s, 75s\n",
+    "#   batch_size=32,  1 gpu,  100 iters, w/ keras changes: 148s, 133s, 133s\n",
+    "#   batch_size=64,  2 gpus,  50 iters, w/ keras changes: 93s, 74s, 75s\n",
+    "#   batch_size=128, 2 gpus,  25 iters, w/ keras changes: 90s, 73s, 74s\n",
+    "epochs = 4\n",
+    "hist1 = model.fit_generator(train_generator, steps_per_epoch=train_batches,\n",
+    "                            validation_data=val_generator, validation_steps=val_batches,\n",
+    "                            epochs=epochs, class_weight=class_weights, callbacks=callbacks) #,\n",
+    "                            #max_q_size=8, nb_worker=1, pickle_safe=False)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Fine-tune model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "# Explore model\n",
+    "# for x in model.inputs + model.outputs + model.metrics_tensors + model.targets:\n",
+    "#   print(x.name, x.device)  # check that tensor devices exploit multi-GPU\n",
+    "\n",
+    "for i, layer in enumerate(resnet50_base.layers):\n",
+    "  print(i, layer.name, layer.input_shape, layer.output_shape)\n",
+    "\n",
+    "# print(model.summary())\n",
+    "# print(model.get_layer(\"resnet50\").summary())"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Unfreeze some subset of the model and fine-tune by training slowly with low lr.\n",
+    "for layer in resnet50_base.layers[164:]:  #[154:]:  # unfreeze final 2 residual blocks + exit flow ([154:])\n",
+    "  layer.trainable = True\n",
+    "#   if hasattr(layer, 'W_regularizer'):\n",
+    "#     layer.W_regularizer = l2(1e-4)\n",
+    "\n",
+    "optim = SGD(lr=0.0001, momentum=0.9)\n",
+    "# optim = keras.optimizers.Adam(lr=0.001)\n",
+    "model.compile(optimizer=optim, loss=\"categorical_crossentropy\",\n",
+    "              loss_weights=[1/num_gpus]*num_gpus, metrics=metrics)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(model.summary())"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# model.load_weights(os.path.join(\"experiments/keras/resnet50-100%-2-gpu-64-batch-size/0\", \"5.08-08.hdf5\"))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "initial_epoch = epochs\n",
+    "epochs = initial_epoch + 20\n",
+    "hist2 = model.fit_generator(train_generator, steps_per_epoch=train_batches,\n",
+    "                            validation_data=val_generator, validation_steps=val_batches,\n",
+    "                            epochs=epochs, initial_epoch=initial_epoch,\n",
+    "                            class_weight=class_weights, callbacks=callbacks) #,\n",
+    "                            #max_q_size=8, nb_worker=1, pickle_safe=False)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Evaluate model on validation set"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "raw_metrics = model.evaluate_generator(val_generator, steps=val_batches) #,\n",
+    "                                       #max_q_size=8, nb_worker=1, pickle_safe=False)\n",
+    "labeled_metrics = list(zip(model.metrics_names, raw_metrics))\n",
+    "losses = [v for k,v in labeled_metrics if k == \"loss\"]\n",
+    "accuracies = [v for k,v in labeled_metrics if k.endswith(\"acc\")]\n",
+    "loss = sum(losses) / num_gpus\n",
+    "acc = sum(accuracies) / num_gpus\n",
+    "metrics = {\"loss\": loss, \"acc\": acc}\n",
+    "print(labeled_metrics)\n",
+    "print(metrics)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Save model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "filename = \"{acc:.5}_acc_{loss:.5}_loss_model.hdf5\".format(**metrics)\n",
+    "fullpath = os.path.join(exp_dir, filename)\n",
+    "model.save(fullpath)\n",
+    "print(\"Saved model file to {}\".format(fullpath))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Cleanup"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# # Stop processes cleanly.  Otherwise, zombie processes will\n",
+    "# # persist and hold onto GPU memory.\n",
+    "# try:\n",
+    "#     pool.terminate()\n",
+    "# except:\n",
+    "#     pass\n",
+    "# for p in mp.active_children():\n",
+    "#   p.terminate()\n",
+    "# mp.active_children()"
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/MachineLearning.ipynb
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/MachineLearning.ipynb b/projects/breast_cancer/MachineLearning.ipynb
index 0ac880c..b27116f 100644
--- a/projects/breast_cancer/MachineLearning.ipynb
+++ b/projects/breast_cancer/MachineLearning.ipynb
@@ -2,10 +2,7 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "# Predicting Breast Cancer Proliferation Scores with Apache Spark and Apache SystemML\n",
     "\n",
@@ -15,10 +12,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "# Setup"
    ]
@@ -26,11 +20,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "%load_ext autoreload\n",
@@ -52,9 +42,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -63,10 +51,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "# Read in train & val data"
    ]
@@ -75,9 +60,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": true,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -92,11 +75,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "if p < 1:\n",
@@ -114,9 +93,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -129,9 +106,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -142,10 +117,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "# Extract X and Y matrices"
    ]
@@ -154,9 +126,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -171,10 +141,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "# Convert to SystemML Matrices\n",
     "Note: This allows for reuse of the matrices on multiple\n",
@@ -188,9 +155,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -216,10 +181,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "# Trigger Caching (Optional)\n",
     "Note: This will take a while and is not necessary, but doing it\n",
@@ -232,9 +194,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -252,10 +212,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "# Save Matrices (Optional)"
    ]
@@ -264,9 +221,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": true,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -282,30 +237,21 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "---"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "# Softmax Classifier"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "## Sanity Check: Overfit Small Portion"
    ]
@@ -314,14 +260,12 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
     "script = \"\"\"\n",
-    "source(\"softmax_clf.dml\") as clf\n",
+    "source(\"breastcancer/softmax_clf.dml\") as clf\n",
     "\n",
     "# Hyperparameters & Settings\n",
     "lr = 1e-2  # learning rate\n",
@@ -343,10 +287,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "## Train"
    ]
@@ -355,14 +296,12 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
     "script = \"\"\"\n",
-    "source(\"softmax_clf.dml\") as clf\n",
+    "source(\"breastcancer/softmax_clf.dml\") as clf\n",
     "\n",
     "# Hyperparameters & Settings\n",
     "lr = 5e-7  # learning rate\n",
@@ -383,10 +322,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "## Eval"
    ]
@@ -395,14 +331,12 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
     "script = \"\"\"\n",
-    "source(\"softmax_clf.dml\") as clf\n",
+    "source(\"breastcancer/softmax_clf.dml\") as clf\n",
     "\n",
     "# Eval\n",
     "probs = clf::predict(X, W, b)\n",
@@ -418,10 +352,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "---"
    ]
@@ -429,9 +360,7 @@
   {
    "cell_type": "markdown",
    "metadata": {
-    "collapsed": true,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "source": [
     "# LeNet-like ConvNet"
@@ -439,10 +368,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "## Sanity Check: Overfit Small Portion"
    ]
@@ -451,14 +377,12 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
     "script = \"\"\"\n",
-    "source(\"convnet.dml\") as clf\n",
+    "source(\"breastcancer/convnet.dml\") as clf\n",
     "\n",
     "# Hyperparameters & Settings\n",
     "lr = 1e-2  # learning rate\n",
@@ -484,10 +408,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "## Hyperparameter Search"
    ]
@@ -496,14 +417,12 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
     "script = \"\"\"\n",
-    "source(\"convnet.dml\") as clf\n",
+    "source(\"breastcancer/convnet.dml\") as clf\n",
     "\n",
     "dir = \"models/lenet-cnn/hyperparam-search/\"\n",
     "\n",
@@ -543,41 +462,67 @@
   },
   {
    "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Train"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
    "metadata": {
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
+   "outputs": [],
    "source": [
-    "## Train"
+    "ml.setStatistics(True)\n",
+    "ml.setExplain(True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# sc.setLogLevel(\"OFF\")"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true,
+    "scrolled": false
    },
    "outputs": [],
    "source": [
     "script = \"\"\"\n",
-    "source(\"convnet.dml\") as clf\n",
+    "source(\"breastcancer/convnet_distrib_sgd.dml\") as clf\n",
     "\n",
     "# Hyperparameters & Settings\n",
     "lr = 0.00205  # learning rate\n",
     "mu = 0.632  # momentum\n",
     "decay = 0.99  # learning rate decay constant\n",
     "lambda = 0.00385\n",
-    "batch_size = 32\n",
+    "batch_size = 1\n",
+    "parallel_batches = 19\n",
     "epochs = 1\n",
-    "log_interval = 10\n",
+    "log_interval = 1\n",
     "dir = \"models/lenet-cnn/train/\"\n",
+    "n = 50  #1216  # limit on number of samples (for debugging)\n",
+    "X = X[1:n,]\n",
+    "Y = Y[1:n,]\n",
+    "X_val = X_val[1:n,]\n",
+    "Y_val = Y_val[1:n,]\n",
     "\n",
     "# Train\n",
     "[Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2] =\n",
     "    clf::train(X, Y, X_val, Y_val, C, Hin, Win, lr, mu, decay,\n",
-    "               lambda, batch_size, epochs, log_interval, dir)\n",
+    "               lambda, batch_size, parallel_batches, epochs,\n",
+    "               log_interval, dir)\n",
     "\"\"\"\n",
     "outputs = (\"Wc1\", \"bc1\", \"Wc2\", \"bc2\", \"Wc3\", \"bc3\",\n",
     "           \"Wa1\", \"ba1\", \"Wa2\", \"ba2\")\n",
@@ -585,15 +530,53 @@
     "                            C=c, Hin=size, Win=size)\n",
     "                     .output(*outputs))\n",
     "outs = ml.execute(script).get(*outputs)\n",
-    "Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2 = outs"
+    "Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2 = outs\n",
+    "Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2"
    ]
   },
   {
-   "cell_type": "markdown",
+   "cell_type": "code",
+   "execution_count": null,
    "metadata": {
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
+   "outputs": [],
+   "source": [
+    "script = \"\"\"\n",
+    "source(\"breastcancer/convnet_distrib_sgd.dml\") as clf\n",
+    "\n",
+    "# Hyperparameters & Settings\n",
+    "lr = 0.00205  # learning rate\n",
+    "mu = 0.632  # momentum\n",
+    "decay = 0.99  # learning rate decay constant\n",
+    "lambda = 0.00385\n",
+    "batch_size = 1\n",
+    "parallel_batches = 19\n",
+    "epochs = 1\n",
+    "log_interval = 1\n",
+    "dir = \"models/lenet-cnn/train/\"\n",
+    "\n",
+    "# Dummy data\n",
+    "[X, Y, C, Hin, Win] = clf::generate_dummy_data(50)  #1216)\n",
+    "[X_val, Y_val, C, Hin, Win] = clf::generate_dummy_data(100)\n",
+    "\n",
+    "# Train\n",
+    "[Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2] =\n",
+    "    clf::train(X, Y, X_val, Y_val, C, Hin, Win, lr, mu, decay,\n",
+    "               lambda, batch_size, parallel_batches, epochs,\n",
+    "               log_interval, dir)\n",
+    "\"\"\"\n",
+    "outputs = (\"Wc1\", \"bc1\", \"Wc2\", \"bc2\", \"Wc3\", \"bc3\",\n",
+    "           \"Wa1\", \"ba1\", \"Wa2\", \"ba2\")\n",
+    "script = dml(script).output(*outputs)\n",
+    "outs = ml.execute(script).get(*outputs)\n",
+    "Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2 = outs\n",
+    "Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "## Eval"
    ]
@@ -602,14 +585,12 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
     "script = \"\"\"\n",
-    "source(\"convnet.dml\") as clf\n",
+    "source(\"breastcancer/convnet_distrib_sgd.dml\") as clf\n",
     "\n",
     "# Eval\n",
     "probs = clf::predict(X, C, Hin, Win, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)\n",
@@ -629,9 +610,94 @@
     "loss, acc, loss_val, acc_val = ml.execute(script).get(*outputs)\n",
     "loss, acc, loss_val, acc_val"
    ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "script = \"\"\"\n",
+    "source(\"breastcancer/convnet_distrib_sgd.dml\") as clf\n",
+    "\n",
+    "# Dummy data\n",
+    "[X, Y, C, Hin, Win] = clf::generate_dummy_data(1216)\n",
+    "[X_val, Y_val, C, Hin, Win] = clf::generate_dummy_data(100)\n",
+    "\n",
+    "# Eval\n",
+    "probs = clf::predict(X, C, Hin, Win, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)\n",
+    "[loss, accuracy] = clf::eval(probs, Y)\n",
+    "probs_val = clf::predict(X_val, C, Hin, Win, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)\n",
+    "[loss_val, accuracy_val] = clf::eval(probs_val, Y_val)\n",
+    "\"\"\"\n",
+    "outputs = (\"loss\", \"accuracy\", \"loss_val\", \"accuracy_val\")\n",
+    "script = (dml(script).input(Wc1=Wc1, bc1=bc1,\n",
+    "                            Wc2=Wc2, bc2=bc2,\n",
+    "                            Wc3=Wc3, bc3=bc3,\n",
+    "                            Wa1=Wa1, ba1=ba1,\n",
+    "                            Wa2=Wa2, ba2=ba2)\n",
+    "                     .output(*outputs))\n",
+    "loss, acc, loss_val, acc_val = ml.execute(script).get(*outputs)\n",
+    "loss, acc, loss_val, acc_val"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
+   "source": [
+    "---"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# script = \"\"\"\n",
+    "# N = 102400  # num examples\n",
+    "# C = 3  # num input channels\n",
+    "# Hin = 256  # input height\n",
+    "# Win = 256  # input width\n",
+    "# X = rand(rows=N, cols=C*Hin*Win, pdf=\"normal\")\n",
+    "# \"\"\"\n",
+    "# outputs = \"X\"\n",
+    "# script = dml(script).output(*outputs)\n",
+    "# thisX = ml.execute(script).get(*outputs)\n",
+    "# thisX"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# script = \"\"\"\n",
+    "# f = function(matrix[double] X) return(matrix[double] Y) {\n",
+    "#   if (1==1) {}\n",
+    "#   a = as.scalar(rand(rows=1, cols=1))\n",
+    "#   Y = X * a\n",
+    "# }\n",
+    "# Y = f(X)\n",
+    "# \"\"\"\n",
+    "# outputs = \"Y\"\n",
+    "# script = dml(script).input(X=thisX).output(*outputs)\n",
+    "# thisY = ml.execute(script).get(*outputs)\n",
+    "# thisY"
+   ]
   }
  ],
  "metadata": {
+  "anaconda-cloud": {},
   "kernelspec": {
    "display_name": "Python 3 + Spark 2.x + SystemML",
    "language": "python",
@@ -647,7 +713,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.6.0"
+   "version": "3.6.1"
   }
  },
  "nbformat": 4,


[3/5] systemml git commit: [SYSTEMML-1185][SYSTEMML-1766] Merge experimental breast cancer updates

Posted by du...@apache.org.
http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/approach.svg
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/approach.svg b/projects/breast_cancer/approach.svg
new file mode 100644
index 0000000..3c57460
--- /dev/null
+++ b/projects/breast_cancer/approach.svg
@@ -0,0 +1,4 @@
+<?xml version="1.0" standalone="yes"?>
+
+<svg version="1.1" viewBox="0.0 0.0 960.0 540.0" fill="none" stroke="none" stroke-linecap="square" stroke-miterlimit="10" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"><clipPath id="g18f7cf5d33_0_17.0"><path d="m0 0l960.0 0l0 540.0l-960.0 0l0 -540.0z" clip-rule="nonzero"></path></clipPath><g clip-path="url(#g18f7cf5d33_0_17.0)"><path fill="#000000" d="m0 0l960.0 0l0 540.0l-960.0 0z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0" d="m254.28084 371.81628l701.798 0l0 164.0315l-701.798 0z" fill-rule="nonzero"></path><g transform="matrix(0.9320026246719161 0.0 0.0 0.9319971128608923 254.28084015748033 371.81627296587925)"><clipPath id="g18f7cf5d33_0_17.1"><path d="m0 0l753.0 0l0 176.0l-753.0 0z" clip-rule="nonzero"></path></clipPath><image clip-path="url(#g18f7cf5d33_0_17.1)" fill="#000" width="753.0" height="176.0" x="0.0" y="0.0" preserveAspectRatio="none" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAvEAAACwCAYAAACGozEBA
 ACAAElEQVR42uzdCbhFVV33cSttsEG0UrMSxSm1RFMTtVJDMURt0CS0Ii1DKxXFFFFSE0SBRFI0RSwEp6RBRTE0isCcAgu1wVBU0IwmSa1sOu/z2e/7u+9it8+955x77j3D/a/nWc/eZ5999rD2Pnt/12/91n9dZ7Qi6bzzzht98pOf3LX9XX755aOPfOQjo0qLSxdddNHoiiuu2Jh/9rOfPXrjG984+u///u+x1+m000671vebJdv6n//5n8Hv/vqv/3p01FFHjb74xS/WhahUqVKlSpUqLV26zsoc6HWuM3rmM5+5a/uzryOPPLLukAWlf/mXf+muOSD/uZ/7uW7++7//+7spuB53nd75zneObnGLW0xUQcj2++nqq6/uvttnn326+UqVKlWqVKlSpYL4GdO+++47OuGEE+a6TYB21VVXDX5nX7tZaah07QSggXbmqeZJ//RP/7TpdQLgYH6z626dcRD/G7/xG90+53k/VapUqVKlSpUq7WmIZ38IgCVTbfvLo9ZmeeCK3eLEE0/s7BLt+n2YCxz+53/+Zwd0UWZ9Nv+Wt7ylWy+KMYCz7WwPCGZ/jiUq8gc+8IG667ZILDTtNUnZDYH5EMQre+sPpVy/rNO/7u6TXEP3iftn//3331jmOg/dE/376b/+678G7zvJ9sxnXfsYuneGjksrgylrUbsNtp+h4xr3v2jLVW4rRpUqVapUqVKlgvgdg/jDDjusAxdQA2iy3DzYM//gBz94Y3m81cDHOoEiwATAxsGh78BO1Fu/D3BJoAgwXXbZZd0y6wfoAH7WBf1sIWXP2Dops1YJb0HadCuIz7UdUtlt1zb+/d//feMatcm+3DcBW/eZe8y9Fmgeuif699O4+y4Q7zv3jH2Nu3eGzknrRNb3W8eYYxg6rnH/i6yjUqmVo1oQKlWqVKlSpYL4XYH4QEfgqL88MATkxsEUoJvUThN12O+zzQA6qMo8UI+H2zFYPolHu9J4iE+Kjx
 1YTwLxQL1NrXK9WStMe82ifLetKVm/vSf699NmEG+9tkVm3L3TP6e2TNp73Xaz7f5xbfW/aL+rVKlSpUqVKhXE7zjE9+GovzwgF/tEa2toYSrrj4P4/J56KQfYbCMQ1AJifNGmQKmFt0qzQXyrSqflYxaIdy1ti6oeJXqoZaTdf1p62us67p5o76dA8iT33bh7Z+icNqsgDB3XuP+F7fsuFZOh1qhKlSpVqlSpUkH8rkE8tRa8ga9YL2JniWe4hS7KJ0tCP5Rg4DCqpmTdeIpjx4g3PhYHlgXfsVUApoL47UN8ytl1ck1bS820dpqk3C9DISbb/cdaA6xVANwDH/vYxwbvif79tNl910L8uHtnHMSPa10aulfH/S9yb+Y46x6tVKlSpUqVCuJ3DOJPPfXULe00yWAFPElAqv0uMMXSMM5SEDhMx9V0Fsy68Re3ENh2Tkxnw4L46VMfwtMJdagT5hDEt30WtoL4IdBv7TT9jq2gftw90b+fxt13Qy1AQ/dOv0xSsRj3Hxg6riuvvHLwfxF4r46tlSpVqlSpUkH8QlPAhqq5KwX3/zzx446l0vbLNyEmpy3XnYgCtFvXdN772ep/UfdqpUqVKlWqVBC/0NS3GOxkSgfLKP2V5p9aa8g0CbzPEuN9XdNu/i8qVapUqVKlSgXxU6d00uuH5tuJxGs8FMu70nzTuPCfmyVe8H6H1r2cdvN/UalSpUqVKlUqiK9UqVKlSpUqVapUqVJBfKVlTSwfRjhlT/qP//iPykuUXRPXZrNIP5UqVapUqVKlgvhKeyyxewDFf/u3f+ssH6Kk/MM//MPo7//+7ysvMLsGroVr4tq4RtUJtlKlSpUqVSqIr1SpSxRekPiP//iPg6OpVl58dm1co1LjK1WqVKlSpYL4SpW6lFFUP/nJTxYwL2l2bVyjisZUqVKlSpUqFcRXqtQlvmu2jb/8y7/cgMZKS/Jw+H/Xw7VxjVyrSpUqVapUqVJBfKVKHRj
 yX//5n/95QfySQrxr4xoVxFeqVKlSpUoF8ZUqbUC8jpR/9md/VhC/pBDv2rhGBfGVKlWqVKlSQXylSgXxBfE7noYG/rLsi1/84tiOuv3l0w4etlXfAfuuSD+zX8dcv6FybcveetOU81Ydt21raL+Vdia5Hv1r4nP/OvevT3vN53297Lf6BlVaGYiv+N0V97sgviB+VSH+sMMO60bwTbriiitG++yzz//quHvZZZdtrHPqqad2y/xH8gz0WZ+ASdKDH/zgbkTncWn//ffvtrfZOpWunYzanOeC8m2vnety1VVXXWvZs5/97G7dD3zgAxM/T4wObd1x0Kdjd7Y/7SjSlaZPQFxZu7b5H/o/9/+7+R/l+vX/00arPvHEEyfa5y1ucYsuj0tvfOMbN7ZfkboqLT3EV/zuivtdEF8Qv6oQH7BLuuiiizbO5y1veUv3/Tvf+c7upd0CdR/iJetNcl8GJMZBQ47J9mZRCPeqeh9wDkS5lsrStbMctKucAbdAfkDbOoH6ra6b344rYzBoHyqCswBctbxMl1S+XQ/Ju6qtoLkGrrV1AtQB+lTc5DCM+auvvnpLgM+zYbNKhf1vta1KlZYC4ofid3/t137t6BnPeMboRS960eg1r3nN6Mwzz+wegKeffnr3gP293/u90Tve8Y7RH/7hH3b57W9/e/fCMn/BBReM3v3ud48uvvji0Z3vfOfR9a9//W6bN7/5zUe3v/3tR/e6171GN73pTUdf9VVfNfryL//y0bHHHjt6wxve0O3/b//2b0d/8zd/s5Evv/zy7iEt57PpX/3VX43+4i/+osvm8/nSSy/t/vSibPhsXWHzPv3pT3cvg4997GOjj370oxvr57cf+tCHNtaXs0/rtscjf/zjH++yB8ynPvWpbruf+cxnun383d/9XQdBn/vc50bXXHPN6POf//zoC1/4wrWyZQD7n//5n7tz/uxnP9tty/7+9E//tHtxnX/++V2ZK+u3vvWtoze9
 6U2jV73qVaMnPelJo6c85Smjpz3taaNf+ZVfGT396U8f/dIv/dLoOc95TrfsiCOOGH3lV37lUsb9ngbiq2Vod3Ouh/vPf/Bf//VfV6aFB3Cddtpp14IAy4Zgqj3mIYjPC9x/cFzyfEl5Zb/9BO7797dtb3VMUZVnUe/72570+kzyu2mv9Sz3BqhKmQW2h5T6dh+tGh8Vf9y+CRu+p/JudT/1K2dD125o2SzXbpJ7Yjev3W5WQlRwXRPvQykKvPdhP8Xa0ram9a+532sBG5f8X7f6f+cZ0D+GScvWMbTHOEuZj3tWbPVfm/Q+rUromkH8UPxuEP/Lv/zLo1/7tV/rYPKMM87oIPKVr3zl6LWvfW0Hl29+85tHb3vb20bnnXfe6Pd///c7ePfnAPLvf//7Rx/84Ae7B9pDH/rQbpu3vOUtR3e6051GBx988Og7vuM7OpAH8Y985CNHxx13XKcgg+EWnM23EA/C5YA8WM8UhAPEAHmg/BOf+ESX/SkD8Fm/rQDIlgfeTfO97fi9rCIRiFdmIB7Ag3GQCpwBvDIF7B5UyeDIFMxbx4sF+KsMOC9/fi8C5QjeU7YqOcpexeqYY47prs0LX/jCjfy85z2vg/tHP/rRo+td73pLGfd7UoivlqHdz7kef/RHf9Td90B+FVp4KGWt+hYFPE3zm6UhiJeoguOa2gMdYAAsDCnxngspT0CYbWZZfhMAyXp83W0rQvaTfbVKIsiVLG+3TUhprQDWHQKNlFMsR6btvqOMDh1jICfZMaS8Uz6tBWYaFb4P5LYdwBuCl5xzC18pm35KOTnWca0jqXy1xzJ07bzXsiw2rmmuXe7b9lo5pkVeu1Sisu/dSMo474Fcz9YWt1XKfy3llAr20PVNBd+5bea1b8vH9lP5a5f5bWvX885uKyHJni1thSDX3fG6DxxLfpPK5dD9NlQZiV0vZZYKStvKMHSf9i1plqf82n27FyutCMQPxe/++q//+g6sX/ayl
 3U326//+q93GUj6TJ3/rd/6rU4h/t3f/d3Ru971rtGFF17Yqe8gnqIHin13/PHHd9u87W1vO7rnPe85etSjHjW6973vPbrjHe/YQfxd7nKXDuRBMBgOxLcQHZAHugA6inqWZ11/4g9/+MOjj3zkI9eC71QIAut9Fd/vfVYB8Nm67fp+D7RVBlKZ6Kvx/qCBnAB8IB68Ax8PUlPLrAPkU3m58soru+MQ5s9D4T3vec9Geao4KXtl6bqccMIJoxe/+MXdMtmyJz7xiV3ZfsVXfMVSxv2eFOJrZNca2XVa9TaViUDQJJWLcRCfl2H/3NoXvJe4F7AX4JDy5WXoJe+ZECuA/3ugw3MKwDjeeLC9sPOS90L3TPG8cDypDAQE83LPizzb88yI0ui31g3UDYGg/bfgbXlsCo5l6BhbWPa8Yj8JOAWCnadrM6kdIQDXKpgtmA7ZZIa87X3LVH/7bXbc4yppykz5DV27PL+UVY4xz/9Jr13K32fb9ZtA96KuneN0P5n3292ooLfe9BxbgHja30vKbZyS31auk4cqfKkIuHY5poBung0pO2Xl2uUap/xt1/XIfZfjybVrt+W6uOZ+M+5ZMa5FwfZy3zge559KQfbdv09zDLab5Y4/90mOPZXjSisC8f343awuL3jBCzbA/aUvfeno5S9/+eiss84a/eZv/mZ3oU3B/Ote97pOMWavkVlBvEwA8Xvf+97Rq1/96m6b97jHPUY/8iM/0gHn4x//+NGhhx46+qZv+qbO/sFmY11wDGj91k0GxluVPJaaKOJDsJ9p1pWBfbaZ7QX2A/OBePuJwp9Kge1ln36T72XLwH1sNP7YsdGYb8HdfLLvY6tR/h68KgS2lwqIqeuiMgTkXYOXvOQlG5Ury1yDZz3rWaMjjzyyU+JVjJYx7vekEF8ju9bIrpOmvMxyHHnxTaIAj4P4vIj7EN+qtEMd7vrHlRd7QAmQZT5Qah+peATMW0jvb6v/fX/dgK39tK
 raOBAMqDmHFoZa8OgfY78jYta371a1nKZTaI6n/xv7Srm3Hubsp69UgtAhFTnHrCzdK+n7MARIfh+b1GbXzjFE6Wx9+ZNcu5xvW8lZ9LXLf8n+d+t/3fZjaL3o0/z32zLMNob87lnfNXd+gd2hykrKKPeZa5gWurb8bKtfkWi/71tzhiC+TZvdb/1nUXuftfts1f6h+7R//7StGTm/cTbBSksM8X246kN8wPHss8/uoFxmsTEFkTzy7B/sNR6QgXgedeBvm/e5z31GD3/4wzs7yE//9E+PfviHf7jzyYPOb/mWbxn99m//dgedjiXQDaxbkO9DvNz3rUehb5cF4FtIt90W4rOsVeqtH4jPtn0OxLeWHbBMWae6B+AngXjgT9l03v5gFP7sz9QxnHvuuV3LRyxNpqlQ6adw9NFHj5761Kd2nvhWiV+mjoqTQnyN7LrED5Ela+Hpq+Ztk/nQy7m1ZoyD+Kj7Q0p8QuGZ36wjZQtv1vNC9jv/e8doPirYEMS3L9FpID7lkWdN9jkOBHOO7T5a8Bg6xnEgmOZ324yiuJn3eMgWFdjx3GuvXwsz2f+Qkg7ihzzRUSXbSsI4JbZfvkPXLpDdKpzTXLt++S/LtbM8NpHdAPn+fyjgOqSkty0uAc++WryZmt9vpRvXh6Itw7R0KEvrKZO8v9Pq4nkxVP7t59x3LWD3743N7rchiO//H4YqCkP36WYQ35bT0P+r0gpBPHW8hXjeeDcdcATuwBHUv+IVr+jm+eb5tvnk2T/8iaJ0s9zw2D/wgQ/sbqqDDjpo9F3f9V2dvYY67zv5sY99bPeHAMKt3aWF6hbi448HvfG9y21n2PzeucnmA/8B9+R44a0D+K3fQr7zkdNCkByIjxc+vvfkL33pSxvxjvMAsCze+Bbk0/m2PWf7Y1dSSVL+ssrU61//+tEpp5yy4Yfnk9fJNR2JVxnia2TX5Yb4ZWnhycupBb624ylI89k0cLIVxAemtrIT9AF
 6HMTH95tmaqqY38XPnnK1PDYd2bp+EyVapSkVjHEQ33be9HvrD0HtZiDYWgCGjjEdof3GsyuKpudZfOCBlUlV1T58pcxcu0BFQDOgp6KQ8sm1cg7j9pnrD/xyzEMtNm2ZDl07fZBybLEgBCYnvXZDEL/oa6dc/S4tArsRmaXfctLanlxf94OyDYwGbtv/t3PJvZHrMa4SEDhtI9uMuxezjTaUJUZwvKyvKbdcV9cvx++Y7MMy12bI6jIE8eOeFbNAfASN/n3a2mkC8InGlD4orUWo0opCPKg+6aSTOsWX2gsWZYq8mw/Am/reDQcsQTzbB0vN+973vg54+bZ//ud/vtvmjW50o64zqw6ubDVuFDCf5SBeJYC9ZMiz3lpkWk+6PxQvfSLQaOpvO6H6XQA8fnfLWlvMkK0GqPje+tlOwD9RasB7OrUC+FaBbzuzxhMP3pOzPP54IG9btpsWCJ2D2Yz8+ViWfud3fqdrsfDQUpl67nOf20WoMWVTMu/arTrEVyjK5Yb4ZbmvAj594PAcaDt+pRNfq9B5dg1BvBfuZhEuJoH4VBqS+h3e4jltOx4GVFvvbl7++T7nFAW5r/xK7XbHdVAbAsGccwsFQ8foOT8u/n4LR23UkUnvrYCJ65mm/YCt42o7frY5LSRD6mxSfOb9zn9D17Ut06Fr149hn0rfpNeub3lYhmuX1pO2c+xutaS1CXy2x9KPBNVew35H3FTqx7UitPdn+qyMg/i0VriH206syqbti5Dvoly3tru2n0K7biB+yPo1dL9tV4nvb69fdm3rwlb/j0orAvE6trpRvOio7FQrEA/K3Xy82VR6IOl7vngPHJAJNgPxOmL+5E/+ZLfNG9zgBqOb3OQmo9vc5jajH/3RH+06swpBCeK//du/vfN1U5xjpQCyQ6Ee+xCfEI+yeVCfTqgB+X40mlhnWltMfPcB6Nhm0qG2DW/ps+3bV8JKthAfQA+kbwbxsd/4vXOw3Ryna8Ka
 5IGscsS2dM4553QPYy0gwkw+85nP7NR4YSYp8qxQBfGV9gLER80bF9Fi2vBqUa8miW4zS9rsWIY6yE7622n3tZ3j71sE5rXvoRj901y/ANxWadbO2OOux9Dx7bVrN2vK/21IOc+1muZ4PAsmCfE57/j/Q5ac7ZbrvK5D/z4NxI8b5bhCTK4BxH/Zl31ZB/EAHKjzvAN3lhowz17DD5+OlYASwCdSDV+82icAZvHQgdV2+d5ZaHjjhZl80IMeNLrf/e7XQfx+++3XhagEySA+sOxhE/tLwD3RacZBPDW7/RyLim22kE5tby00scq0nVYD9tmPyoH1gbbPFHg2GMfcxoVv1fd+dJp45NuczrCJVKM1wTE7RhCvszCIVztWYdI64pqoaD3/+c/fAPjHPOYxa+GJL4gviJ9WjZ+Hh5e6VV7QraFgM/Da7v01a1SMaSOb1LVbjjROkZ42jWvdqHTtNGTlqrRmEB9PPAAH7dT2hDKkzCciDXinvFOHEzMebCZOPCgGoH5ve9R2Vhox4u9+97t3YSbvf//7d8uEmQTxwDWeeBAdUE+O7x3kZr6105hPtk6+y/rpnBplP9sN2MttJ9ZAfGwz6XiaYwnIe4CAeVYgzW+tGt9X5vvZ987ZbxNm0/E6fx2DDZylXA2oxUrT9klgbzIgl8g0P/uzPzt6xCMesRGdRmWsIL7SukO8RETYahCfrVKak0uJWhwIxqYwbSJuTBNfvK7dFUt1XAm1uZ1EhZ92kKWC+EprC/EGDGJvYZ8BjeA96nsAHlB6eIqcAuAz4BPoTMdWf0zfsdJ867d+axeNxh+WjeZ7vud7Rg94wAM6qDevIgCaW4gfgndqeEA981Hc+3aaKPPxyqczbHK223YkDeQndrzjsI4BcGJ3yXHZj+37LoPhOP4+xG+WKfB+5xokXrx9KD/l+Md//Mdd2YJ4VppcD5UqtiYVrV/4hV/oIv7oaxCIN113iK+RXWtk10qVpk2r9Nyo/
 1GlSpWmhni+ahYNgBhgpP6CeJFRQDwFnkIM3GUKfCLTqBEDYYBrmZFadeoQF15MeB1aqe/3ve99O6CXAShIBQZRzYF0wD1QDh740E0D6LHQtNPYbNp1bUdO59eM4JrQk0NhKinv7T5sL157FYEo/gF5HVSp8fHHD/nk25FbAXyOLees8qA1A8BrXv6DP/iDrqyVuVCTlEfXwPWgyD/hCU/orDRCeLZx4tcZ4mtk1xrZtVKlWdToVXhu1P+oUqVKU0M8C0YgniceIAJ4oSRNebHBI9jWmfLkk0/uOryydAB+g0Ilag3Yt96v/uqvjm54wxt2UVMo8tT4W93qVqNb3/rWo3333Xd0u9vdrlPjKfWaRIVOBMrAth2lFcRndFS2kxbawYRlpm22HTn++DYUpW22sG4/4NzydGLNflulv7XltC0FYN7xsNZQ4zNq6zgF3jqx0fhtBqVyHS655JKuRUMFKH54SrzWCi0hylZEIC0lPPGsNJT4vQTxNbJrjexaqdIsKvyqPTfqf1SpUqWpIR6k81wDeLYN00A8lf5JT3pSZ5UBl3JGbWX9EGWGWnfhhRd28c1FUDn22GO736kcnHDCCV22D3F3eeR1yKTYC5fo9yoCft9aXwLx6bgaL3wgPssD9uMgPvAdiO/Hmg/It973dGx1LLHcBPrbcJeOg4ISRT7QTnVvs++t5wGdioGwlnzwWjNEW9DCoTyVrbIG8eCdAs/epOz0XTBS60/91E+NfuzHfmzPQHyN7Foju1aqNG1axedG/Y8qVao0lZ0GUBs4CGgD+cSHB446tgJyfniqs98C0TRJgtN4ZS0DvTrABvQpy4CUlz52HDHOdXC9wx3u0HV4ZRcR49RygPuMZzyjs+IIqQikhbU0QinABfHgV6fUQH0f5uNnb33z8ci3KnysO7HctJ77Vr3v226i5rfRctrQk63qnig2vnNc6Szr9xR4AC9T4lWEVGiixGvZUJnS6qFsjNT65Cc/uV
 PhH/WoR3Wj4O4ViK+RXZf4YbNkI7tWqrSKz436H1WqVGlqiAeBQkwG4sE7iGetSVjJFuID8m2YRfMAlTfeYEXWBaKgPVAqx0svVCKfvPBuD3nIQzqwzeBHUcd5xMWfN0/d5wNXqbAuuHfMYN9nCvUP/dAPdZ1vATVPud+A7j7ExzrTqv0B93SSlfsx5xPRJgNCxRufTrTJLdRT6U0TStJ3GT3Wtpyf8pJF9uGJz7EL36nywr6kNQO8KwODafHDi8cv/v5egvga2XW54WNZRnadNq1jh+nqILl6z41V/x9VqlRpARAvMo0RVJ/2tKeNXvCCF3RWGhBvSoXnWWfhAJaBeDkKPIgHs0YbtY7OmAk/mU6apjJIBfGg+453vOPooIMO6mAUvFOtW4hPmEgw3UaWaQd2AuM+/8mf/EkX157aD5bN3/Wud+0A2DZ/8Rd/sdufSoDfULeBsd859nTKjXIfz7p12xFdlRkA910Uf+tHpU/M+RxXjrkdWRa8247yor4nq7Sw1Kj0qIwodxUpx3nMMcd06vvP/MzPdH74eOL3kp2mQlEuP8QvWyjKSdK6dZiuDpKr+9xY5f9RpUqVFgTxAXkKr8FP2pFZMyorLztAj/ouxjLoBqd87Gwzgff45uOZD8zLAJUaT4m/173u1YWaBNfAlkVHjiLeWmESHz5RY2J/adXwrBMVPFFqLNOB1PGxq4BtHXJFxwH1fsdfLra9iDxR+qnd7DvA3Pk49sSQz8itjgWY234gX9Yi0ar3vo8th4VG5eHiiy/ujkelJhF/ZPvR+qFjMYBXudIfAbQ//vGP7yokQkw6PhDv2unbsO5x4gviC+J3Iq1zh+nqIFkQX6lSpTWG+HRsBYLAkOIOcAEkkNepFYiLOAPKqTxUeBANaKnH6YAJkinhMjtN2/GVGh8rjXmKPzuNKDXiyNsPBVsH0NbL3irjLcRHpW9tLPmcaDYZSCmqfZT1KOZy28k1I7zaBiUcPDsnx6W
 CYwRayrjvH/vYx44OPPDAroLidyo+yg2c66jaZoq7DOyVOXgPuAN25RO7USo6otHYZjoUx0LzuMc9rpunxP/ET/xE54nfKyO2FsQXxO9EWucO09VBsiC+UqVKawzxrBhAHsQ/8YlP7KBRGENWGtFiwDXoZk8BmZRtLwb2D5/Brgx2KfUygOfnbpV40Eqxj+ebwmxEV2EmWXn48X3/pS99aUPhzuiriRIT7/qQ5aaNLpMY7rG6tACfDN7BeUJatgNLtTHls14bjtL2nb/IPabWA/z3vOc9u4qPTqpaGW52s5t1fQp43aOmp6Ovck2FJ2WWjr/Kh41J3wR9EVSujjjiiM5KIySnCgSA//Ef//HRQx/60LLTVCr42OZ9um4dpquDZEF8pUqV9gDE56HBSgI0xSA/6aSTOhhl6aCwA1EgKUJKIs2AduBpHoRaTzaf0VxNQSvFmdebAh2Itx/wDuQp3FR51hZKPLWfit6GekzH1FhZ2vCQbW5HVKXo+xyAT1SZfgbh1knYyHjwA/Dt6K4ZICrqfdZjpQHvzjEdeXnalYFzZ0fSCZUX/01velMH+de97nU7W5HBnA4++ODRD/7gD3YKvAqQCpVOrK4J5R28JyKN4eYB/CMe8Yguok9B/P9ONbJrjew6zX26bh2mq4Pkaj43aoTknU07eX3relRaKMSLE0/1ZZvRGZQSL1ODn/WsZ3WRX6jn4J3yzicP4nnFoyTLgXi2EAAvg9pAPLU9EP/N3/zNHcDLgB7QAvSEr4w1plXjsyxA31fQ+5FhbC/hHDeD+DbsZBR334F2nnY5IN+Cv6ll8blrcUgLhAqL7Nwp9spK+VHhU+lRUTKYExsTr3ssTFT9G9/4xp36DuK1WCgvPn4Qf4973GN0wAEHjB70oAcVxPdSjexaI7uuq1Jbiu7OXd9leG7UCMk7l3bq+tb1qLRQiGelSYhJEM0LLwvdyHet06mRV6339Kc/vYP3vve9hfdMKfYZedR6
 HkpgNl5woSztMyBv+wYwArai3lDkA+StNUaOyt7GiM/oreA90XPMtxAPuCnriR7Txn03D8pbVT/gr1NqC/HprBqPO9UkCrxzTflYJmcQJ8t1FKbQA3WZbQa4K3PLWXT47nWsNWDWU57ylA7iKfDUegq8aD53u9vdupFwH/CAB1wL4pWva3vkkUd2FQMPFUl0HWWyFyC+RnatDpcF8QXxe+G5UR2Xp1Phd/r6EvIwA36oVGlXlXidIzMK6F3ucpcOrvfZZ5/RDW5wg06l972BhkA8uwdwj5o8NIprOmgmg1k3OKClxqswGMwpKjwQvc1tbtMpzWDciKf9GO6B6/7orYnDnvj15vO57QSbgZsC7W2OCt+ODut3fhMLTSw3gX8wrwzFene+KjBppVAuzpsSb0ptp7rLLDNaO5SBzrBaPwzk9KIXvahr/WBfCswb4Ioaz0oj69wqg/qHP/zhnQWnhXgtAspAZCGRhoT/lMTj/8Zv/MYuspAE/kW2yctd5cJ1WQeIr5Fdq8NlQXxB/F54blTH5cnTblxf/Qm1ugsJjWEM1kisk4h++CEVikqV5grxQBAQ3v/+9x/d5CY36cBdvtGNbnQtiAfwFOUAfAZ1Au8gPh1ZE40mOZYSUxm4AniRafjiefJVHIScBMlqy/G0JzJNID7w3qrwzge0g/eEqrQskW4Ssz0jtrYdXQPw2XbU/4SyzPrWSwdayyj0UeFjlfGnVUbKJlF5lA3FPRnE+7PrewDcATzLUgvy1HmAT4kH8SpXOrNS4U0PPfTQrsLzwAc+cCY7jc62Hi55+Nvu937v93aePkkF6053utPGA4fVSvjNNEuqOC0rxNfIrssPl8vU4bIgviB+1Z4b1XF5tvtgp65v/3rEcSBQBxaQ9JnDR5J3PLFPIp4F7vWBqFRpKohPiMmAvAzYgTy4po5T4y1jpwm4pxMreHWzpsNrBneKfSbKe7LlRic1mNStb33rrkPr7W9/+w4agTz13
 43uz+APkI6pQ51XA+jpyBqA9ycKxCdWfNZtB5AKjLfx5q3bjrKqYpAKRFT42GyEjwTwjhe866BK0U6kGfDuj2rZWWed1YG5zrsgnlJunlpObddxWIQeFSW1eICv3wCfPFsTiKeci0ajM6s+Cvzw97vf/XbEE+/cKQeBdlGL2HokYwS4Xip4EuuTVgHHne9dZ+W0KIivkV2XGz6WqcPlOnaYrg6S6/3cqI7Ls90HO3V9h65HfPHEOUwixWbju7SKa8kXqlvSOk/8k7ADLsn7uFJB/KYPNRlAgzOK+C1vecsOsgPYX/3VX91BWlT4+ODBKmhnGQHwbQdW9pkMahSgj61GpBsRWh7ykId0arL9gEIgT31mJ/FyAd4U8ISYlKOau8ED87JzAu8GoUrn2HjjM+hTWxkA8QkxmW1l3UB9FHxAGj89BV7ZqW2rxLDQAHhZ5BlKfEZcBewAnkUmEC90Z0bFBetGrxU3Xz8BPnYRaZS1qag0AFk8eGUlg3hZud3nPvdZWMfW7AMAqKg4J0mT5fd93/d1rSqOxf3RPjRdH+taLvEpuk4VinLvQPwqdsBepQ7T1UFyvZ8b1dKys5X1eV+P/K+0nuedN+6aES8lYM+2K4luh5skEe6wikRYXObKt/N2fH3xoxUO5OoIPAeIp8iD9YR9pMAbiEnHVst0bg3EJ7xkQB7AAzKZZUYG8Swbspsvy/MZxFORhUmkKt/5znfu1H8Qn0g4bubEeY83Xk4Emdb+MgTxfTU+EJ/fBuJNQXui2li33YecAaLig6dwqTGr0ABY8J4cHxxwb+E9AG8QJ3H3WWpkZeHzySef3Cne4F0+6qijupjwwF2nVsp7AN7ngw46qPO6r1p0Gh2XVU60MEiugZaeu9/97hvrCbGp7CS+fuVvWhBfEL+ol3uN7FoQX/+jgvjtXI/AKkbJ+y/91iYBYkm/N4wiacnHJdJzn/vcDcXeOrHGLhLgHQOO0xJILHDehANTny33vf
 UK5GeEePAUGw0lHLBT4SnjAJ4vmncdxA954uXEggfpCalIcU8zES9YplRs86CVkgziASmI32+//Tb8+EAeVAfEE5d9aETWgHwgPnaaKPGJbBNPfOw5fSXeNqxvuwC+jVhjPgDvPKnwmrsC7omVT4FPxBk1ZpnqDNR53IXsBO5Ud3YZ5eB70Wn8CXnPRaWR2ZfYacSXjwIfNV7fBfne9773tSxRqxpiUtmzZGU9HX/1D5CU/a1udasu7KbtqSS123N9lb1rWRBfEL9T92mN7FoQX/+jgvh5XQ+MInnvCXyRY5w1iUDnPyzhiESjM8p7vPYE192AZWKAZ4n9AnbvZ9wibDah1tRny31vPetXh98ZlfiEmYwKr7NposbIPPFf8zVf04WdjPre+uIp0lHe+d0p7fG+A3lTKr2bFdx/8IMf7BRpyjKvt7CJgJ79goXn+te/fjdPiaXA845lVNV40+OFjx+egt42FSdOfEZ+9ZKyjUSnSWx408y3/vjWPmN9AO+4nZfzZp1xfLztbDMqNio4UeHZaCjvAB28A3fALrPQaB4D7aw0fHAyKw14V1niP9ehlQ+enUYznMpOIJ4KD+J1Rl0HiJ9mPfdTu56KlgpOlA0PKvev+yrrsSupaEo8/HKlgo9p778a2bUgvv5HBfHzvh7xynMgeJ/NM+GbHA/GSEVd/7ok3DJPBd4+ROfBZgJxbCYg+D4RCSvG/hwg3qBCQD7hJcVyB9XXu971OjsNaO8P7sQLH4insoMssAvgZcsTLx5gaeoB8WAUqLqIlHdAysJjn9/5nd/ZAS2YBvGxt0Qhz2iuID0dUROhpu3M2u+UGkuMaQvpmQf6iWCTePCystKS4Fzjd6e2B+Ljg6fKg3g1a5DPOkN9p8TLYB5QAnhNYWBeFhdehBoAz0YjnCQIVdGhxPPEq+j4rNxEEoonfq9B/FbraZ5T4dJSkvV0GlZ5ksC8SukjH/nI7rOK5eGHH75xHfu5UkF81qu
 RXQviozTWyK7rC/HTXt95XQ/WGswiCXaxk9cMHyVxBSSx6CZhnKRJWulioXH+WMw7VrngR30AnZsyMPXZct9bz/p+V9aaGSA+GbSDdxnUg8MAPtsNJT6dWjOwk3nWEhaa1vsu2z6YAvXWSSx1yj1oBaNPetKTOohPpBWdXdl5ZFYe29HsBMwD4gnzCLbbwaCSo763HVIBueamHJOXFjj3nSgsPvuuBXzLrAveY6Fpw2mmQiNT4qnz4J0Knwg0wjLGVgPghdZUG1YhUp687wBTBu+avsSAV6ExKqvyUDagnXVGGVHhKfNq0tT4GrF1tvWixru3PKzYl1QmXZ9knwvkC+JXTaktRXfnrm+N7Lre98Es13cnrkfra9dSv1upbaUG2knsrElYJokdpq38KGfL9BUMwCvzoWR5QN76fuf3ZauZAuJbAATqCpRnyTSfMw8qea0oyMImafqRKctsIYl7nsxKQoVul/nMD25AKT57EA+eKKPXve51Oyg1GmnUZZFdQDWvvMGKeOrB+e1ud7uu8y2gB+xg101Gqfc54Gt98B5bigoFKGe90LkyUA+iHUfg3p9G7dS6WhNy/CoiKiFubtaY2IoC660KD+K1OPC7G7gpCjxYtz+hJc2LxvPkJz+5a5U47LDDOnAE7GrELDQ+q/AYoMln1hHnYjmgL4ifD2ypiLm/MyiXDOQL4gviC+IL4ltQqZFd1/c+2I3rO+310EossRerKCwitceq318qPBhEEloaB1LTWZK5BJxrWxEYSr63HmcBAdbvC+KngHiA3kJg4sUDeR1Mgbapz8Ba+Mlv+7Zv62wvlPLv/u7v7lRzQH2HO9yhA3D5rne96+gHfuAHugtpPcoyqNZR03csO2AUwOrEyC4CvIEpWLUNxwKkKeCaWlh1AH1uZDCdCDKxuKTzK3AG2QkHqeIBnBPi8glPeEIX+QWgA/V488V+p8w7HscRn7/jd64Jpanj77777rsRUpIVSQuGYwDy
 LEjKEdiz1AjRqQzB+uMf//iu1UNWHioyfn/DG95wdMghh3T71clYtCBQD9hvfvObd1n5UOFve9vbdmU+brAn+1WRSE0foGoxSFIOzispVqIkD5nEsS2IL4gviC+IL4j/v6lGdl3v+2A3ru+s1wPgYhsJm0wy6OJuQj5R03lhKMzjXGMPGpd8bz28g0H8ftGRdVYK4ltPPLW9Vd+jwAfoQaqOp6LWyOZ1ggXroJcNhOINNME7KOZ1EoGG8m59U2BqhFBwb546DWzB6n3ve9/u96b2D2ip/m5etTTQno6oLDXJsdm04SOtB+I1S6kAtFaffva99QC9zNsWFd6fRdQdlYl00GWrSb8Amb0m4SbVUgG0jq06taqksM+wyjjP9ANQXnxvsnNnlXHubDLKSaVB+ShHrQzU+XxWdipNfSVeq4Jrq5WBzzsQ/7CHPayrIKSG6/roPJzPrqfKWD7rPa5vQj7nPsj20n8inw3atf/++298dk8ceOCB/+uhme913FWRyv2ogmVZ1lMB03rRKhHyuIewZa51tqf1hfc9600CJwXxBfEF8QXxNbLr3ob43RzZdTvXwzaIiBJRcxnAV6WEJcaxxSaz1TmmT4H1/c7v93Jlc1sQ347YGoiPKk9ZVlMC7WBcSEidTynSgBNsH3rooV0nTBAKSIEp4Hz0ox+9obBbH+CxhZhSkn1HjdaZ03IAD1qp/eCQ9UXNNZ73dD5NtJlAvc8B+USeAXOB+L5n3+fEsg+4J6JOYF62TMddUBg7DYgHfQkzSX03QBXVHbTzvifqjNYE/nexz4EqkE8G9oAb2KvsqAQpQ+UgU91ly0wTatL8bttpNJclZSTbJBYkFYgkDxV9B8Y9NBN2M/cjK5OWkaznflA2SSqDznnc9tyPKjXZXlo2sp57uF3/G77hGzZGnJWEsLzZzW7W3btAPrk88QXxs0B8jey63hBfI7uuN8Tv5siu80j64HkHS7a9yDJWOXEs7M/TKPGEY
 r+j4nNW9JmjIH6Tmzng3sI86JH7EC90H5Dnj5epuawxBuoBl/Fvg0vzwBScgTKKs/VUAgA6NZpNhaIbaAOxgEzFgD+eOsxmA7q9ZAwU1IaaTHQZwB6ITwdYy1uIz0BTgXdwbnli2Fsvy73UWjW+hXgAr2MvgA+8p+MqYAd/GXE1UWj434888sjuPE0B/BFHHNGVzWMe85jOzqOsVIRkMOm8VXIyuJPKjfLilc8IripG5Ymffj1Ndu1Q1u4dKkAGM2tzpYL4adarkV3XH+IrFOV6Q/yyhqLcLEWJxxPte25RSvy0nnjrCywSe7SywV4EPsJBxunJM3Y306JGnp24Y2s6kbYAD9qT1ZBkHm4gD7qBpEI3pZqDy3wGluwygU8ZrFKh2UF4u6mg1GXrUaGtC+pZLAKrtsP3DfxdSMDOp91aaRIesj9NNBoe91hiAvCyZaai6gDzRNNRi02kmozMyj/uJccPzzoD3sVVZbegvFPbEypSzmirwN0IrCwZvP+gXauCcsiIpVoptFjoL5CWDJUflRmZ+hxgVzEC9bJWDL8x1WKSa1gQX4M9FcQv9j6tkV0L4uv/VhC/DNeDhXY3YR7Q2hdRLHC+VXQaAU2sRwTFbRnkMyO78v1rIcR9IgACZa3j6VzrNxnBdqcAflEjz84M8QodtAP4dGw1D7yjxPNnA+2EQkw0FZmCTlWmJAN4sMkqA1qBqkqACgFgtYzNBKSy3oDbxEGP/Yb/O51OFVxGXR0X9908gAfj1PUW4mOlobabUtjBuXnbT7Qa8B4LDgWeF56NhjKbAZ1En3GjirYD3nn3ZRAP4GVKfCCe+u78YqFx7iouIB2gx04D3JVT+xnYp0KUsoklSStKKfEF8QXxy3Gf1siuBfH1fyuIX4brkZFaMZHoeO2ynUgq9+m/iKEwymZx4gPwhElCKVcDbtxqZFewHHDHdmkxx2SEWQkfEhy2ez6LHHl2KjtNItAECBNqEsQDeJ0cRUThUQ
 fVpiwvosgAbVNKPBjlh9cp0zyFPX5uFhGwn9FggTlVWqdGKjVFWgWAGg90gaoKgn1R79lYRKZRUwPygN05AHYgn5yINIFxlpjYaVpPvIvtxtFp1eeo84F9+xPRJYP+UODVBBNGUqdV3nd/jsSAN58Y8OCdtcagVc4RyLPSaHkA8Jq90ocgPnfeb+o8iI8aH6C3rvUyAJSbpyC+IL4gfvnu0xrZtSC+/m8F8ct0PbCTZEwUjLNTEJ848SBaMBCAvpkwgPOwFaFyuyO72i+hQRIIg/AqCfWd6HsGB51UgV/0yLNTD/bURqVplfhMxWrfb7/9OosLqAb1gB6Q866DbWAps8RQjAEnpRmYAlfwyRNvW2w4lHvQHw+4C6pyIO47cPW9DrQi4xggSidSzStuELCeEVXlqO+xwbS+9kA8cM/AVIH1VqEH9W486jxfFmhP2EhZ9BlKvOWip4B38ePVKkE8Jd7Iq8BdZqsRVhK8q6gAd5UX9iKfzTvPxH1XVmBeDVaZ8Monko11QXwsNcqrBnsqiC+IX777tEZ2LYgP1NTIrusL8Ysa2XU7yXYDtFwD7WBN2039EVsJrURSwiZhF8dR300POOCAzsFAKMWMOzmyK5t14F4fRa4LSRRB/Swldphx57GokWdngvj44eODD8BT4sWIT6dUUzAvuyC+08E1EWeSA6PxxIN5v2HNsR3ArnMsWw7l3ro6tloGZlUGfLYPv6V0O3YQHwW+BfhWfW9DRfYhvu3YCt753t1srDMAXmQVsA7aqe4JcQjkAbzlBq4SYQXIR5V3U0Z9P/roozt4ZxeSWYoSctO87Bx9tjydgsG581Ye+gz4LqElrZMOr6n0FMQXxBfEl1Jbiu5yXd8a2XW974NlGdl1OwnY8p9LQHteIB8FG9BiNRxGHM1I9wKD4CzlIJz0bo/smvLUHzLKPA5T/lHxQf2iR56dCOITlSYHmQ6slidqDYgH3dT3ADbIFFvcun6T9VllLKc
 oA3pKMfUYsPKEs5VQ860L+qnxwN08RT6eccozi45liWBj3xRrN7kwkgBeOCIZwLfRZIYgPh1Z5USg4Zl3cyW7sdhoeJwSQrJV340gy3elScrNB+IzPfnkkzslXk0PxLPPZERWEM8WA9rjg9dSkag+FHrWmUScUblhJQrUmzr3RLBJB9gasbUgvmCwIL4gfvmub43sunz3Qb+FbNK8qOu7m9cDWEv2J2jHdlLrJcdOovIlAhzRNdZnoitOXIaRXXPMEpcFELcfvvdFHd/EHVv7GcyLQsPywiqT0UPBt06tYJ7yDu4zQFQGA6KY+x17DRUeaAJOPvD438Fp7DfsMtYD8WBV8w47iosO4K0L5kEtkDcF2iD+qquu6m6CDNLUet6BfEJHZjmlneJu3jLrgHm1Q+Ce8JE+88Cz06QTK3gH7qLRCCcpB94p8SeddFIXpUZLQQvxRmQVWpN1BoCrsCTKDIgH8FR2LRWsM1HoM+hTlPr442Wf/Sax9wviC+ILBgviC+KX6/rWyK7Ldx/0+6psB+KXeWTX7SrpxnGROByU16zbGRfVReVE30Ygjy2niScv/jwLs+194Qtf6NhMso9J5hPVZrN5Qq3tO/ev+7qvW9jIsxNBfBsbvh25laIOrAG7g5LNB+ozmms/mg1lXma/AZhAk3IM4oEssI3PnaJvHbCqckBtFkoSALOhgHveeGq1dWPnUcAKiTc+UWRkgN4q7VHkY5cRiSYQn1jx5tVA2Wd0gjA999xzO4iPbUbTCu/U6aef3tlm1NLc5IF4gzuB9wzuJKykiggVHsBrWdAfgB2GAq9zrxYKEE9dZ5nJgE/xyrPQRHH3u8wH4mO/UYstiC+ILxgsiC+IX77nUI3sujyphW5ckGhzxLo2e++LkgIU12Fk1+0kdh9lIeGqaSPbjIuvbhAnIA/iZxnZFWepEHz+85+fGeK1OGR5fx5j2r5WmwQOWcTIs1N54hOhJh54EE/5Bu4+t9Ae2LfM
 d8Ce+g7u25CVoBusx9cts8lQ13WIpbyzkej4aVlGeY1CzXaSCCysNPHj24bCdRO0Pnd+9uTAesJIxi5jvUA8ZV7oSOp7MhUexLPRuNjgXabC6+EM4GUw70/OQpM48UJNmheZRmXENLHh+fnTsdU5sAhR46nvvgPwGQTKZ+euvNqRWpVPyiadX0UEKogviC8YXJ371HMoUy83LYse9loC/bad2g7wqJFdV/M5VCO7Lk9q7S9AHmi1Y8K0Y8PwaoP5dRvZdTvJ8wovSbhrkkTgHLLmbGdkV+v73U5WbnJ87gdMvKjjmxriqe2AHMBT0h184N00gE9pB+gsMcJLgn0AH+88S03A35TKToFmE+ENB6IUdoo0SKVC89hT3FlnLLM+0NXZAhCrDAB9HnodY0WpcYOryalVZzRV2Y0WaE+0mcwH4mOvyY3pd+eff343mBM/vBp5wJ2FRidWyjtwd2PKxx9/fAft6dAK2p1fjp+Cng6poFvozLQwAHi2GhUU37PdsBuBdwq7sgL8poleA+RtJwAfu1EqTaYF8QXxBfHLfZ96VoBgzx7ASxEE6jp8eWFTB/3elG3P89FnrX6at2tk13oOVUvL9GnSjqjubfe4e30vhqLcqgwlboNEtWGLmaVVZLsju6qMUfR3YuTURO1h1SYgT3t8C1HiASAwB+SgG4iD9lhtgDx4B+nWsw6wBt7xyPuN36sEmG899uwjlGV2GjAK0P3WPFj1PYDnk09MdJ8NluSGsRz0g3+VDS83CvtHP/rRrqCBOAiXA/JR5QPwiQffxon3XSLS+B2AZ6VRSQDxLDQAPqEkQXv87+LAeyGz0ohIA9Cp6ACdTaiNkW8epFsnn63nfKwT3zzoT5lQ8EE8C00qBtme7StDZVQQXxBfEL8a96nnkFY+So0XUI3sWs+h+l/urhq/VUhIFWwgT5EviN+8LCV2aMn/vfWAszsblHMozTKya2w340Z2nXXk1KxHhE1iq571+Ky/q
 574/oitwJ1tRUxPlhdQrsNqlHqADtop8FRx8K2mosOp3wB7v7edKPIpAJYb9hEqMl+834FZsJrRSKnQGaU0I79Soa0fyLddMAyu1Xjib4sSD+R9Nk0Yo9ZG0+/kat3YadrRWNsoNNT2F77whR20q1jwvCd+uxzLSwZiilpumamWBsudD3invqvUOMdE8Wk7tlL0Qb1WC7Du/JVVBnvyWT8B16ggviC+IH617lOd8j2TrFcju9ZzqP6Xy5P2+qBQ06aAKpDGLJIOp7hpHMRPO7JrADkju7I4H3LIITOPnJpjdoyJmZ+BofrHx1496fFZz/nsanSaVolvI8wAcaAOzk0N7JS48LJlrBzgE0haj7IOLsG971ly2vCTwN46vo+dRsHzzFOZAS6QBejAXqUA1ANiIAt2wbAOt5ZZT+dVMN762lsol6nt6dSaDq2tzSZhJTVn68jKw8VGc8YZZ4xe+cpXdjU/9pmAPNVdlJ0o5CoZgXgXMZ8BeOwv5p0nhd1nUz54lhplpix0gNWxFcRn6nvlqwKjjEB87EjKX0UoEF9wVBBfEL/8Snw88QZCocpPep+Ku6yZX26TwUooUioGhAEvJUo/NWm37SzL2EGynkMF8QXxu5Oo7xIBVZ8CaagiP8vIrljT8xIHzTJyagZzYpXGe5IAKEOgneNjX9Q3cpLj8731nM+uxolvPfHxv/sM5FlmQDPrhgIE4MAasIsF77PvqeMZtCmKOuC2DRCfUJSx16gQgNJEr8mopaCVrUQH0ac+9amd5x4YA2WQa105lQv7cTFipdEpNeEiZS8xkWfYZUB8O8hTC++m1mOjSTjJRKDJgE6JRMNS03rfAXkU9gzEFOsLABdVRvaZt9X5edGy1ujE6sI7N2WXjq3pCGybgF1oT+UdX7xl6R+gHKtja0F8Qfxq3qdC5XpOTXqfehElisK45MWlqdkI10Da9j232hfZTl/fZeqQtwrPoRrZdb0hfhVHdt1OijVZwn
 Mf//jHr/X9uJFdrYtt+iO7epa1I7v6Do996lOf2nTkVM8/+xG9MJ52VpytWghzfDjRtrc6Pst9b71dH7G1BcCEiqSgJ/Y79Vw0FQeuYKjPYDV2GN8BfJ0sLcsgRXxS1HMqMnW/jWxjP/bhOxCr5gRebZ/SrFMByPV9IrFYT4XBPqjQWgrsH2gDcfHi+xAfVT5RaajwUcLSETYx4dXw1NB47dOJlQKf0VhN+eDZaFQwRNTRGVVn1VhfVEYcp2PUucG8MgDpGbRKjk2I/z0tECw5sdPw1afVwboqTiBfBUF5RPFPx1bXLNex4KggviB+NZT4rKf/zTT3qbBqUrazVaLU24ekdVErouTFOu/yr0ra9OvVyK7rDfHrMLLrNMlxEEUjGHj+sNdJhMoc46wju4Jn6w2da+tNF5CEu8F1AvvTCBhAX2hxZbzZ8Zn6bLnvrddvAdhViAfboB1gt1FpEiOe8g4qwSTwpjhThQPXYDUACzAp0aZgm5qcKDYBep1hqerAto2L7kJTrG0b0MZWYrvWBfG89yoMQJr9RbOHQhVdhvKUjqogno0mMeGjwFtufeBOfWej0fwTC03CSMpqcOw0IN5ATjq0stRkUCdQrzxUXMC3Sg7QdrzgO+EzfVZW8cpbH5QDeZWjhI/02fexzrRlmwg1tkGJ1wriGsVSUy/PgviC+L1xn+qfM23yYsngLZqJPT+jnHnRrev1nedInTWya43supdHdt1O4oKIH50w2o7syoKiBXGrkV3zu3EpHVEPPPDATqiY1pueqDmxw2x2fKY+Wz6JF3/HID4AmDjxMqAP1Ae6rUN5B5NAmwLNIgJOQSbAjkoMXE2BKjD3HUWZlcY2s0/70OHVBbIeRdporRRuyjRANu97+7Fv2w4cg+KEXtN8I4N4FhsZrFPg3TxqcxmVFcBTp7zIVALUusC8zzqzZlRWSr/t88OD+Oc///ldNBre+AzsJOJOwFplg58f1INsx6hsLFf5iT0
 oYSZBuuVy269AhYayn1FZnbvKkM+Ue+WhIsPyVHaagviC+NVV4tv1NMtS1yZJ11xzTTfVkjhrijrluUnpysvVS3Odru88R+qskV1rZNca2XV8wmz9fjtDKYNIsauwokw6sutWYsN2Rk61L2KvyGGt8DFu5NntRMWZG8S3NwJFVw4UAu2AfBtthlLPr06Vz2BNwJWaLLOJ+AzIzcux3FgGaAGubSX2vG2DWCo0aw17Cg84QJaBr9+w2PCHm4JY6j41yUsIqLsAgfdYavjhvTw1fUR513HVTQTgQXuyZaLS8J22thrwTvky5c+ixDtHQK2iojKjmVrkGuq8+QzypDKipYHX3XxGb3V+ysvv08E155TwncotlRcVIetZ5gZVbq5Pew0L4gviC+L31n1KSJhnEixAHyPJM8sLlrKkqXgVr+9OjNRZI7vWyK41suv4VoxpEvsJsdQzxrPHc2ZeI7tOE69dYACMKLJOP40beXbe8elngvg2skkgnvKeUVlbiAfvPlsHdFOGKcsiqYi0Ej+3eRBumtFHqc7g00sBuFKeE08+yr+oN5R16wd8Kf4qCT77PQUexLPhaBWg4tsm1dwfjuoerztFPjHjTaO+U90D7GA9kWhaiDcF8Gw0RmU1sNNxxx3XKWVgnqXGeauUAHmtB+DdenzzfP2pjAB8WVlk5FbnmMGgqPjKI/53FSQKu3OkwseKQ9VnJVIG7UBcBfEF8QXx66HES55hOs8DB6kd4XVomtBohIUMwDKvxItrLA4vK62AnqU8rsB+SN1axuu7EyN11siuNbJrjew6/8Siwtqc5yB1ezdGdr300kuX8/82yUsrMeCddEJMgnUDO5lSexNVJpabKMCBSH53D/ijjjqqU6K9gEAqGwyVnPIM9qnIbCTg1nI2E1572852QT0VGrgCY/Crc8LRRx/dbYfNhA8c5Ao1CXgBrhYBKrw/lVjvasr88V424F2Ts8/+cJR4nwPzKgCmrDSgHsCr
 GYJ31hlKF/UdxLPRmPoM1oG7bN7oso5Tc5LKh2MH7M4V9DtvFRvTjMYqKyswr7UBzDsfZaGCwm6TOPPO1bIMpJVKV0F8QXxB/Orcp4FvwoL1DCg3tJ5nme9N83ncVNZSqUUSgHj+eIF5CYLueSUWHh1rjaKt5VNnLn2IqGlUqGW8vosaqbNCUa5mqpFdZ0sEBK6C2Py2m8SCVzmxXR1cd3JkV/y2shBPcU8GjnKinUSJ9zkRZkBj/PIB+QCniDEuIiAFrzqnyiAcgAPYZIo0pT2jt+rkGnuNzq/gPh05bY//3voqC2wmUeK1BoB+CjZ4Z50B8zqvUt1dHBAdkNZjmepOTdcxVaVD51XQ7rtEpwHvPO+Ud9ugcvm99fMdoPcZuLPPqLxYD8Q7XudoqnID9B0D9T4x4s37XmVHNB5WIrDOKnTTm960K1PlkDjzGTEXxAfgY3+qjq0F8QXxq3GfBr49o6ynJXCcYt+OIjjJeUhefEKqCXcG4A2KQtUSw1kULsA9r2RbXrYUSxHAch4ZyGpZIG8RI3UWxK+2Gl8ju46W4tmsIoWNAuei02w2cqrvpx3ZNfHtVxLi28GeQKKczqxyFPLAfavAt0Af6AegoJzfG6SC+dhKokrHfmNKhXaBdHjlBQfzCXNJbU8HWb9NSEYKPjUevPsezFuX9QW488FrknHRqdsqFkCYJQeIU9Jtj1rlcyLR6MDqwvO4gXzWGNMW4inwwB7Ayz5Hgbeeddh/VF7Aun2AeNm5q9BYnrLxPYi33LHqsKpCokIE4Hnf2WxUaMC7comtKdeiIL4gviB+te5TQoGIWVutBxCkrV4yrU3n7LPP7nydbYQELy0Kug76H/7wh0eXXXZZ9zziP6UyTutjHUpeiDkPYoiWV2WhRYDfdBWAoZ5XBfGLul9W/Xp4hsy7Y+1mI7t6lvVHTg3AZ2RXrgruhqGRXbUk4jK/XdaoPlNFpwnEU3qpwODRvBxwBJGx2WSE13R4z
 fIWKAPOVGawzj8OboFrBjYCtOBcwYJVgG6fQD7Ra9h1/D6DILlA1HgqPI84oGc1cYFVDijh9uUzEAbGfgOObT8dRb3EwDsrDf+7TqusMzKAB+aUMGp7/PAA33fp3Oo7nynzpiA+I6+CdJ1y04oQG5GpMpGt47u0YGRAJ5UTHX1VUJSx8k1kH2XcDqBVdpqC+IL41bpPvTQoRCItbOWdlzxHpnnxUci9VMe9nCwXgYZCBbI9f6iKl1xySWc3nHXwmPb6igyhLLSOZjh2Q5hrFSiIL4gviF+f60EYYLGbdxoa2RUnbhapRz9CLY5cG7OM7LrySjyApobrdEoZB9BAmP88YJ+RWKMGm89ncKkSAEL9ju0FmHqIU5zBO8jW8RMIs5l4QSUso86eoFbtSaUgLQGORYdOdhPWEvMg3jQjyeoYqyXAvu3XNPYd2xZ3XaXCsbC/uOmAugzIqe5APSEkQbnPJ510UrdMNu+4wTrAp8I7B+DPnkONB/Gy83K+vP0qL2BetswxqMA431RgZDcgBcs0o+G6Hspc2Ud9b8u8IL4gviB+9e5TVhfRsfg9t7pP2WMmVeKTKP1erpPCOHCneBE0vNi0aga+L7/88omanLe6vioObIsS4SOjOlLb6nlVI7vudYjfayO7bpW2GtkVs1LfiZucGRiuHdkVK1Ho+4p9f2TXeY2wujQQD9ZBPHCn/gJ5dpVWoW+jygQsZduwPmUHeANrNhEKNHjXfAHggXPCNQLfNnoLyDW1T/uK+mxb1GmQ6wIC97vd7W4bsdgdn2O2nt8CfNuyb9sXRcYUSKcDbgCbBcbnhJFMHHjQTn1302S53wbik9PJ1TyAp7KrKATiM7Vv+6LUq1DE7w7Y2ZC0Emg5yEBZzjE2o3Qo7lecCuIL4gviV/M+9VJhf9lKiQ8Ae854WU+aDHbi5T3NaIUtUBh/Q9IJVyd8SVCAwHf/+k1zfVUYqPSSZ6agAhKrzyJAo0Z2rZFdF3m/rO
 rIrjqNigY4aSz2WUB+q5FdPY8EJlEO2CkAv5l3PiDPek3pz+BOKwfxrf0lYSSBc4A+UEzZFtrRPOsKlT2RUhKCMtl6gJ0ynY6ewJ3yDX59BrLAlpJNxWZ3UfiAP+o4BR3cqkg4Psdq+5R3y2UQLyoDAFbxoMRTsVlThLJktwHp9kntB8bW1SoQqJYp4uA59h+gDuBFqAHwiUiTc/IyjfXGMuengtKCunPIeZrmO8cR64wWBxDv/B1rRmZ1XrKKiKkydT3aKEFtONCCo4L4gvjVvE+p7Nu9T4egv4XlhKucRxJyUguC5Lkop7Vg1usLABI6jvjhhWyZ/cwzus5W90GN7Fojuy7qflnVkV3974We3ck0zciueHCaKDbYyzNy2pFdl06JlwE5xRe4t3Ya4A6k+bOTfQbz1qGAA8wMBGV9qk1CL7bqewvClvseMCeePIi2nvWp5jqyUqftJxFyHJtKBWB3wUAvoI9SD9SFosxIsYDZ8VgGnE1VKGRwrbJgXXYWzTAUe+AN4lv7jNxCvKnPckJL2h4bTSouWgFUCvjgqU2ap52rSoN9BuBTiVEBcRyAXnZ+icTjfBPyM51bC+IL4gvi1+c+veKKK7aEcoqbvjyTDL6UZAyNDGIz75eryDQSJT3n4dknpG+ieEybohZ60QIEVh7PX0rjLK0Kk94HNbJrjey6qPtlr43sOosiP27k1KGRXSeNJz/LyK5LCfFAEIxT2AEjII4CD9hjq/GZ94hX27woKtYD1lHvM5prwisCW8BMdQfslG05IRkD9hQYCrnlGSwJ8ILugDwlPuEwtQRQ5UE8Fd0xUa0BPZuKHK+5DJSp4AA9nVjBNpgG7zrC2g+Q1ok2SrxjTIXEb7xMsixRaxyz43e+prLzVhHJyK2JC2/bIF5LAYi3P9lx6IzBu68i4jwcjzJ2TXxWzgnDWRBfEF8Qv173qagyk7xgqWmf/exnN9bdDPpb4Pbc243razwO6r9zPPzwwzt
 lXTIAn5fuLJUFlh5N6Soxnp3UM3DvBT6PVCO7zv8+qJFd13tkVx514bl3E+SHRk7dzZFdlxriE5mGn1wG40AdHFOBqdwJ7WjeNDYbkAk2/cbUelR1UO7FAeCBLWsLQG/DMvKhC/PIg57OosDY51hUrAtwqdPgPT58U8cK5h0T0AXD1O/YZ4CvDIhBs0qBWPEg3fHEjqMiYN42dHjQkTWjtZo6HtCeDOgdaxT42GWAe8JoBt6VBXjX2mCZz4mgo4Jhn1odMrATW01aFJy3dXJdtJS0oT4L4gviC+LX4z6lNHteGjV6KygXtlEUBp5QaauRXU3FbwcWouLshKI9dH29eM1LQMb+vYA9M3nuvTintcw4dy9dTekEGC9wtgZjhGynVaFGdp3vfVAju673yK4q1NO0CO50q8hOj+y69BDPShMLTWw0QB3Ay6LBpDMp2AXNsXpYz/eBeuuAaRFYEkudGp/oNIDesqjbYrRT5wP7pqA5YR49/BOqEtSCXMcEYtmA0oIA1O0XQINkcM5Xbhq4dxxGSbQfcBy7kIpHFHHH6ftErkl0mijvySokGaxJhcA00G4K2JVBq8ZntFZQzxPvGBy3l5HWAvOWg3pWIJ2EHZcyVhlpo9QUxBfEF8Sv5306CZR78WTE1ElHdvWcIjS8733v60QNir4oNuwwwF48+Vmblae5vvbv5Wl/BqQSdQJgqJhokZhGnVMR4NX3bnHsOrrFtz/pudTIruv9P98JVbhGdl2eVhGVehWpWUZ2XQslns86dhpA24JtIN48xTs+dHAMLONFB8oBfZANSPnLAXmiuMRmwjceX7kc73l86uAY1ANlSr0XVXz0iQDDksJaEkU+YSjBsOZW8CtyDR88YAbRtimEmgyQ+aFUXkydPwsOFV3Foo2Wk1jxrQ/ecSRcpJdiplR82wbyGdSKNQjIA3g5ajzrDEB3I5lPVBq/Vwlxjgk3qdwzGFZ1bC2IL4if3326bB0aP/GJT3TP
 MM3DW0E577mBS6YBX+Eno6QBd35SAzRRxG3TMwdci+luORCmOG412ut2rq/jlw3i4loAfH2KQJB9TzMglfMJxBOAbDMK/lbqao3sWtA4jRq/V0d2NVKzvjnLkFwDLR062QbOJ41OY/218cRHiY/PnbINyhMpJdFSWFooOKwfQD0qfGK3A30wSk32IqJUg3MecjAM0ME8KNZ0nBFQATK4lynglvne7wCt/atYmIJsD2eqvGPNReOVB+QJPWkaqFYJUakA0ZY5TpWAeP61IlDhfQf6E/Ix/nrbUMFIqEzQDsyBesA8yrtl8cPL5rMeFT6ArsKhnOTsw76dVzz6LDS5Lm3IzYyqWxBfEF8Qv737dJk7NE6SQAKY3yr8ZN877zk27qUIUPjOKfSO1UjYnvuS+ShdwDi+9HleX0rnpZdeuhEfmihjXwBfWLhJY16rpIhaIYF5AkzKYhZ//jo+1wDZNDkVwVgp2EnSgdk1qUGhluv6ruv18IxwTu453nwWmf7IrpvFibee9dciOg0QTHQaarzMUsMaQ1kHvDKAp8J7mAc+W4iPUk/JAfgsIVRo4JvRTwE6qKe8J4N1qju4t27iyCckJeilUNsH1R9MA3nbplLrVBuFOmEu0/nWsapUOKccu+x7v2srLal8+F4lwLkkaoyXiP3xvfO/ZyCpQHtsNJT3AHw6s7brpmOrCoVtAnYA77MKhHO1TwDvGFVKXBstBYkVXxBfEF8QP5+0zB0adeTU2kcxnwTKqdfTeFQBscQjP01qw1aqPHim9a8vVWza7U6iuKmosF9ec801Xcx6rbJevmLiswZtlbQwSPpF6YslebEb0GovPtfc096hppNk72Egf84553QtRdKLX/zi7r+TimGOz3uSTcqx69zsGkkXXHDBxn3q3k4LzzIM+lUQvzpJC6RB6YZGdsWfmwkwvree9Vc+TnxOKraUDCYkjKFOlAnnCKD9gRMGUSGoyYDq+OR9L7PTyGAUvFLjW
 wuNaeKoazIF5GA+gzDJAB4Qg2HrWsfvqNcZvfW0007rwN9xqHQA3ozwCnrbkWUtlwFxvPw64vqNzrL5jXk5FZZ0fHUuIDvKOiXeCwTYt51YE0oygzkF0OOH98LTYVUrRQvvvtcSoPxUQlQ6XJ/k9gaMlaYgviC+IH57adk7NE6TVEYo5+NeRuOi2LAszuMF1l5fnXNVQCSwHOWeKp6wlPO4dgaHosgBQ32mJJ19U8mYxOcqao7oNyoJnsO2mVB2ewHio67Pa/0cn/CgrGGOHeSnpUi/h1SaWFuVdyoAGZ1YC7xKqWSU34RfNcJxKgD+p7GRaVVZxkGl1nFkV8z1rne9a2FliieldIjfamRXzGdkV9MDDjigW+576638iK3tYE+Bd0p2ANHnhI6UAXv87oDTQxOEJrIK2AW9FPnEXE+kmPjDwXw7OBKIz2BQ4J0CrxMpO00iuogQY5sqFbYbxdtvgL/vA7/WCeQ6F8efUWi1NMgZsAq0ywDe56yn9cG+wbiKBlC3P6AO0jMCrXNxQ6WyEVsNKI/fnarPJmN5Otz6Pqp74uP7nA6sKhGOO/Cega6cU6vCF8QXxBfEby8te4dGycvGizMgsxmUU8mBzTQquO1SQ7fyjM96fb0oAYSkEpRQlyyVUcNFzplUDd8qUdcS1tK7xDtCcgxbtVQoa9eZOufZrdLhN/HXFsRPDvHb+Z+3fTBcg/SJ0Gk5kUdUDHVoDlimZUnl0XWUWKjyX3DvJVJS24/Ef3un/NA1sut8kmNWIcxzb1y5bDayq6nPlvveetZf1lGFp1biQWLU6gBiq2D7ntoNMoE84GQFAepU5YSABNuUebBNce5DfEY0NQXApoA4seTjlZcDxVR4vnXHo6JALQHWMmVfCMi0CjhGrQiO1/rmY0cB6QF1n9OpFcBT59PqoEIA1D0YKAKBdQCf0V4tM3V+CSvpeIG58qDcy22lI6O1BuCj3qeyIswlaw+AD7SDdNep7cxaEF
 8QXxA/XzV+WTs0zpKcC3V9UiU+ZQBywOtuXV/7Cmy94Q1v6DrRSp6rghlI55577gaozdJa4Dep/Lzuda/bGGEWAAYGxtk4HD+1z8veu4Z9xLm96lWv6vz6q9qHYhUgfrstUgmhKoxpFFv3UixXKo9pEfKezz2GU1KZVPlLxdZ5p++H/1AqAO6ttPYMgeC6jezqGbhV35t5JrY5iVC71ZgQW43sauqz5b63nvWXdUThqUdszYirFPfWX55whn3gtx4FG6iKAANCATx/vAzyKfTU7IRgBMDp5ClbFh98Or7GMx+7DcgH1OA2HRISRQYg24YL7BjYX+KFB+YAd6hFIeeWCosKgvMA0/G2q0RoDYjFJy0FaUUA9IkHHxXe79MqkYgzKjmOP+trwQDw9qNs/N7+/B7EK9OAu+uRss+5+Fx2moL4gvi9cT+3CVzwHwdENoPy+I+nSZQ4xzNLh895Xl8v16in4F5HVsnzNQAp+kwqAAZ7mTaBqlgCPIOzXX0ixsGCl70Bpqj7OnYWxK/f/9y9FCinWgfW2/uCvScgiwtSGSBaZh7T+J86dyKjkKqrPrKr+5+lOa0Zu6G+E3Cn+Y9vNrKrqc+W+34ZLTRTQzwADAQCxoAu5do0/vDkfAd+4ze3jLLNopL1gGjCTVLHwXX88WAW1MdWk9jw5mXLQbI/h/VVDgAumwyYBbAgneqvgygwptxoQtMawFKTjq32Df4BveNK9BzHGg+8balwAOu0JGhpyDbt38sjsd8TPadtPUiFg1qjMgHebUNzLFXeb2yDCu94Qb3Kh46t9smfryKhHPstIQH21kZTEF8QXxC/9yA+LzO/m8TrDXI9S6MsTjKyqxecY5rF2rLT19c5ByC0GgAriXCSkJLC3qUj8KSjuXqRx2ajkpRKg+c2v7aXfdTcFqKE4OSv5fG2rr5iOkHzebPzLGJk14L4xaaouhT62FK0AGjF0S8AJ5x99tndffKwhz1s9OpXv7qbZ0N
 27/XvF0Ji1GidOM0DUh2Ls960g6Vtdd0WPdKuinsgfprQue3/eWhk19bvLy8zwM8E8a0SLyfkZDIQzrzvYvkIWIP6bMN3iXJj3dhwokyzkwDeDKrkRZNBoALxwBg4A3XwDLzj2882MwIqJRxM2y5wB/FsMQAZuINkx03pFn2G3QfcB+Itd1wZdVZnCC0AcsJOtj78DPqUDrlqp44fmKuNayGwbw91FQstEFHoHbNsPR107a+1//QtM1He206uBfEF8QXxexPiJU38osLo5LcVlCdNE0KQlQUk6CwaO8EkoLDI65uoM46bSu5l7dkLkMAU9ZwP1jFN6v2nnrJjqNgYkCoDbIF8LQX9jtDnn39+Nz3llFM68UalyXusIH7vhpjcbsd5HTBj91LuGTMBi2Q9jJQ+A1gl/1WcEgh23WxbRKc2D123cSO78pXP0sI3KXizuUgqxpVmsNOAyD7EU7+BuFjlYBPkUowBOTBORJgo9K31pgXO9jPAB88gly2GvQbQg3eWGjAuuwHTcRakg3JqOoi3HRUG6nZirlufPUWnW8eYUWdBvN86B+vaFzDXSpAWBedIuQfFztv3PKViCtuH36hgqCUbmEpWa1ZLBvK+o9xbj9ru5ZGQm1ohnIPjSNhO5ZzyCIS3intbdq19pv2uIL4gviB+b0K85Lcgc5KRXQGs567n3yTZcxMwsAF4cUtEByNdS8SKqOCUxIQaXMbrq9kcSFFDjUwL9j2jHT/1nRgDTMAOX/Rm/li/BVRGE6fC63Dp/WW7Wi5UEvodob0nUi6ugw68UjpSFsSvd9rtjvO2nXtL5930DdAC4H+rYp5MRG1BfrPr5thFfNop9do9NW5wpoL4CSE+SnosNVHiA/EANBBP7QbG6QyaUV4TojEdSIG9yoEc+A6AAlnbViGgjFOp40lnt1EbYzsB8OAcZNu2fcXKI8wkhZtKTr23LoCOjUcTVUaWdQxsMwCdzYX67rN9UM6dm+06dxUM
 Vh43OVB3LB7AerrzygN86rtKR3LUeQ/1RKixXftRRmm9SEfh5DYMZgvt/VwQXxBfEL/Y+3kRI7tulgABiGax2WxkV+AFzjNa9VbZukOglhc42I1nnY0lI8C21/fAAw/sVHFQ7LlIaQxMi6ATC8uimrTTCQ7MsD44Fs9856LzG2HGOVI+nU8bvaRtpleRouxTWD3v/Rbcs0xY3naEbkfg9X4Q2tJ9IKxiQfx6q/GLHtkVgAv52Sb3/qQQv1PP1ESq2imFf89CPPgG4S3Et0o8SKYsg2NwbZrY5n37Tew1pn3otM/sh2rNbgPCKfA6lKo5ajYC8Qm9aH+xnzgOvvJEvfEQ1owZeBfJxhREA1+f7SPx7rUEAHTKjFaHdICN9QeMU90p8FHh3XQsNJpJgbzjTIx7yj01PiDv2HRwTfz6DNSUVo9AfVvRCdC3uSC+IL4gfvH38yJGdt0qUX/ZO2InGQde84D4aa6vMICJdANcgT+QoWKzCFAI9R8C+gCGeEOlNi9ySKwvoIYKuJuwb79UTB0TgTihB4CLSOOdlOXU+H5oQqDme+8L6nyGgZdVFtIpmfLvs315ZxnAKutRbQvi91ba6ffbdiDePc0SPGkfk61SBAD7iABQaUaIb+0cbcSWwDz1mLquwyUoB/FsNEDa1GfzVPlAtvWBPnU9Fpgo8+YD4PbRWm0o4CoKfm/KhsK/7kFPabc/21ChsF8tA47JA5ClhQrvAcubHlBXMQDwAB3wUu7TmdR3AFuTMbtLwDo+/1h/VFYo8kAdpAfq+R6pYF5Csu9su4357lidV+LwJ259gDxlHhuTylP6GsQjP+STrxCTBfEF8bubFjWy66THJt7xONBdBMRPe30pcdRw56JjG4uKc2KzVAlQgaLuAxHzRBR+dZUXHUtFjVExANc7FfPbdnV05Tl2vPpHaVUAQ0Qby1VWRL3ROuJ8qPIplwsvvLCzJUlUeMetjCiwbfQSlQPb966xzSi4OwXx3
 oOT9pfwDjX17kvnaq0OmVehzPF5R7oujl0LTK6LvgOZZ/fK/MUXX7xhZ9I6kXllmHmhAjPf2p/EEM/9n5YeqW3taft1JAZ9/osF8eP/vyqck4yGvFVK5dTzpdIcIT6hIwOUUYQTRhLIJ+IMcI4n3hRsU+B9BsuBeMAdUE4FwLrpZGo7wNr6wBXEJic2vd/Ytk6mmilBMbC3n6j/tu2hyOoC3jXbsr+AecoO24x17cv5gH/HAPJ1OgX6AJ2PPvCceOyyz85HdBwVBZ5QsYa9ZJJf//rXdw8r+4s1KP535eecovC3kX1kn5PHQXwL8P3wkgXxBfEF8buTFjWy66QJBAb6VhHiJ4VocGwKdAE7wOA7T2c8Agow9J156jmQs47rBvr91tR25gH87g0tB+4PYKlVVkUD3LbDv+uMLDkHKr0yVkbW13qb9RyvCgnIB6daBVQSgD3wmvdzzf0xTZbOOOOMDXAXcjHzbcuDd7IKinP0js79wI4ar7Z3duaFcE4oQfbYzGtlDwBq6c484S7zxLN0ANWynnkiX4BdK386ehIIM48tMq+SklYtEfUyr2Nz5ll4s77tZ97/JvOuV+Zd28xjkxwP9sjy2Lf6zwP3Rs6FC0B0Gv8tlduspxUn62jFSpkYzyDzGdMGyCdbtlt2Gs/FjKI8blyGStuE+ER+iaUjkAkuKeBgNio4uOaBB+bxxQfiwT6YBdxsLX5jfWALnC0H+PGtR9X3274P3DFYzhLjj8kz73d+L6tg+BMBd820HiBAm0WG1cWf3DEA6ijgINpv/IkTy91xOM+cdwaIClD78/uTURlEhKCiuCGFEfNysB/Hk/CQbcdUv09rhH1YJ3al9CFIZ+J2n0ODOxXEF8QXxC8mLWpk13lB2jpA/LQJyKvUgIYAseOi4jtGdhbqPtXXPDAC19RjAo0KG8gCz9PCfq7vO9/5zo1y8Y4C/IQgfbMkqj1V23sl6yU29stf/vIO2lNJc4xnnnnmtc
 qZMl3/8517hw0p9+6pqPvOL/Oiq7QtA5lXwcs8W1buJRXPthMqtbv/PNB6k+NwrzgO+zzttNOuVTlMRUhrYCo/BMbMn3XWWZ2wgFvavNV1c+8LhTlryj7YZoZsYpW2CfEtAILDQOwQxIPN2GtAeQvv8cZnHnQDZMAeb7rPVIn43j3EwK8HZyLGiK3uN9R34G3/6RBr25R932VwJAq7WrQapRvWw9EDUVO2TqgAm0Ju/84R9AJkKr/fOhfQHGCO5cV+A+I+U/yds2O3X8ftOLVGpIxUXlRwVFpsW1kF5M0HvtPBVUXBdtOHIOCe8t+sk2t54gviC+IXo8YvuoPaVsmLnoCR2NJ7GeInTa4nCHLNVMzAO0UTwABn7xA2Gpm9k9ruO1AOvCjnKm2uPWgy3W5HaFGBvMPYtuzDe0eLb3u/OF7vPkkl5R3veEf9z1c0zbvj/CKvWwQPz8oKF7kAiO/baVolHsgDT0DdDqqUgZ144M1TtsG7zF4CzkG85ikKOHsMkPdw1FSmWQkYs8bwp2uuYnWh0ANi+7dt26L+g2lquu2CeA9WIZQoLCLFqBjImud0irW/ALVz7HcoDTS3nXtTkUkZOH+tD85TTgx8MK5cHCeAt1wZxToTVT9wbtspx2Tbtl76I/Q98f1OrgXxBfEF8cv7Ml62+7Qgfn4JsIN0lSQ+b5/ZINhCAD/QZ+OkuOrAK0jDLFDGrmVwH+kVr3hFF7TBNo877riN9VQkvOd4xg3sRbmXrEN5lebVGbHug51/bsyz4/y8rtssncoN2lmq+y5BfHsjBFoDkaASPANVuYXOQHw6tYLaAHsGW8p3oD4QL9wiANcB1DQjrgJwIA/o+bZ4yDz8qN1+D54Td90x2gfIp96DeMvtj59NxBkVBBUBFhwQr2LQ2lDSoVdOZ9Z2sKqAt/MPkFuntcrE5w7IHR+AD5ArL+vHQpMOrQF7rQyxELVWm/jjo9QXxBfEF8QXxM+SKLkUeT7egvj
 FqPuUVZ04wTx7jig3ypWHmaDF+sk6433G+5zyU0Fgx/BbLQSuocqCMmVPyHpGpz388MO78tb6bDvsO1pjbM/v2EBZLyR9ASYdvr7+57t/v8yz4/w8rpuWnUmVdJGo3vzmN3fzWogqLQDi+yOugsl0VJVZPhJqErTGOgPc+c5jnQH0+S4dXoG476jw4JzqLmQRwNYBBMiDdxBvmYedF5DvWHAArkoAT3187cDXcVDAfR/VPJ8Dwwl/Gb96fp9zbSsv+V22FXhPa4RpQm4m4k4qOSmfxMdPRSB2m6j1Cc2pXIC8c0hH3+RsI2EpC+IL4gviC+JLiV+dNE1HaH54kJ/yA98sOxR1vxe1hehlG0bTbUfW1HkR6FtHR1ogyOZjfT5671nvTSE/qfnHH398d3xUe7av+p+v3v0yScf53bpuIgFJOsrqC1RpSSGewhy7SB/iQTufejqyUsXBaQvxFALrgHgPEj20NQMCd14/thrw3od4ijxV3f4p7popW9tLOoUGxhPeMgAc/7vcAnEL8UNhHtMfILabROgJxCuDlE2UdMeYkJIZMCt9CQL4PPNpNWj7FTjm+PRtx/6i4I+LGV8QXxBfEF8Qv1USfUIrp2fPpCO2WlfEET5rnUOptzreae6nGLKPWKaZve3wWfD2/9NOdYTWCTfruQ6CK7D3WO69qQJAEfWutFxfMR0TgeHjHve47t2loyXxTas1v713rN871nlE66n7YPH3y3avG2tYBiYbV+mQ2Jrbz5UWCPHtAESZxguf8JJRikEnUE94SWCq82jCSgZOY6sB+NQA6gCA18Qn/BQbDaWArQbMg/gM7pQOr14qKgfsMR42gfAMHpWOqI7LftxU1O549uNb74exTKz6eNED8VHcA/cB9PjWE3YzFZqo74H6VICUWSo7jifZwzMAr+xYjpQRiE+EnFQE0pG17XCc+cB8rh+YL4gviC+IX/x9v50Oal7golxI/NVC8wHxNs+qxk+TwaDA
 AGKxi8YhJjrQoxSKEiZqhiguLJLsG2Aw5+Hl7xnOFgBGRA/j2XZuoBN8irJBvXMfUJL5vS0Dkfa3DlCwyI7QtgsM3Yuuh+NwLan0ylklzTvIvrWS3/jGN+7Uf+8ufcz699w0911B/OLvl+0+n0Xi07ozlETDUSmU2qg9lZYE4tvBmNqOrQH4DPokJ8wkoE/HU8p8ItS0HVv51XUwBesUeC8FHSDAOnCnugN88WFBvu9APZAXjcZyn8FyorhEtXZ8Hkh88h5APPD88gcffHC3LCO3OpeEfeyDewvr2WbgHKgH1ttoMqnoBPoTzSaVCuUQcI/6rqwC/21FyL5tK51qk2PNcdy5VvaT7wPwlhXEF8QXxC/+vt9OBzWdGQNM4JeIIcxgMlV9VpDfSRVRbqOmiNUO7EHH29/+9s7OoWLA/gEWeWgNoKeyw+/tOU/tV4HRZwp4+o3W23e/+93dcq24ifuuI6kyVqlgDeEh5yWnQqtc2K9KhGUZBMo0x7pM98uin2vUX5Ye++Cz33fffTvBTBkn+zzpfVcQv/j7Zd7PZ/8Z9izpta997Y4NqFZpzhDfjxMPXsHppBDPQiODaBBPiReBhpJDgU+O+h6bDZU+EA/4fS/SDLgHv4H4dtAosEwVAvG89MDfvBeCFwFvfWLR5/cB7tb6khyvu9x61dO5t2/BiaKvjFR0osKD9Ixoaz4jzaYypFXDeoH4tkNrYta3o8gG4NMZNxBfSnxBfEH87qcMcgMmKZ0+U6wpnSDSAC3sgjqlGdXZM8y8kaL5kzfroDbJKIvrfn0pfeAbNLCBUOlBp5YAlQQVA35yEK9ioDyp/kDfe8Sx8O0a+EklwXqEHRUDaqJW4EsuuaRbx7tHi4BKgOsj+ovrClqon1oSVE7sRyWBrcHUsWUgoFWE+KH77thjj+3OOxnIF8TvDYinwhvMq71u9seHX2kFID6DDSXUYiA3QAtwW198/O9874lOY3mgXodUGcTz6cV2A
 +hZZNhfKCyUGNDO12dKeQfw4N6URx7Iqxykw2ki4QBj29TjnwIvdi7FCvxH5ecHBMxRzFlRhgZ3av3sgep46Nt+Am1kmTaWvIoE65DjCbCrACgn5eJzKgmpFKmY+JzziqXJfHuNEjHIbwL7ZacpiC+I35kUMANyBiuRqL/GopAAHbiRfO8lp0kcCIoEAhKF/5Nn6aBWEL9zyXWSwXniu4N519p1F90H2GtVEeIR0IN70WRcX8q+94pr6zvvM/eAioZ52+Irto4Bmazvt9mObdq+yobxTUSwSfkZYErFxbFF8SyIr7RbEO96u3fti7AqqcBWWnKIb+0lUeRbO006t0ZBBvHA1HxAnhqeQZ8SXpIyD+Yp8iAezFNCKCDUcgAP2gPwVPrEjwfgvjcF9TLwB7+Oj//ePtJ51nZ11hH9BvyrGEThF0Pevh13Rj9NPPZAfM63hfi+Tz7wnc6tfuuz89PkqJLinC0LjLfbb1s0zGcUV9uOxz2Ku8/pg2D7zjNlnz4B7TUsiC+IL4jfOmkaBk8SSBPKTzJapwF2AuXHHHNMN//hD394Q4UCXpTe3GtD255XB7WC+NVMKgEgXE5fAGCu0ue+o+QDJXYh95NwkGLOp/xAP9jXaq2SoOJICHMvsAu1ceK17HzsYx/rKglaIUztQ+uEa2BfKgiOQauGSobjco+O8zMXxO9tiNfHRauVfWmxqrQiEN/GRm9Btg2hmPCKAXhQSW0GmVRoIJ0Qiubz2RRwg3igTa3m9eSPB9mJEw/iwb3vgHcL8dR1WbOo4wjER423Tw86EK9yAORtD8iz7HgI8eT7TVT4NiZ+C+4tvMdmY+o3CRUJplVsrKcMdKrVCgC2HU/botGq+Yn2ozzjgc8xqFhQ1FPJsL5ydo7KTMXIZ+W9ChA/zxHoCuIL4idJlEsqksR6kQFzqOIGgpPYXvTJkaybdaiwgCgwvh2Vdx4d1DRre/6Bt2TPwIL49YKyWZ+TiUoD1N3r6Q
 8A4vUT0IEY1ANy8O7edj+pJGhNYDNVSfB/IEBZrtLhPX3ggQd2v0suT/zq3y+TZkKp+6Wu24pBfGwk/U6diRMPXKnPgLWF+HTOpMADWJFkWF5MqdKsJRTwdGwFo+YzGFNrpQHy1AcgrxkSvLPBgPyMwppwaZRr+8wIqo5NB1r2HF548eipFNR7L2z+Uy9ASn/CMwau00E1lRTnmwoM4I5SnnjvlpmCaa0M4NoxBfBNKemmtpnvHaPfgPZ0RgXztsEKpMyUJStORrhNS0M7uqvt2kbb2XUZIX6eI9AVxBfEJ8KGRNnUSVLijSYGSDzP+tNIgEaYxKijlM9lV9SotewVkrjfWgMoo23ey5W0dYSyRY/UqcKqEkChZysC+EJTznrf1X2wnPfLpNm27aOu24pCfFTnfkfPRGUJzMcTn46ZCTWZkVUD1z6DUko1r6Bavk6mVADATTWnyFPNAbfIMlmug2oGhuKLT3hKYAuwHYd52zdvv7ZrP7YD+K0P4r0MTW0no6+mA2k6uCZspHOMP965t375gL9lAF0lJhUd2e9l30Vxt93EpFeuygSgO2fqoM5vykALhEqMzrjKKOE6MyBUwmbm+jiGZY0TP+8R6Ari1xviqdd85BKQOOmkk7p50UVUyiUKVAA9kUiyfuwxy5biu5bYH3itJRFbtA7271PqqfPci5W0vZiWYaTOug/W736ZNtuGbdmmbdtHOu1XWiOIjxoMUEFlRhyNnSb+dzlx0TMYFFU+UWJkMM9+Alyp5lHcwXeU9AA+8KbMU9uAOAhOpYIab9uUbvui/Ns+W02i3agcmAIAFQLwmzjrLcRnICfTQHtgOeWTeO2JBW/9/qBOabGIdcbntHCY1wqhlUEIuZe85CVdJAtAb5n+AAmP6Xxin1HOsSillcDxUPMD8sv00Jz3CHQF8auXKHtJXhC5HirmrhFPrhYnyb2i30qg/LzzzttQ32eN/LGbScsAa4MkhKKRT6Osi7cu+Q9
 kHcrnut2nBW+Lf07WfVD3yyzZNmzLNm3bPmoApxWF+NZO04ZZDDS3ijxgDeQD1tZaw0ICQqnkIJsS74XN8iKbB6qAFdADW0BPhQfygft0fqXKmwJylQEg75gBbiAe6AbqwbzKQBR921MJsA2tBs4PACc6TdvpFGgnbKR9WA/0J1Z8OqmCfftXFvZtu1T6VBD8zvr6Adi3VgGDtxjSXFM5gBd2jpVI5B3HympEpTdCrXnl51yVZ2LvJ+SlCoJ92c+yPTR3asTCgvjlSB70SZScJJXRJPdukhanXA9Dx+c+XZV4w4438M2rHljP+bIg+F9LbDxU983SOt6nBW+Lf07WfVD3yyzZNmzLNm3bPpZpPIWC+DlCfOC9tdMEoNOJNfHiASdYB9TANIMvZRnbCPsIKAfuYJvy3maqHZVepsiDeKDPWw+y7d/2AHSsPfGTqxjw1+sMm+0DZZUIAN7GXA+wKwPnmE6v6QAbVX6ov0BUfBWYlI2KhM5AfP/Ufy0IQEamzFHqRCVg8xHPWHaMsQSp2PDIa8UAQwF6ZZvKBohXWVhGJT6qwW6PWFgQv/3E0pF05plnbsz7/yT5/ybpb5IUS8wqvtzdqxnQhF3HYEQSn71+OxL/vRELJfeuPEvajQ5qi8rOybkVvO3+c7Igvu6X7WTbtO1S4VcY4vugmtyO2BrFO3HhwTulOB1b286tYBu8A3ZQbb610wBs1hequxclZStRZQLuifsus8VYVydZcO1YAK5KQ0ZEBRj2A6LtgyJvH9R4+7N/v3PelOzEi0/YyQzmZHnUdNn3gXlloXITjzx4VxbO2faBu3B1gP3UU08dHXfccV0vf+fENgDmKfAsPpZb3zGCeJUUZRVrUuLq27ayzsixjnNZ7TTTwEyFmNy9pMk06WUve9nGvApkkv9NUqwgkjJd5Zd7QupRmM4666xunupESJBE+fBfzLr8oVl/J+77ne6gtqhcHeMW95wsiK9UaY9BfEIatip8
 YD6WDTkDF4HV+N1b5T1Wj8B7IsewhciUcXAARqM0+wxYwTWVLyElo74n1juIz2cQbz0v3kSIAbkqC/bpWFUYADGYNg/4qfMAnrpPjVfhiO0l8J5wmdR8lhrnmhFi2xFT/UZlwbk6Hx3UADubgCZ16qXpCSec0ME777uQXzJLTSoklHg2Gn0BZMofhZ6yaXAr56vVQDk7NsfiGrg2KiFyRmt1TAXxexviqXRJ6RgqHXLIIRvz/mtJie4iiZG+Li93YfOS/I+S4r+nNJ122mkb89T13U471UFtUbk6xhXEF8RXqrQgiAemAfe2Eye1t426EjtNBnJqB3RKZJrYPyy3DMADdso4dRlQA1+ADeITF17oRyAf9Z1KbR6wA++EjgT5On9SDhMlB7zbnsqE41OJYNuxPxBvf44DvKsUAGeQb13nTk03n4pJOuk6ftsKPPvO8asMsL/EIgPgATpwN5qjbJ6KKZqA6SmnnNIN5W3fOtw6Nyp8wmCCDRAP4B2j8nDMifbjOFQwHIcKR67Fso7YWhC//UQBjgps0I1AkWhGucbukfjKE2ZRMthLUqKjrMPLvW3mPeecczbmo6ZLnhFJ6SC7bGmnOqgtKlfHuIL4gvhKlRYA8RkhNBCfqCw+Jx66DGSjyvOfA+d0YI0aDzYD8YHg2GnAL2iXgTX4MA+IZd5xU4BLeadMgxLADuCtb1vgPR53+wW2YN4+7Jc/37E6LhUE+8i+Kfy2DapBtGOOXYb67vcZrKm15KRzLlA48cQTR2eccUanPAElwE49N4w2cNdZ9aUvfWm3DrBnWTClylPewTpIBxqprKgQUOx9R6HXauE4gLpzpMYrM5UI50Z1T+faZY0TXxC/ecqoiZIIJrF6PO95z+v82JKKaeb1nRC1RXrb2962AUirNBz2NPdp2zLgv5OkMpvUevENTrJqFbSd6KC2qFwd4wriC+IrVVoAxPeVeNOM3NqH+NhpAu9y5qPIB+YTJ95LV2SaD
 Pgka9YGqaAcnCa2u3ng7DMVHUBT36PQB/CBb3zzts3uAnbBtuwcHCd4T3QalQRhKv2OJ13lAJhn0CRZZQDMO2ZKvcoC0Ka2U8mBNkAH5ubTKdV3fO+AXrQZ9hoWGqEjLTNsNasNwLdufPJUeOfiPBLb3rEqByCfypHyzSBSGYAqoTBjCVplO80iRnbdycRbHeA+//zzNyK5APTPfOYz3bxrHRsH60vCKF500UUbcL9uSubQy90Q3zlflfacc6usC6uXtCrRbCZJO9lBbVG5OsYt33OyOjhXqrRHlPh+ZJpxEN8q8VTrfA7Mp7OrDELT4dWUqgye2UQAffzxplHo458H4WCetSSqtQ6gXvYBcrYZdhi2Gvuy7YyUalux6VDzgQEw91vbVcnQ6qDS4nz9XqXAcVgP6ANsEE9FlwGXDODZYUD8c57znA7KKfG+E0aSjQbEg/fTTz+9A3jL/I6CKKuUODZAp3yUg9YDx+oY0kFYubajyMarT6lPKMtVhvhFjOzaJtdt0qwCFuA2YM+nP/3pbl7lTAg4yXUV0UTiOwf1kmPaSy80L/BUZl7zmtdsXA9DugvBqCz8p1ImrZe9UqVK83lOVgfnSpXWFOLTKVKOPSM5EJ8Qk6A4YSZZWJITXjKDPqVjaEYXTRjKLA/Ug20ADthlwE51ZyGgxlPOqeFgHnCn4ysoD/gDbaBuP1HSbVcn0IxwGjVfhUFnVMsdg2lGaU0HVZUDSj9oz+BLjgPMs7/4bB0VCh1sAXl87Cw1RtRjszHUOwWeD54VAOjzvbMI2U6UdtmxpGxVKpyXioN9sfSo9FhHOUeJdx6AXitC25qyihC/qJFdk2zTvWE6SXa/xN7Bk5344ewfe+1lFbvEZZddtjHKqH4fV1xxxYayntaGdpCa97znPdXMXqnSDjwnq4NzpUp7COLbWnMsNG1u1V+gmTCTADjQnimwDLxT5mOr6VtrQDZoStQY8CwDdoo0gAeyMruNdU2p6E
 JLmvpMMU/nWJAPcEG8/Zg69nRaVRmx3HHJjglA259tAnXbdQyOy3bTIVeISkDPq07BTxhMVhiKa2LZA3Q2GnYaYep0etVqoFLAvuP35kG/qW3Zn3NJq4RycCzWBUDKK6PiKlvlnqg5rotzC8CvqhK/qJFdW4jPID2TpGnXX/XEu5+WB9aXeNVVdC+55JJuXmU1NqFLL730WiO1XuuhVF7ZSpV29DlZHZwrVdqjEN+PEx8lPp9jpZGjvgPMVnkPJGd0UYq7aRvvXKYuU9MD7OYT0z2Ra0C6TK23TsJQUsUD2xndFcRTsjPgUiAe2FKrnUvbCZcn335YVyj7wNm+48+Ppce8dUA3NR20U+FloM2nD+Kp5umgCuYTQlKmwIN6U+p94sODdC0Ejj2DUiVKT1oAHDNwV96xLylf56fSouK16hC/qJFdC+Kvna688sqNQZ60MLz3ve/t5nW2fv/739/NG+QoynqU96keSgXxlSrt6HOyOjhXqrSHID6DBMlU3ajxoBck9u00gfeAewvt8cLHDw/UE2oy31HAY6/xPUgH1NRo0AzW44u3DtUe6FPLA84gmAc9HU3BMNuJY0tce8eanONU4XAc1O1UBFo7j4oDsFeZUDEA0YDa9gE4ZZ2iHnB3TO13phT6dKQF/rzx7AVCUDpe4SUp/raffQB7EW946W3HsUWBN4iUyolzsywdg5VNogWtup0mKtNuj+y6lyDeSzRRbD70oQ9tKOjORRN45rNc2cXHP9eHUkF8pUo7+pysDs6VKu1RiI8SD96p2AARQAbiMxBS7B2m8b23KnxGTY2FJpAfFRyQ2lagFLCz1VDCA7cZqAkMs51khFaQH6Xe72zDdh1PBqOKIp8WA5UFsK5CAdaj/lO/eeqBN7BWYWjPzbGrZFDIwbpsnjof4LeNjK7qO373xI8H7OJ5A3gdW/nmVQKAu21R9Z0b2NdBliqvDGyPDYeib1vpzCun1SAVCOWj4gXg1x2Odir
 E5DpB/Oc+97kNuwsl/a1vfWs3r3Nt4sZfcMEFo4985CPd/NVXX72rftWC+EqVKlWqVGlOEB/460N84sH3IT6+d3AM6FuIT7aO7zMYVGu1iT2khfh0agWwUaljq0lHUoALsuOxz4BMGYhKzoimUeITLhLAB9xtV05oSzAMkoEz64/f/Z/2zl5ViqaLwkamegnegHgHgrk3YGZkLJgIBgoGgrkYqoGCkWAkHETUwMBMNBAUDQTFzD8wm4+n+dawbbvn7+3j6Z55FtSZmT7d1dVdu3atvWtXFddnV1ruSXmIeYegx7jA858NrPjkHnjw8aJD1AlBIISGkQI88ZA+vO3EzOPF55544DEgIPFZG573AEnn+lxLPhgEePopL2UlD65hdIKRE0JpJPHbT+IZjQCsiPPs2bPm+9OnT5uVjwDrxzOxDCQuFYxlOFsSL4QQQgxE4qsnntANiDyEPZ74Gk4TUhyiXsNpEm/O90zAzO/sqpoVauLprhtDETeP1xvvOuE1CSeBSJMfZWMt+OxUmqUwMTiytn2+8wzck3JCwskPTznEGM82Hm0MBAwIvO116cwseQlZhoxDniHZ8cTjHYeAQ57xvGdH1RgEdRMnwmcg4Hjg8YSyVvy9e/ea7d4JCYLEkx/lyURZltuD7HMdv8mL95Jy8vwQ9sOHDzfvhJQlQvmfJH76JJ5Jodm4iGUqWZoRMBcAmQJMHn38+HHznWedyoZPknghhBBiIBLfN7E1BB5yW2Pi6wo0fNbEMcgm5D2e+HjLQ+Tr0pM1BCdx8pyTuPasjsO9IauJ168kvnrisxlSjmNIQOKzdCUedSbXco/E93NvzsHDjncbTzohKpBxPhO+gvcb0k74C5747AyL8YIhAtGOJx1ij4eeSarx1nNvvPAQdEJsiKvHu8+qHoTbsGQhpJ5ruJ6Etx+jgvfEfbJST0KQKDvPsQ0x8btE4rNZ0c+fP2d7e3vNd5ZkxHADrJWOXAAmmkLYwTYs
 0yaJF0IIIfaJxGet+ITShMRDjCHJ8aK3CXzIOSQZ8g6BhnhyPCS+ht/UteMzSTPknHJA2vEsM9mW33xmwm3bE1/LmpGDTAaFXGcn2EyW5X95FsoKKYesQ6IJ34GoE6+eVW+4HjIOQSeGnlECyk8eXB9yDYnPuXjzCdvBS0/i3njWM7kVrz2EnQ2iMukVjz/GQp3wS77MI8BQyCo2WYozoxWQ910h8UPu7LrfJP79+/fNJ8szssMvYAUY6j3f79y503z/9u3bfNOorVZKknghhBBieBJfV6epE1s5BpHORk+Q17o6DSQzk1j5DknPjqkQaY5liUnO4f9cH9KdEBjCZdiwqSbCRjjOZ105h+8xODIRNxtPZTMnSC6x6hzDSKAckHM2aAphhwxnV9i63GPi5iHUPGN2rsUYyco7eNIJz2FdeMJhmJx69erV2c2bNxtyTsw6hB7SjReeMBo2hKJMEHSIPWE3fJI/hgRl4xxi4VnZhrwJA8IAYGSAuHsMBOYOUG7KyLvblYmtQ+7sOgSJJ9b80aNHzXeWUyM0CmBsMJoTgp5zdn25NUm8EEII8R9JfJdHE+93iHyWmIQcxxOe8BoIcQ2raa8Pz++E3uR/8b5zjOsxBLgHJDwEHqK+iMQn3Cfe+vymXInXT8pqOFlXnfh7yDnx7JBtCDjhMXjOIcV46iHbSZB4SDKhMBDuTKiFgGclGjznkG4moBLLDoHnO/lD5jAQIN2cC9nP5FSMC+6BN5488O4TOsOIADHw5MlxDAPi8Fmykjw4J2XlGGXHY8+IReY2UKfU7TaSo6F3dq2knDqGmK+SssRnykTd55ADZ6QAAAoGSURBVPvr16/VPJJ4IYQQYv9IfJdHs5L4ulY8x/hfPNHxwmc5RjzdWT8+ZJ3feN3xWscTDrHHKIB4c4+EzPQR+D4SH8895eI3BkEMipD3lIEyE84C4c0yjXhIIdd4wCHBEHUIO8Q6m0dlDXfIczz0fBIzD7lmQ
 iqEHC85Me543lnnnfXrIdrky33JD7JOiE42lMIYYBSA6/Gw84lxwUgBZeI88kg8PaE0PBPvD8875SQ//s//sjINiTqlbreRHA29s2ubyK+TskqMkMQLIYQQ/4zEr+rRNE0jUXfUIXVJnVK32zAJso2hd3YVknghhBBiUiR+VY+maRqJusua4NQpdbutu+QNubOrkMQLIYQQkyLxq3o0TdNI1B11SF1Sp7s+eXLdJSaFJF4IIYSYBIkHq3g0TdNK1CV1uq1eeEm8JF4IIYTYeRIvhCReSOKFEEIISbwQknghiRdCCCEk8UJsRuI33dnV9G/SNu9nIIQQQkjihdiQxG+6s6vp36Rt3s9ACCGEkMQLsQHcB8H9DIQQQghJvBATg/sguJ+BEEIIIYkXYmJwHwT3MxBCCCEk8UJMEO6D4H4GQgghhCReCCGEEEIIIYkXQgghhBBCSOKFEEIIIYSQxAshhBBCCCEk8UIIIYQQQghJvBBCCCGEEJJ4MWG8e/du9ubNm527txBCCCGEJF5MFpcuXZqdP39+5fPZPfPQoUMHcm+x3bh//36zJvwYcPfu3WaDqSFAPg8ePLCCRw4dGuoH9cNqePv27ezChQuzX79+LZXnId/V2PH8+fPZp0+f/jrOsYcPH/51nL1Pbt26Nfv9+/dcxtv48OFDczyJDRAl8WKOa9euNWR6mfLMRjtsuvPixYt/dm+xG0C+MA7pHA4ayDhloRMawmilQxvK8BU6NNQP6oeDxNevX5vyHj16tPm+SJ7ru9oF8KyXL1/+6/j169c7/3fs2LG5XEfG25sa5tqcS8JYkMSPEFhrqSQSW8xjoZ08eXLeaF69evXHuSdOnGg+T58+PReivb295jtWMr+x3LiO6/lNfrH8QqTJDyGpwoUFiNClPBz7/Pnz7Pjx438JGInvgHLX5yB9//69l8TjXUnZ+OQ332O5ci2/URg3btyY54kFCziGVyDv6eXLlwrTxBCZ6fJiHB
 RhoHMdwmgdkuwJHRrqB/XDQYJ+l356FXmu72pqgG+sK29wpPCgispb4m0/c+bM/BhcKzLeJvFcW983PIckiR+pkqJyEBxIaRoAgoGldu7cubkiC4mHsPO/CEJINYCAU/k5l9+cG8JcGxvXVkXCORzD4gsxJp/qNQjBpwwQ7twjz4KQYkhQ/i7rNPemTDx3LHyeIUIPeBcxYGK15pkh+DmXMvCO+rwD4t8DA7IagZHpKEfqjvpuG37UdwzNNmKsVQM1v6n/auTGe4H8VYMTuWzfEzlrd9LVaKXzqufHWI4BXJ+zr5NOG06bolwY4rUT4J2kPUYnVA8Y5azPqkNDh8ZUHRrqh2nph9ou009H1nN/3k8Xie+T42Xtft22tajNdb23dt3BW8IvkniOvmsh5fXcPhJPPabNRkbDr5aR+LyLvDfKKIkfGSIgtQIj0PVYCHH+N6+8/wsCio3vUVwICUJVFUc6BvKoHVcXiU/eaUxV4bSHdULk214T7r+IxLffAeen8UeAeZ58RzlF2dHYON5WjGIciPKlM6XOMrwa2YpsRGY4J7LY5W1IR0EHh/LkXK7ne2Q+HpTIZ45D7kIaKjHgejrcnFM7nmq0Rv7IoxqZtBXKnTzaQ/5pM5WghBCmPJQ5zxbCSRlpz+RPJ1PJKveiDPuFK1euNEmHhg4N9cPu6Ye+9s874R2kbLVvr3yli8R3yfEq7X7dtrWozXW9t9Q/3xPrn2fJsb5royv4Xuuli8TneUPgY3ivSuJrasfFp74OHaTilsR/+IMs10ZRj/WR+Eq66zBNFGFVeBEWzl9G4nNuH4mvCimNoea/ComPUYFyIKURVM9XVfhRwnwi8H357yrG1I5DMKhX5KJPNtqGX9oDxIQOEMXa9si1hzrjEQmxq51EPZ/7dd0zZQ2RoOOp8s79u7xOtR3UZ+vytAE6AZ4phidtM7JPSt4cj1eNc7v0wTbKkA6NaTg0hpAR9cM49cOiuq0ytg6J75L
 jZe1+k7bVd7zvvcVL3kaM/lqf7WvJu8rRonCanEf52qMJq5D4GHKL6ksSf4BIZwIBRzHhSfjx48cfFmWENzOg+zqa/C+CQl6xZMknJL82tjQghCTf+axKiPTx48f5fWORV29YlPE6JL56vqKQo8zqEFw6Y56D/8X7IIkfL4lvD2GmA6zD5V2KN/L05cuX+fB5DFHOjTcDmYxchMxwv9o+lhGDdrtJW+zqpLs6rLanpI2u4fLqiapDyNXjFo8vv/G8dBHZbSbxOjTG7dAYSkbUD+PTD8tIfMrGM6e/XhZO0yXHy9r9Jm2r73jfe+N5ukZ1qh7pu3YTEt81+tClX5ZdK4kfIdpxWBGWeiyTItqehCpwEfy6/FM7Xi8WXfUARfElfq+rI4TEHzly5K+htS5vyqrep8R5RRnn2gw/VqFuK0SeURI/XhKPvNYh/8gzhlnif6u3keMox9opV8Roy9BlwiSq8m4bucm7T9lzPkYhco9HpnY8q3TSMa77Vs5oD5fzDvI+0uFmGLy2Xzp0npNOK6EBu0DidWhMw6ExhIyoH8apH9Yh8ZQr3uVlJL4tx8vaPc+3btta1Oa63lvC5ChT7hs9wrnIW+SpfW3aa/TJKuE0y0g890+b4LgkfoLoGg4bagh9WT59/190XYj70MjQ2ybPIYkfRzuOIVZjWNOp1YlA1GcMuHRYXWtCR1HW2MD2xKIo/Dp8uWjYtU44457peOhcSTFaa2dQvU51Ih2dS3vt5JpHJl/mWZHvdF61k6/eyeoB2gUSr0NjGg6NIWRE/TBO/bBqOE2dmFwngbZJPM9Y67uv/+5q9+u2rUVtruu91ZGtOvE4Bn/y6ru2La9dnKUvzK2WtWtCLnKwSoicJF6MDrGOx7K5hyR+f43Hdc9tn7OJURel2Y7NXAdR7HRmEDCU7TKFm7LmM0PMYzJWxyJDOjTG69AYUkbUD+PSD/vV/hfJ8Srtfr/b/iayNaY+XxIvRgOGkTN7W0yf
 xI8R7WH0TRCPG96+eG6yxOEqiJcty9EpQ2IqDo1tl5Fd1g/7Ubc65iTxQgg718GQCXBd63avAzx1dMzkhZdpneHsLCmoDImpOTS2XUZ2WT/sR93qmJPECyHsXIUyJJQRYd0KSbwQKmAhlCGhjAjrVhIvhFABC2VIKCPCuhWSeCGEClgoQ0IZEdatWEjiT506NT9gMpmml2jDtmPTf0lnz55tku/CpJ6x/ZvGnS5evDh78uTJ7BB/rDiTSQVsMplM6hmTafzp9u3bjUf+f9RDHhmJmrjxAAAAAElFTkSuQmCC"></image></g><path fill="#000000" fill-opacity="0.0" d="m263.36478 389.604l115.11282 0l0 116.13983l-115.11282 0z" fill-rule="nonzero"></path><g transform="matrix(1.0205472568014475 0.0 0.0 1.0205268625811956 263.36476587926506 277.1458259483198)"><clipPath id="g18f7cf5d33_0_17.2"><path d="m1.9610188E-6 110.1962l112.79521 0l0 113.8038l-112.79521 0z" clip-rule="nonzero"></path></clipPath><image clip-path="url(#g18f7cf5d33_0_17.2)" fill="#000" width="224.0" height="224.0" x="0.0" y="0.0" preserveAspectRatio="none" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOAAAADgCAYAAAAaLWrhAACAAElEQVR42uy851OjeZquObGxEWfPl93ZM3vmnJ05PTM7bau6uruqy3T5ysqs9BZIA0niIZPEe+9BeIQwwoMknJCEhBBGCJDw3
 iTpvfeusrKqe3pObJy49iFnv9T8AedTd8QbRYOQ3vf3e+77vm7xKv/qr/7qr/4XOf7DX46/HH85/qce/+tf/f//25ADOf77X46/HH85/qcdm5r7PzcFeDsnoHRZG9OOpbKX29dvMFrQS8qXUQR8GEH2jgxsin5un7/JjV4Hzvh6BkKqsZ6sRXusFGtMI47TDZgPZmPxUTB4soahk2pcJ4oYCS5Hf7KSCZWBsbhGBkMa6fStxOxXgitFw2RyD7MZWpyx9UxlG1mqHWQyt5UOrwoGs4e4efY2T769y7PhaYaC1bS7FaNzz6Z7fwaONAv3xlaZLdEw5FmM1SML86FkRg8X4jhRKa9dxWhkJa7AGlzhNUz45jJ5NAunZx72AwpG/VWMnG5mNLGF9Qwd1uBKCvamMpTRzvmSXhaTNFQHltFb3EbJ4SS6Amq4Uj+ELayS8q8jmYzrZKZ8EENwAq27M+iPb6EjWsNixzqrhlUSdqUT9Ids2uI78PzQg89/vp/SU7VU7k5iUa6zIqCBwfplXqydYaLNzkidFV1wPT47CzmwNQuPT7M5/GEwGe4KZvL76Y1toz2oGu2pFuZts7zcuImjdpR0zxZyfGpI3KNAnWbi6e1nvHr4mD8/fM63L1/x7NFLrp+9wGBBDVVeOUyV2pguGyTFq4GknZn4vhdFxI5SLMmNVO0Pw/Nnn7Ntiw9n+lz0++QxFFHLuU4X5+tsOCJVrKqGGSs1Y8voYrSwifWBBaZ0g1ii65lX9rLeOI7ltIqpU9XoTjVT6NmMMbOXiVYXgxmNNISUoQhpoS5Ww2d/v5V9W05xc/U6987eQBVey+f/cBiPX3ijz+vCmmfmxfXb8hrL5B5X4PFxJEc/8CM/qJmL9nmuj69zdXwFXUQN3p/EE/V+DHF7cjBWDXKlZxpbsgG1Zy6N3qXUyZzev/+Yf3n1kLqMWrb/xo/P/nEnfp/6Mm6fYa13DvcPQnj7/97PR//kwb4PfBhtNf
 Ho/jP+9OoV5+zLTNnWMOfUY0yuYN0yhim/jsBPYgj/OoL4rW4Maqu5f+U69qoZ2sIbSP0qnupQM2uT13j96CHP7j7k5aNn3Dl3c1N8t+X4f/5NgH75ywbVJJcW7nB1bIqKIykc/uUJYrZm4zSscMe5xtn6fmz+FfQcVtB0vIgKnwLqfOuYLrXiiFXR5ZaC9XABHcfzsUapsPvlMhxYgfJEMeaEViYidRjju7HLogyLCMeiGhmL0WCXQZ8o6GYytYXhUypsQUqmIio5k1vDo6kJ7pqMOILTaNqdQP3edLQHC3BF6DhX0cuU/J4xqATL8TQcR9Kw7U7E6iXikucej9dgi25j5FQVk7J5g/uTmDiWy9gRGarASqyeJUxFNjMZ24BdxNfoU0ZboonZ+nERdzMmbwXdQTU0eOTS8HUc6QfDcSh09Of2MxDXjj1LDKVYw1EZ4HwRjEvRw3jbCFcddupCIig+XkZDsg5leDE7fn2Y99/2I/JgCgG/C6Ijrw1DTgcj9VZsyjqqEqrIDCygK7mN+fpexnQzjNcbZdhOEemWgTmqCosMbG+whpk8C5emb3H/+lMeP3jGyshlLk/cYnLwCk9uPuPl08c8vXyf5doBOezcWb2LOiyJT//zVo6LccwUD7Fc0sPE5vqc1FDkXoUppgbN8UJCt8Xy7rvBRPrVMZOlwiRGtyT7spxZR+vOUCyHFAxEqbGJ0A1JWgqOKlBFNTBc2I0tsFHWvYGhcBGzXyFjYWJ6mRqZhQZGFDYmzdPU+GSiPFbBXKOIuEbFP/7tr9j3B39WrRvcPnMRneznl+8E4i3G0Jyipz66iruXr8tMrpAtpunzQRCHRGgmMQCTrIdZZsdV3kSuZyFp7oWc/DKNmuxelq1TzJZbGE6oo3hfMqU7kij0KObK7HlevXxOpI/M92+O8fE/7mK/iHrStcatuasc3xKG+8dRuP0mgIO/CiI1qJE7K+f58/0HTLWPszyxQVuKmrjtMoP
 964zohojalsbuf/6Md/52Jwd/GcxE14yY1wLt4e1oCha4fuUaj8+f59LIBKPVHdxeXOTa9IUfC1BxsnZ5trefG85BelPLOfTWLr7+R09Kw3VcWTjDme4pWVAVBfvTyPBQUuyrpjGkntZAJa6kVpy+ZRglmUziNEMqEysKDf0iUn2clqLIBlmsGsxeuaz1z4lguzEH1jEU18pUeg+LShvOhEYmTlYw5qfA5lOK2T2PgWP5LIRWMOqdQ7EMjnJbDF3yGq7wJmbLhjGHFNF1RNL5mByHRPwHcmg9mMNQTg/OlDZsISLmiEac4Y3Y3OR5w6uZle9NHS9h+EQhg8eVjIiLjxXrmS3tZaV5hDOlOpaz6hg9Wc2Afxlmjxzsx7LRHMoieIe8RrKGnph0aoIL6Unqpt27iI7QJvpP19Mgol4anCI/PJVf/vXHJMsaTfdeoCO7n5bsARQi0JCd0ex7ZxcBe/3RtE4yPbxB+qEkdv3yGFHbM+mLlsHP0zAtadThX0z2YRWJR3LI2edHT6qaiXQTS9ldPHKe497IAo+XN7h78SYvHt/m1atn/PHlU7599pj7G7dxVAitaCZYb3Hh+dsTvPNPR4nyacaZ3sp0lFauL49WT6EW7wqsB3PR7UgUYxERKYdZKW5gfP+moSbgOpZH5zdBaL4Ip2dfFH0hTThyDJIqhSi2B1EbVo2zwvxv9BOixCyJ4/CV5wwowhGqxBoiNFJgFjJw0CjmnPN1CCq3LCo8k/mH/+Mt3L9MYrJ7gctX7uKwnaMquI1qoStnjpnWEFnDNgdXF89QXdRBwMcJhO0qwBRaS9mBPKqOlpO7PRu/TyKI25VH+okqFjpXOG9w0itUVrorlaz9peR/k0jenlxmNON8e+826pouPvr5Eba/c5pEIa87T59y78JNgvac5thHgXj93g+PT2Lpap9lZmRa0rsDY0o9N1ZXRBOFlKZ38/TJMxzl7ZzYEo331mL2/SSAY18kopXHubIl
 bKqGuL06zay6i8EKLdXeacRuLaO7YoJrZ6/+WIAdav3yeNsoplwDefsy+OnffIL775MY6VpmUbuGo7CTWlm02K9SyPIso1IQsjOqDrWgjyVIxcCRVGx+xQyIqy51O+k+Fo0zqhS9OLpWFlG7Jx17aD3XrfOMxNXRHd3KcGYno5KMY2my6UeyGJNBHz6ex4RXIU5JwAlx/f4jmdRtTaZsSwyGoyWM+tXSL0NybnRDBr75jTAsh3PoEPztC1QzcLqOJRneQf98LEfzJdlUTIS3Ci62c71/lYkwMYigYhyCxYMRaiZzJFESlCxnNDGX2cyECHTYJ4fRQEHrI8n0iTgsXlm0e5Uxk2qg9XAIPj8/SIa3ksaIUip2yqaeUjLoV0pfUi8bejtROxPY814kqggN19duc75PHLGun43mPkpPqPES5PqDpObZldtcnVkmZ0c68dvyqJE0UbvFUyyIGr8lFvWJWubLe6iS1/m7//0zvORaHbUjjAr2zjRYWCy3caZymHtzN3n2/Dl/kuNfnr3i9eNvuXvmGhuL1zmnd5J9KJyIL3OJ89HRmaTDnGxmrmqc9u2B1H0aJTSgZEwootktXyikgytiQraD8v2Dydi8yxjyyKD70xD0gsIth8oFC61Mqa2c/MSDd//jr4hxT2O6eQijX7nsQRl93ukMiTF2+ZRgj2hgIlrHaHYHa3XD6ALKCf9tNIVBauqTavn0F+4Un27h2sgaL54+4/v7jxhPbqFFDHCwaVSed4LBRNk/ZRfTjU7K4sx0xOmpidAKWlZRLZVDuScJxYFM8vaXU+Wv5LzWwaCyj+Z4Pd25gvbqRdoTu2k9rUFzuob+4ibmO1yE+6nxcStkeukCLy6f56JtDm2GnjCvKkJltuprnDy6dQtzZS35HgkkfJPAtGkIbYIWjdSwuysXmJBgai4awlnnoihpEGV8O5k7w0XsyZgzG9DG5OH+q0N4/9KN/b+Lor5KDHFUsHlg8scCXF6aW
 /7uxQ88uPeMxthqvv5lEIWBGjZmVpkzSc/xr0blVU3R8Spy92Zh8i/BIAvec0ISKqqZ/pAKOmSoLTFNWCXFDPtScSQ20y1i6Be80R5XcdZ+iSllK30iirF8E6NxbTiDK+j1SKH3UIakVyYmtyI6pf9NyXO6fEREnpXoJTlt4rpr0j2c6TLk/fM8X1tmIbWa9p0RVG5PpFs2ermyT5BHK+IvlCGQQ9zQ4l6A0y1HzECYPbv1DSY7AlVMhpfjPJlHn6eC4bhBJko6GfBMwiriG/FTMiBisEqHq9mpwCHd2NU6w73OIXIOZ7D9px5CAgl0CWYZkzoZkWvvjqphtXGU0gNRxH0UQsxX4VS6pzBXqeeqa4o5tZBFupaRvEaGKnuojK+TvqTFVdVC9GcBtEYoydkeLoh1mHRJiJYAQeQABT2Becya5mlRjKBL7pRNbefcyDlWqmySKkPYc3TctkzzXNz4lfSU16++59W3cjx8zbeLVzGmV+H9tQJdluBxghqL0smNJ4+509FP48dH0XkqGc0wY48pZ11l5MH8FRaFBhyC3TaPdNbFZLsFGTN2FVMt3bo7U5Js9iqTfQPsfD+EX/82huRKO/PqTroOp2I/kUnn7nQKD5XQmmKWNJWumN6GPbyFtXqZIzHbtCOlLC3d5aLjkvS1fNqjahkp6+LCwADXDXbydyQQ82WC4Lv0y+FZOuJbKffXMGVe5NG1+5w7d40Ll+6yMXoRfYqB+K/TqTiUR5cYvNq/jk4hBJdg4JWNB6x39NGbUY42Nl+6eAt6oaOOZAtdeX1cMrtwZg8ykKWnRxKrU87NmioGlWtkvMxMe3KVrLmGvBNKAj/0x/uDWMo3q0qMmsr0IUatVloVpWgScuit0HDvyS3GLYvES0Kn7M0n9A8nOfheDEc/O83X/3CUcEnrjQv3ues8S2+M9scCXFiYWv7zq+/esG51ohpFlEnSb4mbUoAXy/tpi+2iJaGFit2pKLenoTuYzu
 CpWukSvUxnaLFLz7KHCrqFqTEKgvQHKOmJ1dETKikWUM1izxwPphbpCZYIDu3AnKXFmSYdRB477FdEpXQNjTj+UsE4V4dWWCrTYBEDcMlz2EPqmC0cYSm/gfk0LRfLJDlPyia7pZP6dSwp0s9aj6uxJ2gwibDUYhBdh3KYkNdu3xFJnXSHgbROwdlaur2LMcp595xQyPM3MSbOdds5iitBJZ0wX8SXz5jgtN1DDEZEbYwVkZwsYaPdzkJ2A1bpaSfe8ubkexG0R3QyWyL9VzrRaK6WjrAyvH92HJ+3g4namk64IHOFYPSYoKM5pYlWee0Gn3L5vUriD4aRvzOW7phW0nyqiRcH9/xnN778v7bw5X/+kIKQcjojK4n7IpbJtAouVArGaOdZWrjLhf5xIYcmlhvFOCSJluptPDl7gefXL/Hy2nW+vXuDh6vrXNe6sKRb5TU6GD5djeZUO3enrvDq2kNWaozUHy7mkqy1sy6fhK++oCCpjCvmQVaiRbA7sijeUUJFjJ4J6yptkkpxO1JRpbbz+MojpjWT1BWMMHXmDncdw/T5pKLdFUOHrHezewlD6jEWG6Xrx6oYzTTiEupZyGsQehGDLOnj2vnLfHvxtnTlCkl9MbLYRkr3xMoaZZG6K4GgL2LI8q5k3LqMvdbKWP8iTwUTv//uezle88Mfv+PR/fssDp2lq9j55o0OTVQT9moHRtUozrELPLj7AE2kQnq0DyG/C0GxPxVjohDS0AyzrVbsleNYG4bRK4ZoiW1DdbpRcLqNCf0iA9VGaryKpLPW0ijfD9qSTti+NJI8CqhPN7Axt4guLY9jXwex51cHOfb5XhH9OPfW79GZ2UOOGH+8VJaoLSfx++0x9kj6tYhBP796FWfHIHVSJX4kwKX5+eU7q+OY47JpPCU9ZGCGx3eucs3sEIZX4SwwoYtupGxPHqX7cmjzKcIS1cacpOWYoKJZnMEkQ913NJ3eI+mSiq20S5LopXtNqIa
 47jqLPblaMEUeW9yBI7aVwQApsuK0HZJKqj0pTKYYuW6f43K1gZmwRjayJHlEsBMx9Uxk19DnW40jWy8iKyT662QRXySp25KoOSCiEtzpECGX7UmjUQbA4CmuJ32xxrOahZ5RFvIraTicJEIvwShIOigdYyGvmbWcOqzBSvrE3QdCRWBB1cxkdNB2opjOeEGnCOm2Qa3iinrqDiYRt+0Upw9UoMp1cVnEcK7FKKYUjcIji2TpIhnercS4lZG9K54jX6aQGdmNtbCH3s03c063Ysg2U+Wbz+kPo8kX8xiTFDZkN9Kt7CfSU427eyvvfpVPiWyQLS6fo+8Ec+oP0WiCmrnaN82djUVWChvo8K3HXiCizO9hKLeD1qAUMTtJdEmN5VwNztQ2wb4+Juv7Jfl60AerGS4WXD17lzurgk6KTmYax7h6/Qb7PjvG3/zH35Eh2LfZZU3BzVgK+tBJz9kYu8JKUy+5u6QGBLYxVO8QkU4wnS8/axvjfIsOpyB97/5c+g6k0Xm0DGtahxiTnv7AHLrF/FwRtYxF1jMqqT4pMzQjODpZ3MaGyYkxtYvsg4XUCkkdfzeE019EkXugiMivCyg41cZ4p4t12zo3ptd49vQhL1+84runLyXhH3L31h2e3nrIrYUrXFi+xWBlL0vNLgZr+rh7WzrxnRukS48/9GEAH/7T5+z9hQeZsk/dmS0stEnfF/GesU5zdngZp3Eeh6xHe2ILs70zrNkWsVQ6cGmkThWZqE2wECEdPf6bfEz5HazIY059E4b/jmS8vzjJlz/1oySyhmtrN7Hk6Un4PIKTkppu7/jiua2YlAQ7V8/dZLW+A1tyswRC848FOL+8sHx+bpUdvw0mK7SZudF+ZuTBvaEyuF5KpopNjCWpaZDB7RbM9PjYl8rNdzd9KrGLw1klprWHsqWL5TB4vEKSpBiLh4LRHIugxSLLagdd3rXY0gR30hsxHy2SfpWFUUp5lXS8wbAu1luWOKc1
 MRxQLF2tkqlg2dSjuczlGzD5VOFKa6c7pYF09wyStkQQ/JH0rOAWNJK8qt25tHuUvXkTqMOvWtCtmuFo6XdKi+BHMl+/9RWnPjxJp3+5nF++YFUJ3UG1aKXLab1qMQQ3SjfdfEe0mXEZlHq3AoxuCViOJ2P0ymA8rIDy7Rn4766mq1PIYO0WDx3z9MSXs/9tL5KkC2ftjRRSkNSUNI3ZkUlhpCC22sxkpiSz4JEtSc90aRcFB3II+G0gAb8JIOhtP/x+50VruBiC6QxPBA8f3HtAR2EH0VultB+ooTGpj+H4DqazOqWnyvUeK2dCM85MyxC90m36sruJEwQLkbRsOS5dVPrRSKKYV0Id44EV9IVUMZQoa1FiedPdzrb0MZ7bjUlt58b6TerLrBzdWYjWt5KFrC7plUNMK4c40zrGZc2gGFEb6ccbGa9zslpilpqhxZFjxipI1ynd2BlWK31dycjJGsHxalwp7QxJavR5ZmM+VIQrUPr8qWp5jIZe6elVbmnkfROHWhKmR/CzSGYnx1dB4O+P4f9RmKTYEPpSwcMuSTJFO2u9y6y5LrAxvsbDjTNcHBhjqtrK9fM3ef7DczGVW9xZOYut1IyzZZZJywJnhtc5Y5+mNbWbQ38I5pOf7Gb7O4E05NlQBmkYl7U761zCJc89KWi8YR1h2bVAR76dKRHjjVnphBMLLJlmmB8+w6J1TkTYTltChySlrGv9MKeEwLzf98fnD6EEHmvGPHJJ+vglTIoWTn6TwqF3T+Mb0M7oyh3u3X7Mzakl9FkD1IjRFXjV/DsEnVtYfnDzAW1108yLs8/1OdBI/BplIV3yohvlFvrE6WJ2h6IOVxF3VCmbLc4W08D46XIsvoKReyXiN99ZDKiSHljMQIqGS/Z1VtQ2hmNUdB8vZqpn829LkkJepXTLxjQcUkiiGbiydJFLyjaMHtmCf9mMSi/U7EzE4lPGmGDtsK+SfulNyv1SukUIMe+dJGmvbH7JE
 F0hajSbf4N0S2LoeBbj4WpJsyrGC/WSDrUc+NVRPvp1FBFbMrAHKuQcy+gJaaErWke7FHqdIKQ5tp61Cjtj8c1yPk0YAioo+TKWzO3xlB8uo0vKvS2yk3MWG+dsNlY0FlqPpnL6gyA83o2Qst8v2ORPzP5sco7kUXlc0l6GS+OdS5ObpFxCJz2JWuoDSyg6VEbgewl4fBDJgbf8cHtXXqNkimff/YnXf3rBvz59xq2b32NomqQ6rYHFrgkcmX2YTsl5CkYP5QyLOAYZTtUzKOJxSgfrEsSN3pZC56l6ZrKMTCS3yfBnot+Tgf60iolULVN5FsYrzAyG1+GQOrHQMspKpZab1RY2cgS9TimZSWxjWQS9UiB4Leg8dLpYHFsSVdB/trqXIdUIk7X9OOT1piQtnKGlTJ4sYzzo30xzLKSS2WQRoKyfXYzO6VfBtNDRpAhwStJvLKKOsn25RHwQQ+7OPFRHSqiWuaiQCuPxhwj8v8qmM0HH1ObfYZUSALE9zPSsceviIy5NrWOTOezK0TBi3ODuo4e8fvKdzM5lbpy9w4xeUm1whVvrl3Gohdji1Lh0TpKDa3nnH/bhfqCAs2tPmOqdRV9mYsk4zcVRF51J1bTKvExY5rg+f05Sf4oNoY0BVT8L5iUeXn/ArbnLrMxd5ezMGWr988UUWyiU2fE5WEtGhIPVq3f44/07nLPP0n26ilxvNXUtZ1i9KOd44zx3+4eYr9UxWztEpxBWbULvvxPg9PLyd6+e8f0P3/LDn59zwTpDa3CNbLL0L3HPsagadDK4237uQ6JfHhb52Xj6Zk+rwHA0hybZbNWOeIa8CxgLFPFFyyYrrVKCzfQHK+jaL8mY0MVqjYb2A8mYBO9qjuehk6Ta0I9xtlbN4OEKrL7l2I7koNuTSsOBPEZEJD2x7ThiuuR5KunwzkMfWEObpyCvYN2YvwLrsQx6JbGG3fOxbQpYOl5XTBvznXM0hyhI2JpK5e48zJE6wV
 wzppBScftiuoLKaPUvou1IqQxTO05JqvYwFdd10wyLWEIOl1JUN0+boJxNukR/ehVJe7zx+bUH+R5JRH4Zj+e7MRx+9xDtMeXkS2fNFzcv9lORsiWOcknZuhP5KN3T6KseYUQWX5feKyiqQSOCUgc1ELojl8yj1Syv3uL7b5/w9K64+ZXbXB0cxpTbiMKvhF7pma4KGwPSh8fkXFZ6pbdVyfcyDYxV971J1cHAcgqldxpOC71ktdHnlUW9YHDL4VzGSoekJzqZydcKCiroPlGAWTqmK8/MYrZ0cemamxhpj6oWHM/HESxf+xcy5FfKoBDCsFzDSLQIu8HJpDy3K7oGp2DlvAyhUUzPeCBVDLIce3g1o3E6SepuHIFFOARHZ08qWYhoYUy69Ljg8URGO/0pJqokOauFgjqC6yk6VkG7GFTUl/kUeTdhSrGIUVVTE9LMiHTJG9ef8PKPT7h9bpqqJKlAJxtZGZjn8f3rvHrxmGd3XgmOPmTeOMFInUVEM0CZXE/i3gycbU4GlE6CtlVQLut05/x5zk+usWQ4w0SlkSavZHL35FAXrmOibR6zQklzarkYew+2yhHWhgR/zy5zfsbF4yc3uX32HOqTdRiKDSytXad/8jJnl1ZYaW/CJug/0mKiOrCO7jyjpPAcq4LruvgaSnxyURzOpzmmVnrn6Bux/7gDTs8tf/ftM757/oiXV9aZqdSjj2nBKgIbEmccSGhkVNKhWi6+06sY/YlCLII2I8EyzJ4F6PxaxPVUDPilMehfK0rv57xtWdCplF5BTc3+BBzCxptiGZSEmVB2USkCc4gb2E8q0AdoGEySzQ0QQUmXqxBBdwZWCfY2MlkmF5ZYjMMznwUp644EJbM54rLSD7vcBbGiOnBlGJiSobjRYOWqfvrNuc6cFEQWYXaHiIOnmyXhDLjiSrAcSxfhVmL1LmdAHNvkdhrtsRSaTtSiDqmXbquhO01HX+kA3/fNsFxsJsuthKQvIvn
 iJ/58/CtfDvyzp7B/HKHbg3j3f3uf9Egt0+Zx2k4XUh9dTn1aN5XuiYT+OgC/LZnMuC6Js15g1jzHmEo6rlLPeuUwvfntTNcI7ml6uTm9zHSdmYnyXpoCVSRsS6X8SA0GWX/TySbsRYOsacaYVxs477qMUcSoT9DTt2lkOzOoO6JgVgZjOLqE2q2nRHxCBdLpLpoWOGsaZ0I6Y9uhDCp3pQvdqJhL0DDhk4nxUDoDpxsYDZUu55FOvyCmIayUJiEga3ChrGEs45FVzGdpGI1plj1U4vAuwuSVhnZrLP1ukpxxm8mpl77oYK5YhleoZC7bwpAk71BmoyC4CoPg6UyWngERuvm0EqPMz4zMhPJYKbaSCelel3C2r3Bh8qKYVzEdIvCbC9d5fmsdY52C4L3Sqd73I1n2+bJ8/5JUgMfXr/Hqh5fce3ifNeMK7clG6sT8SwPU0sULGTNJXbh4m43ZG8wOXGLZ7GTO5ODC4Jygew+m+jXGes8wrOxlWdNP8PYo0g9kEi7rsDAkvfPZA+ozyjmxJYCmjBrurm9gLDRQ4pXHyuAIi90GKnzi8Pp5IHG7pKYczMec2crVuXVubdzh6vodNPltBH0VRPL2dJozJITGz3F54uKPBTi9sLL88tUr7t15+qa4V+7PpGNHtiRFHmnC7B3BVTg2+9MpwZkgFZNhDZJWFW/ujOk+UitMbmQyqpxevzLWtGMs1vQzqWqn72AejdsCyfosHlN0NaYTKqYyxaFlE0bjZQAyNOgFbza6JplOLaD/UBKtB/JpcS8QYUqXE1Eb/AWDDiqwCeqaTlbRF9aJPUY4PrmTpbI+zjYNi+trmBK8W8lqYT63VRKhBJuXYI9qlvPGBRby6rAHZNBzJI3uPVn0i7sbPHPI+8qLfT/7mi9+dYCcY2UMyKY0eSqkkzYwEVDKyNFkzL6ZdMVpUfmUk7w7jaD34vH9NJ4iQbnjXx3k7Z/sQS+i6ovOJOrDGKI/jcEQV0Wm
 DHCcn45B/Ro3z62wNjiKI7sBw5FcMSExt4gS1gpsjBTWUR+QhjGjjoEYWaOAAmpkI1tCusRVrVQeqxZB1nDZsspwQhOalB4eOC5zZnIVe2wtHT7FKE+UUiYdbipSzPFYLio5B7N0LFdqJ8t982xoncw3uhhMM2CLa2cwQHrwoUg50rAJ3m/+zbVPkLFdEm9k89Y7D6WkaS5T3jmY3LMYjmhgukD2VAzYfLwIq3ROzTfJjEaa5JylcpyupD+6ifNicv1CF07B9Ym8bjHacQYLhDryJanrxhkRyjB65TMg9WM+vYOh+FpKZS8uzVx6I6SxFhsTiWmyjt40pndy+8J5usobOfy2J1//8wEOvx9OXeUYj19+x8LAIj2VUnNW5nh67REzjQMYxYCUkWpKApqIFaMxFLdwbnqFb5/c5v6jR6yOrjKtcmDOacWkneDZD0959eo1bbJOVkkt78+CCfvCC7cvTnJBOuCTh48pFNQ/uS0Pny3RTA7P4hJ9lB6Npy2ogNLQSnKDOymPH+Xs5DXGWufQR7YyJz37yeNr/PHxY6b18/jtKiPdvYThGgvXXMusGx0/FuDKkGX57sQ4j5aWZQPaKf0wWZKjBqU4V+r2XDo9JelOFAt6VuOUsu06XoL5qEKwox57torFqj56ZONHFH1cHh+kXZhfE1JL255o3H7hRUGUnkmluHNgk/SFMnqDagVlquiVxFvI6+BMhoqufRnofOqlmDdilZ93Sgq27k9jQEQ7mG9ksXaSUUW3JEcHK9Ivz+U0MiNItbD5ho100boDGSR8GUmtYKjNr5L1xE6u1ncyk1Mpg5aMdn8uQ0kmGRTBoZQ2GiWNd73jyba3wvEV/Gkvc8j3tRz87VFUkXWMSPqPuGUxsTeOSa8Uig5kk3usFpWQQam7GkOqgc9+LUn4VRz1wQVEfZFOnGBgxsEyNMm9DEpXGa/uYU7dyVhmC/0JbejFwMx+NUwpbbiKRllvNRKx/zTZHnnUn
 pBkCGqi43ChiFDNkrKTtogqGnzLcER3cGtwnv58E4s9k1zRDnJeDpOco1Ous84nj5LtcXRtC6fPt4o2rzrMAXUsKYxcsc0wmtjMRsXmXTTtuBKlR8pj2g8oGA+rZ9C3gAFvMTgRn146XU+QiHNHCJ0Hk+k7Uf8m7eyBpUyIWPrFEO3pch0iGltiF/YmPSn7TlK8JZQKOU9zuBKrzMdglAgx3shK4wjTRZ1s6GzM1pnQB1ViOVou9FHDUm4zzYJmxcENPLzzmD/965/pEfF7vxeGx+f59MRbubtyhcqEOrb881F+/5+24CPrOz4tqfenbznnWmFKOy+4Wc1Yowi8wohNOYw6rpssnyoS9ldQfLCa2UY7C53DXJ6a4oxhElfjEHWpDZwVDHwton904RIlQZLICjOndiey5WduHP0yHENBDy82O+bVB3Q2zBIXWMtk56gQhpKA9wMJ2HISjfzeXKeTu+fv8sPzF7x+9IIxtQ1jkob14QUe3b7F0ugCmlSpHfGdgqA92FUuenL1PxZgyAeHlpM+ihYk0jOa3Url1iSWpAjnu+XS6KmkVcSnPlHG6Ok66RG1dO9PkV7QQvuxNCIPxGDyjiD381gs7S6GEuupFEztDSoh5nfBJMpiLxhHMIQWUuEehzFYhia6VsRYyZAgoN0zlW63TGyxgmVtNpYLWxg8msege5HgUSmjUW0sxTeylFqFK0GNLaBM+l465oMxaHfIkBzPEUctwOhfR+WJBmqPlGPwyEQv6NS3+X1JanuQhplcG3cmznNneJG5dB2FXuU0BOloSe1hXNBoIbuTdkndk1vLKBLkm8wdot+n9M09q5MZRnQx+n/DvrQB6QfjDOXoSNxeiEIGrTG8g0pPFS3STesFbcvdEindn44lrBV9WBO9SQPS39rfvDPYLzh33zDGTIGaiK3JHHs3iNqDhRhim4UMmhhMluQoMKE9Lpi/+S60Wz4zKf3MlZrkZ/UsZagF93Pf3FrnKh
 wQkelp3BlG+RcRtO8rYiKykXExkxGlnfkcrRhWNwYvFSNhtQxIH5tU2FirceKMU9LvEY/Nc7Pv5WA/Ll1a6oRmfzLhn0VSFFDPmHQ/k5ivffMOFy9Z58gWVvN0jMbWcNu8iCa8Eu93QvH4MJXWgGq6pf86Y1uFTuqZLbSKSFVYfYroPV5JjVs5nX5KOg7JGgQKYeSpif0qnObcHl4+ec2Lm5doT5NELx3GVjXKGectrtgvoxLM9t+jZPvHWdSUjHPv2X2+u/8MQ14nfcUmzNlW6sO70AbV0JPWRk9mL32qKSb6LjLeusi6c4OaVDHEOjuzbeNMqq2051uYmlrmztmzuGrNKDwq6FUMSh/PY9fbwTLThfTXjHDFOcqTm5e5d/s60zLblpQWtEcLyNmdQ9rXScTtjmfOOs+rly94/t23vHr9ij89/o5LAyvYUtqZqut/c6N6e7yFAdUY67ZVxtvHBINnfixAz7c9l7M+T8Wcocce10iPoGSvpF2PryzgyUoq9gpeHqlkXAZkJKASvX8ZztQWNB65REuK7HznCOm5vUzUGQVfisjam07Szjzit2fSIJ2tK7mS2G3JFIugNt8A6HKLl+cR13FLla8Lmcg1sKFsxXW6DKt83xEjfTC1QwZIRFnSLQ5eR5dXgaTi5t8bi6nek0He9mxK9kpyyNHspmA4vIbJmHr6QxoEYVUiRBW2UOlmRX0sKwycLTNwrsHMpXqTdEG1lHArC9KLJkR41sQm2o6mSYrGU3+qQHC5jdlSM5b0dmabR1m0THNh9joPNq7y9PwFrhjH6ZAhGtDO8OSc9DtlB2qfQooOZpEqaJbtL6hVPM5szxrnbOe41zsiWCZr6FnG2GkhhuTWN3dlZCV0UXC4mLFSB8OyuX0xnazoz2AXHOoVATZ7SxcTIxrZNCoR3Fi4WhKukeG2c9irLIxmakXQjdR+lYLmsJBAsp5h6VfjyUIpsYK5klK98R30h26+mdbCbG4XMxX
 dnBFU7D0ufdsjH5fs9eadSy174tEI+leJ4BKFIDL8KuiSoe71raZPhNUdKntS2EWnVza6E+WsNY5jlLSrOaqkXSikSwhoQBLNLkg7FNvGeFyDpLACg6SlVSim26MEo5/8PNHMmRobRdLp033U6KJbWWofxJotlJXajq2gF5tg4uN73/Hy2TPuPXnK9fV7bCxeZlK/wqpjiVsrZySN5tEltGOIqsMUX09XrFaIQYsl38ra0Aa3Lj3gzu0XXBhZ5ox55k23npHKYs1tY1264bjg7nSnA0v5MC0Ffcwa5sRkdZQfVlPmWS0CF2NL78ZSoMOWWsdisZ62zXe1D2eJLnJoOphJ5fZYqQ4Nb24BfP76Ba9evOA7EePrh6+YqZNe27XGgm6Spb5VLo2scVs64Y2Fm8zr/t2bMK1htcstgoyO9CZMYbnyQgr6JfFGgjbvVMhA77n5rmf3m7ezrdI5bFG1TMgw6LwKOfrpaTLy+nDqLNRI+iRsE/Edr6fIT0HsoVNs+9UWPD4OJnRPHmp/Sc8jBYI4aVgkOW370lhOlHIfkiNilOeVcjsWqGK1cJT+HDX9qeUS8znU+6ZQ71/FaNYwjhQ95hgtfeFN2NOMWJO7pNtY6Y9owy5dZCJWxBXfLAPQyGh4FePZHYwJck68+TiU9DrvfEmMOIrdU0VoInp5bLsMSbg4mufXWWSGdWAuH8Kp7Ge0rJ8xKegX9Q7pUX3MVBsYyeqgU17LUjHKtZVLghRtJHwTTty2DAoPiKmUj/Lg0XO+v36TJ06X/I6FIelHZg8RWmKHGFmW/LeNK9ZJFjWdjKcNSQfuZ1iwd75rlacX18UUKqgRE2sVtx2Q5Ok9Jl1MjHCmZIJFwyovv3+Go8aIUcxmWkxqMLaTATGs4axWun3qsKXZRHw6rJufRkiX4p8vexcuRhdXJ7jexGyMmsmAclxisEafMkq3xFK0sxjFYRH88TwSv86k2qtShCcoKAM3FNLKmiRpryBm
 vacYnLeYiJCSM0SBK0x6oaScWdJT61PBYNYQ69pZTKGyd/szMO6XTu0jHTGmA6sIczpXR19cPVn7sqgKTafySBxlB5II/HUUpiohiwonjvIpnl27w4sXgqb/4zXf/b8vefxU1vPqPc4uncWsMjFS1EtbdDutkuqqE3XytUbEpGesqZdrs+d5/vRbHj++yqXRRTamzzKisjGa1opOaOrm1F26C/VoIhoFGcVU4pX0Si+clJ59ZuoJg42TdBQY6Cyz4BCj6YhsQONXTPmOWGoF9dXfxFDjVUHuN7FEve3FvMYg+Hmb1yLEZy+f81o66qO5qzzYvBe4f1jwe4Db8vXr7+Vn919y79aTHwsw40jIcvMJhbhoPmE7BD/2xWI+FINF0mgwqI6B6Ab6y8WlBFEHTgv6SPm3iNOWyQaaFBbao1qpEpZvyxqgKVRNvSRRwcF03N4O4Pc/O8y+X58i30+NXjZgQLqlM6iR6cRexlONjIXWoA+pky7Zg/m0mkUZqKkKLXmH0/jsp2588l8O4vu7GLIPlAqi1TJyUsWoIOxkbBNnlBZWa3qZTFdLV8yj/5gCs7uCgcMKLDK0VkHmtfZ5RitFSBld8toN2AJbGBYEsYgjOuLl9yQ5u5LbMTU5mRk7x4WhVVwFIpSwKunBkrjyWr1xrSx0L3Bj9Z4U+YtMd09ytcnEnEJHrVcVFUdrxahKWDDP80RS8obJzkZhDwNRLdL3nFjFRecLLIzFVNN1WPA6qhqToPymiSwVisP6KxkWslhPqcUeJetxJB/L4XQcYUos0sE2P1GwmtcjaS54JyIbq+pAcyKLOq8MuZYeJjMbGI3RMB2vw1nayarWxojgujFSLSmaSb9fHjrp7DM5vcyGVzDvrxJakETMaMcZoWEwU/ph/TDn9WIKm/dCyj45xRDsgoz2U/VYRbgOOX9ruonxglYcae1CShpMh3Lpk4phE1w1CMp2ieGtlHSIWRTS4lVKrZiqwU/6vtIsH
 a2LyUoHziKDJFcdnp+F8Lv/9C4f/WQLsZ9EUeijY3X2DmsDi6yYz/DoxktuLl0Rwxnj1tIadx7c5dGd2zwUAhkpsTGhmWBKzne2QcxG0YcxpxOt1J9G2fPZnmmeP37I05v3uHD2OvcuXv63DhohBFczzqXJIboyail0z6XKI40EMV+9iP/ZC8HIf33GLRH/VP8ZJrSL9OU1ULiZdgfSaNgbS9veJNrl2sxieobYbjKll+7+6T6Kjkay2qjh+cMHPH31R74VoT1cucmYCN8WZ2C9f4l7l+/y+PY1vn9w/8cC3PrTI8s1e7LIE9F98/Yxqr+Ko1MGetW2xLzB+aY02mssNIeKw4srTpyqovlIMU3iCh3BpajF2YebxxjJ0zBRbKArq5PSg2VkfJGKz0fxRGzLwZxuwFXSz1KRXuK8lwuGeZa1dobTBRG7l5nRDpArGKQUB672LSVBBBywtwD/L/NI21dCa7SWQdn0sRDpOGICE5E14urC7l7pGNyT0B6Mp1MGYfnNH4rF2aXX9IXXc3F8ncXmYaZkg1aq+pkuNDGQU0Glfyz+7x9FIf1rVJDnco+L8w390rNaaD5eIAW+lNZ4DenC+WmeiVw/d5fv/uV/8PT2I86aJxnbvKn8VDUzRUYxBhlKObfbQxNsVIvwgosZPlWDXXqyQ9LXmdaIK6Jael0e5Z4VmMQchk+UMi6YZz2Wx8DxcoZkvfW7s7Fv9kZJy9EkPRPi0noZEt2BVAYOJtD6zSkGT7VQdriCkO3FFPlUMSgC2hTtVHKzJJUIIqSM/rgaaj0VFHsWkfl1PO2747CHVrEqCTApSWoV/Nz8sPKImItLRHupQlA/TS19spkZ6W2T8bK2EYLzp8WshFCModXMmZeYaB4Q4elYkmQYSWpnPNci4hqSc9VJ/8ykY/MjaUcU5G7PEcKoos1Der6k5iZZzSodnB+fx1hgZm78HDkJzfz873ay9xeHSH3Pn2HdKi8ev+Dq9EUWTN
 M8u3AdV6ud7KOFNImZ3F4/w1yXpGPVAKNtUzy6/YBr41ewiSFp45oZzNeiFYHVikHPCHZ+/+IpPzz7QfrlE84PzGGM1lHllo1WOnnNyVyStqZJCqcR9X4U8X5VXFi7yZ9e/cC3kmAP7t/g0tIFlk0usg4mEvVpLElfxFOyOwvF56EovzyFNlQhHVcS1b+B1M/S2PezE0R9HsaZ8Rle//Atz759yr2bt1g0zmKVuZ/pmmC61SqClF7eZfmxAMM/9FkuPphPoHsiX/3cnc4DeSxXiZtffsJKxwCGdC2dKXU0Sx+xepVgFYEkfi5J6VaDQzXJxdVzLNToMO+Jkb4ihT+pm2bfPArkhJt81Zijm5k83chkUqn0gxZcKc3M5bazIiV1VC1u1twnz9NHoXcF6VszqXCvQCmL0ixit5ysYDa5lvUCvZT/xjdvoJgCKtBs3unhX0HYJ0cIe+cQqv2R2DJbyDoezbbfbmPvz/YR8G4gnYnVjBZ2cq7FxbzKzKRCXEvQyP23IQSJMWgiO7BkyOYFFFC0N54mSf2cXTGE7U7EWNLK3rf8cN9VxIVz1/n26V1uj88yndeJzj0F2+FkXAF5TATkshwmXS1AcFHWRrc3jT7prI7gKlyC6g5JsH7/Iho2b5Vzy2dauuKAINhcniRzZi9GEYTWQzBc1n02yyDuqsaQqKZTemOtWyk1YhL1+zc/jTAghnKJgdIehuX3bCEVDAeXMRjdiD24Sbq6oKp8f1BwsVwMqPKEEo2/jtGEPjEgLaZAEehp6XUeuW9uURvN7cGR2kqXXHPSF1GU78ugU85NK4PqkD462zwjCN5D/s5UFocWJPnHsAjyTW+SkBjaWJeLCTG1Af9yqRBK9GH1FO6T/rhXRYNUiT5/6X1yXoY0C+ddV5ip6cJeruHxlfOs9k4R/LEPn//Ne+z7IpBLF+/w6uVrbi/dYaF9lo7MdqqFTtKPlnJ6Rw52oZihMsHbwRVJl9d8/919bs8
 s0pNhpCOxh+rgEhL2ZJC2P47qyEIcLSZGNVZuTqzTlVhDpzxXc1SnrI2L5jgzpbI2ye5ZRHkV47Qtc+fiGe6cOceGdYrL9lWuX7hNr7ITz3c8CJKEVnrXUrk3l0KpT2HfJHBqawhxO8PIFnNO/siH0k9DUG9LZWVkUVDzpaTgU8Hg7zjvPM+saZ0V64r0yh6itsa/efPtRwJMPpK3XHxKgyG+TZ4kE4egyXVxgMu6bqoP56CSYdos3hbphqaj0pM+jSbPXclsi4OHi5e4NTorG1pM565EhmPFwU+2UC89sMavieEU6WJJbRj8s6WwF9MoHbLyWAkjOdIbG5z0ZjVLUilE9NH0HcllKLiaQV8ZZnnc0OESBj3L5etypsLUtPuWkLwznvitKcRv3pDtEU6GIK37B3GEvB9J9BchHPqVB2/9/SG+fDuCiK8kPfzq6U2UTlhuQRdYxmRhL1rpkRVS2LWZegqPpxG5MwCf3+3gvf/6W2qln2UcqSZ8VwFxW6OIEWPSFw1zxebkmmUGfWI7fWonF+cestCxRL8gt3Xzbpd98Yz7biZzK1a3PIY8hBQELWdEgGObN3x7KWQgaxiRFByTNJyS1LjbKudxLIiIL8Jok/5lFdzcKDGTfCBChkNNT5iBKv9KJqsmuHv2Lncf3ufR2CLODOmg0eUUiUk0+Cpxlujpjih/8xnNhY4JFrQuDLKO/aEVLOe1cKa4SfpfK2a/MpoPFaA6mMFq0yST0meHjuVTsSNJrjUBrXsODYcyhG5S6Q8swSUYPShmYM3u5fz5y5xzrWNLbcdVYcJZbGaqoIfR01ViLlVYskboTq6j5ECm9NwcBk61CbqaBMvneHXvFrfPLKKS+cnblUtbohh6Ti3v/P07/Je/3kLY3jKunrvMD39+LJi/ysrQImPd89TKWub61lAapacn3YhTM8udG7dEqE949vAxU5pl6XRaNLE60g+k4C9CcX8/hGO/DyVSkHvNsSHrMYJF
 erAtbxBXnYsFi4OF4XVGu6coOFKOOtnMRo8dR6nQW5VeuqKZLlnPe5fu0pXawK5/cCfgk5OUeWVTvT8ejVuS9OVMor9O5cQfUjj+myD2/MKdwLeOkfJROOtDK/zx5be8eP6IJ6+e8Pj5Ax48fMbN5TVqU5S4f3gcRZjqxwLUVmiWxypq33wY1RLayqLawkZZN4odmYR+lEyLoNLmJ9WbxB1zROXxX8ey1rPEi0eveHXnIZO1FrqPlnMmw4QlRnpKhp5zTcNv7sxwKfQMF/ehyxklN0BFxs4sOqR/XO1f5IJxlMpDcfh/4M+pL2JpkjJtkS40JAIcOlJAv28B5qBSQctC9IK6ZV5KEqVvFQbXk783k6QPIlD6pNNZ3k5DvJ6IL8PZ8U9bCdsTS76gVvI3kZKCwRSJGIyCg01eZVhi67hsnuCSdYk14yLR7gXs/jCGj3+yh//219toKdZzae4WDu040YfKac4ZZKHcjEHOXS3XdkbK9J//9c/892dPeL5+lbUKGcST1UwKUk76FUgabt4SJ/3zeDVjQSr6vQskVbIZkTUc9JafeZUzL1hnl+SK+CqWT34WQPr+MrqOltDpXYhOkj15m4KR9hnpTWac0rvOKw3cmVzkkq4PW2gRPW7lsieNaCJECMndjCW3oQ+UvhbdykxKAyvZ9eg884h8z4+Q3/nStDebvlO1tPup6DnVTG9UGysFGqwBgmRybp2+VfRIOnQfLcJyqJjJiGYM4S2CT6vMFws2lZlYt4wzlFovvbQRZ5acf2YHNllLu8zFcLiWqXwjlsAqunwb6QpsZG1w480tYj88+VfunT9HlV8WFXJ+6kwrE7azJAen8/7bwahKXVy0XWLD1MdSs0b2OE1mRcO1qfO0CabWJvSw2L/OcOWA4N1Fnr94wasnP7zpeA/WLmHK6abKt5xTfzjJ/p97EbovidM7MzglezBvHGGotFeMoAVTjJLu9DaUIdVc7LIxmd1GpV8LPXLez
 Qka4g8XE7w1mpCvIlCI0a/2jqHYF8ex92Lw+/1pSg+VoNiZjdm/VhJf9koqm+VAOkXbktnz1nF83wqgYGeurNMCrx8/4TtB0B+eSKd88ojHd55gl4pWGNTIkY+jOb0n/d+9C5pStdzjKQwvXaS/qAuDFFmNpFHxvhyajhczIINjOFlOmXcWkVtzpKeVc310mburM6zUiLPKBdhkQ9ZLxYklcVzy/8cSy6k+EEnU9mSyDiuZzGzGlavD1bbG2tCq4KhsZmwN4V8lknK8A6uyD3tBg6ScJMWJEno982k6miwYmisYpSJ1Z5q8bj41x9Ko9EhGtSORim/i8P0ojEzPWuy1PXgLn//u77bhti2awshM3vrbL2XxotBJb+oWNNZLmgxKB3AU2pjTzzHT7WSye4nBKgelx0s49qtA4relcda1zLLOJfg1xg3DFM7ERjECNZfHL/Dy+i2erV/gRpuZ+WQto5IUI8cymRCHnJI1HDi0+U9zKJkqcjAs/Uq9Nx/TYUkxMZX+QwrGRVDGVC1VPirCv4gjeEs0xQeyydudTINXNV2yfu3hVfSFlmGMaGBk89MFkqJLWTpURzKolERulV7acUrNXPs4i5lGbIKF/ZFN6IOqaPFSYfCtoHBnEiffCyX1S8Hi0AZsCW2yvmamcwyMxTdJ/yzAeCSbAd+yN/8CwLSinalEvfRaHXa5Xl1+D/dn5+iNKaM308RYRR+9gdUieOm0+Z2CqI1v/lkLo5t07ahGHMmtOAVnJ9MbmFRaeLZykUffPuD5pYuM5jUQf0B6bs0400IiT5eXaVCP0WZY4fWN+zy98oiR+hEy3GXID9fRJrhslf5bK/u+NLDC7ZUrTOpmubxw802yvJJu90zw7vWrl+iyNahOqEg4VIbbByl4fxRAiIiitciCvcbGmckz9NWYaSwZo6fKikmSuzW6TZ67jqHWcdYHVukttaGK0hB3oIj4oG46a2exFJqJ3p4gCdxIjmcZ2TtKUQ
 c30htXKyGjpjugBPuJMhwnKmiX/S2T+TSJCa5phkVwN/njH1/xL98959nFa9y/dJMJy5oYi5Wcw3nEbE/8sQBVxxKWB+SkZsR5ek7X0uKZQbUMhm7zA6URhZz+zI/swwrC5b+nPpXN9yvHFlxM5mcRBP0mVNBn8xawDrJ3xUmi5dFwrJDi/4+rtwpvK8/Sve++m+/QPDPn64GemWaorupKUVKpMHPsxI7txDEzM9sys8zMkmWSZQvMzHYMMYU5qXAFCxrOmZvft6S+qr6ox085srRhrfd9f1v/vfbeZMKFCyoVnTQ5ZVPjUc9q6zLXhqa4XKqi5kQUUQf9CT+RwHLHAOUB0Rz/1T6st3qQ7lHOxc9ccN9ij5/8bBKw75L3yBJH8//oAsGfeFK1JwS9VRLq8wU0uBRhSG6lXBhEIQVRKNAeZB3Bgd/K66SYh8xL3sRZdb7CSnIQrg1ucGt6ncW6cSZThDfPuhC+043IfSm0l4ywWDvCJfUQk2ntmITx9FKgC80T3CsxX8ksQOtRSPnxKJoORdNnHcW4gwKDdQwaEYV++wwGJJYtSfRWSVwvP5mKXtywz/wldWgj8xFm5S2hTBipwK6IFJcKgveE4/tVLFXimOOi0nXSnEk7PWj1LRN3E7cJlRgtDZwhLNZorSB2hz8VvvVsNE1Jk6uYlKLul0hbEaokV7a1yTXvbw34lSMxFyoYKZnAWDxOf00fCynNli/V84W7Ug740Hw2EZV1JCMStyalQEfkWBWciCZMOCV6TwQJEuNHgxqZDO1gOqeX8SA5nr4iZNLotSdiST2WRL0UntYhXlw8GVNRF40iGqP6db79+jmTWWX47hSGcjLfhZLCqT/soEDRzMs3j/nzD3+23Ghrvnx/a/Ym0y2LDJRPcf/WCxambtGe3sPm3Dqv375nfXKO1YkbvP3mPW/fv+Htu+/407v3tGQ1UyY1tjS6xKxpgeZcicI5I0yIgM72rLKgn0QlPH1jdI3
 bM+ZzbsQQLTxcaOTG5CUmag1kX0wiTmopfIcrxXIMv338DVeHZ6nwK6AtQsSv0kStTxIG+xSS9oWTLLhhjMzG6KdEJZixWd0j56GSPvcsRnJ1PNi8yoO5ae706ehPb2QiV8utFj36mCwCt5/n9K+P/bgB011yliczayxDddps8mg7HYP2aLJlDJ3bdlts98eTLGrp9YeLArrRJB/3J3tfCAXeGnqbV7lU0ivdn03AMQWFVsJDwjwDkS0SKQYZ8S+j2V/F1cUHPNu4wXKxjnzJ0fE7XcT9Qgk8lU7EeQ9+/5Mv+Kf/dxcuu2LoTW8lWiKU/ed+uO2RwjSPMhQ+0VmWadXT7lSB1iqczO125DkXs948KcqfhlrUvD+zn65YcQuXMhJF0RJOxJN7OByNg3BJRA2L9TM8uXzf8v3QqKin0x8P8vP/toVD4nxD/cKz0ytMlQ4IwI9iitPQYJfGTFYvlxS1tDmnkHM4kwIRlm67BHpPSfNJUhgUrkvf5UHEjlB6pPhNEuWG/KsZ8Cm1zFVZEEYZCKiUSK2k1yETvfy+VVLGeKyKsaJBkqVBU0T5m+M7MFaPMVAzSLV5Abuv2dkkNjrGU7XXlzqJ4LHH4/H8Mo6aeL1EtyX05hUXrRP0prZS5iWx81Q4dQGlEqXicTheQWfvPDdX7nN/1XxDaAszEh/bfMsJ/NALq9/aY/8He9IkCmsvZjKr1NCXoEJpLQJwpoD00xm0X8wWzitlPknLJXE5zdl82mV7e0X0zLdyZdmLGMtnp59REHoglIrAPPra13j49DFXu3oIlMjlczCVmMMRhDkUsvXzUNpVizx99JBX373j3atXfP/yNU8fPOfh9afcnljl+e1VvhenW5amutS/LE36SthtiKnmfl4/+5pX71/x+vkrvrl9n4n6PtrzOhlq6KS3op9RzTx3r9zm9vo97okLb0ytEie1UxMtztw+hyZDRWN0E6qMdiZaekk9l4TPfn+8pZ4P/swF
 b5tibk1uMKaep9RdTen5KBEZO7w/PYrrluNEbrHh/C+PE7U7DOWZVJqD6ikLDcNpmzWOf9xLZ0IFU1U6FKd8CNzmQMrhWLReVaQcEUP496385r//mi8/8vtxA+qzG5aHw3Josc9hNK1DIF9UNb+P5qg8HHcFoQ6vRXEsnFM/PY7n8RB2/M4Bj+1e5Jx3Zaxaz3iYkvi9fsTLCSs7pKDbKZPRkCJaZOMGsga5N7DA+9FprhQKN0hzltjmsvNXJ9j7y4PUSjw59dk5Pv/JCQ7+zgltlFbiWRpN4SWkXCgg17dY4mMhRs9sZsIquKyoQuWWivvHNnz8kwPEBxXRVVRKnWsp+qhyrnbomTZPXotU01s+iDqwUdgwCK8PLooCl7Gq7OTWyDQ3e6ZFyaLY9dOznLhYx4Bw3/dfP2O9a8aycmI6r06aqZiBOCk8pZxkz2J6xGVUwp8DwqaTZ8OZlGIfkGJssI3F5kNPsk4JK4tY9HqWMCfKPBtUIc7RZLmUb3Q3LwBPp/V8Ji3nhbUk1lfYZ7Ixfpm7d59InByWz17k8dO3XB+cp+BoPFO5/dIYOZR+5Uz57mDmlVoK5RzlHs+jP7aV6dQWRkQcZszfiTUPoZRj62e+yCEuq4o3MNt+iTeX1rk1f1M47CrVDtHofArYqOmnyDadC1sCsdsaQblDhZyzfCaTtUzl9YkbqujzK6THrYAhd/O6XTkOsv+mhH7Gc6YYkpQ0FiqR3rxaKqkJQ0IT2Y7ZnP/cncRANU8Xb7KuGqbGr5jwc0XknlOicChiSbZjef0RD5bv85f7j3m+tMSj3lne3X/InyWu3RgYwihRuDyqktWxZTYuXWdOOP3t6+8YqOuhJEIjjfqM777/lhvj41xuX5JiH6IxXE1DXDu69mlWZ26zou9jpd7IfJMwYLmW2FO5lCd2sLnylPbCHgySAlTRIoASQdP81ISeE8SxKUIRV4up2siSYZYnd16wblwi36ea7f+yl93/+
 Cmf/OOv2fG//siBP3jRWjzGROsobfK+Jz+x5Rf/8BH/+t9+geeJYMt7O35sy8GfupMtLD1Q1ktVmolDn7jxy/++DbsDf3cVNN4+YlkvObo/tFkKwcRwWjfDJc0EHAnG6bNTeH1xkOjDgez5xQlsxbn2feSInVUYv/rFKdnwdEoORBO1xZ+8oykC9EqB+TQqHIqZz2thMTtfijII/X5X+vZ7M3s+gdXgYhJP5vLhJxEEn83G/4QCu+3RkuMLLVfw0sPqKQxsF3aoJ2yfO16f2hK0U+LUnhii9oYTsM2frb915cDRLJTCJIFfeJK6z4/zn8j2/d6b7IPR9Oe0ownNRXkoiPgvvDj8e2tU0gANtimUSrTWJ1USsNuRBNcKUePnPJ5dYEpAvz+3kf5EtUTGUnGfXEaKWhgVUDeJCyyFNzLnqWTmdDiDJ0TJT0cz4pFJnU8d7YFaRkJK0DkksBhax2RUHfM1JobS1HS65dAs8axLXEZ3NpY6qwQazsajuVjBvfFrvH/3iJH0KrTJdby6c5dpKZxGbyly+X+VNHrTsQRafOvojymk+GCocLKoqiSEdjfhbWGla1Ioo1ltGPP04qQlqHyKGJdINBVTiS4sn+XKDlYb9JTZmpeD5TEhsUrnkkvjuXxMwpALVQZMAZnCflXMxBayFNyA/pgPnVaxDAcU0ifncirdxC3dFHdUBka8q9C7FdMVX81ESqNlLGHAgQgynMvZGF1mrd1gWaAxod1ktHmWxphudHmDPLl1g+8fP+BezwT92Q0UOUSQetiNcu9k7m4uos2TdOReQq5TEc3JLYyJs48Xm9jsG0MTVU19qJoXa5s8vLRErQjbRM8K179+y8TABlV+VXRGl9MjrNyp6KW7aJxr89dpUmiIdYilubRD2GyTlb5ptLkGCsO0LIxt8GDxHsbGHmLsYomzTSJ2jx/Rdj7cvjTLommKqY5R4gM1bBeD+PQfj2B9PJ+BhgVeXn3E6/u3uKyfIN
 uvlVOfR/Av/+MzEuT4qn0zufB7Fwrzh1iYWKQhulY+UxJU2TjZkYPE2pf+3VzQA7HLncpuFntmmClqpsI6mlaHAuJ2BPDhP27lj/+5D4ddoZz9NJQgiaZhhxLF9mUnAhrIOp9O1NFIjn3sJqqdQaNXDe1hovqBhfSfT8Ioscwg9q6TBuk+EsKQaz6z5uGp8W0s1YwwmKCnzr+OMjmAvfE6bhSa2NBMsFTaTYZDDsGHUiTa5opL+uH+SSgFtjmUOVdTHStKNbzJQEEdLiIAkRdqcbeuIfKsOKdzLY1JKi5IZPbd6Uf4thA8toYwGNhCVZQGhRRuQUgNgScl3lUOs1Ddhimq1XJ53bxguTupkQZx2YaAWvThzSzIsVmKk6ZyyWRE+Mk8LtF8n+PoxTwM3jVMFIvrRqahPeaHUSLxUFgbQ/kGcdo1uoNyaJBo1nRaQZmoY9YuH3FiJd3yt6tDl7j36Gs2Ozop3eEtySCD6RSJrq4JwmYKtPZpch6yMAhbTPsJ1+33wlf2o8Wrm9ZoI12BKqZ6l7k+fY3uWEkuxT3yU/j6nEK4OAejVz369G4GM9sZi28QN25m0K2EZomLHdbxghuJjCjMK2Gq/nZ7mDjn8IUM5oJridweSOIh84DdOrqCqphNqWM4ppYhzwY5t/l0OabR71ODqWCCNeM06Rdy6Uru5pbagEb2v1PRxu3RQTRJtXQr29jUz3Bz8hp3+xbRSEz13OrCmZ+fYdtP9nJAEtXo0KqI/zRZsn2FsTq62jaZm7hOtfmCXnkfVUEtEq3r6YpslOjbQFf2NPeXrvH6mjSksF1lsBqdYghdjTDk/dfcefCS1988ZqlrjERnEYyGWXorWxksHqQ7tw1Dml4SgmCGUkVP0QjxgizuhwLY+6uDfPUzZ7Lc0+koNs/0UXGloQeH/Qn4nlZydeUOLzfuc29sk9tT94Rdr9NTrkEjqdFrXyJZ3g1UhtXhfiDD8hXMunZR4q6R6rAW5jt
 6uDY1woZh4u8Y0F25/KBnjme1XbQITLt+FCRME0P6niC+MB8k87f8u2Op92tgvHBMOM5Ao3MqVbbxJB2JJflsIZVxelZH70iOX2clvZZO20LL1b8BuxRmnXIwCQtNBjVxNUeLIUcyuY0/zack/pzPRxPZymztmGXUQYd5TKHwZH9UrRzUBkyRUpDxGlSFM6gze8iXgmxIMvL4yX1u9mrIOxZp+Q5oub3XMhRnXRR41bgskCz8+pEPkVKwGfujpOALWa4a58rUDTau3RexWePBbWGPW4/o8CkjdY81fh/bUeyhpNZOQapPM6PKHuYSxA290hk+E8ecUyp9Z5It98MZzmVilCLsFggfdlNQLVyoCzeyZrjMnfENFivaWKzR0RZYQNbBcGlwFSGfnyfsQBJDcfUsJLay2Krm5Y0rrMsJ9v71eVKOx1Kw34fgD90plYZtF3EzSXydi1RRccCHkO1JaML7uCWOt5hRy4BXMcstA1wtaaXPo5Rp89yXIh0VNtloojV0C2sOZbewIJzZ4VfClH+R5VatBus4ao8KK55JZLV+iCFxuemoRiYClHKOajGGJApSJFPtVIfBK48BN2FXiZ0GD4nYjiIo5gXJrjXocqckrpu4WtMp8XmJW2Nr9KV3MVA5QU9OJ/q8IerCtcKIbVxqGGJNP0lLVBlBO7z46t8OsedfD/PB//qSi+fyeHL1JbdXnjOku8Zsv6SCt8/59u1Tibhq4awaJrNHMQjvahINGGtnmNfPsjKwhja1xxJT9a0LXJmUqH39HjcnJsWd7vL81TtWuzZZbV9krsNAg7joUOMcK4Z5ciWR1PuUUBpYLS42jibbSJh9Jaf2Z3D+UBpFSQNsXn5Ivn+ZpLpsmgVfjAntvHn5Zx6sPaevcI5V3SW+3rzBUO0wao8SSqxjKRP2v734kL7mKdrTdfSkmMchtomrp1PmVoQpU8tMy/SPG7AjvW55PU8aw1uiSUAz6dI48cfSSD+SIm6X
 QrpVJmXnS2hXGrkxtMZEqYZS1yLyjivIsc5EWzbG4/kNnk1OsyZRqu1MGhNJrZjCq1HLCS+2z6ZRePBK2ShTcUqO/OYLfvs//8jpXztRJ7+fUKgsEUrjlEfq3iASD0eRdipN2CQVg32yvEcxd6bXuNTaRsBOH2rcC7hc0UrMQU/2/dyBI//fUQIlpo3k1VLhF4gpsZ5mcYfE7aEC/1kkW6XS7SvxtnOWOzefWu4D+9O7t7x9+TX3Jmap80oUobHl9EkleVJs8RfKBOr7mQ4qpzckH6PEyn55j17HZHEIKUjHLIYFztUSQ7UXii1T2SaTungyeoVHrb3cKFBZuNWU0oAmuAmNXSXDyU24fREg4J5tGWJ8KVVDwF4XGtPrWCsbJOdsLvkHAkkSuE87oURjraRV3Hi5qJ3BkFpq7IrQhbZxvXmQudBSWsQtDefzGAoqoPNiOj0+9UymtjOlnqXVR4UhpIxWacqR5FbmotSYXLLplv+6PHJI3eVO1Kf+tAifrYnrD3nkU3XKl+KjIYwHFKO+UITW1XyjbgFtZ5MwCNcO+wtjO4rjC5c3u1QKc15mqrROomwClY4lXO6YZUE3jildy3C+OJjEcMsMl7gcqr3LpFFvMlzVj/J8Ko5fuGG1xZWD/3kGqz940qga4d2fX/P9y8c8u/mEF1cecWNinhvzq6hD1KiFd+cnbqBLNdEi7jLeYeLp1dt8vXKF+c5l7l1/zOPbD8RtpllpHUDplM2UsPzbH17y9RUR5boJuoPrKTmdxISq37K6K1ZSk/kiU87RaNo9ioU9zV/Ad6MumWXAdI3N6Ss8uLSBSpqnWOrfnJCMBT18/+INr783P3PjibzmMvfWb7OxdIe2kHL8t/nQr73ED9+/lW3bQK1oR5WmpUCQIMWmgJwLkjSqelgbWv9xAzadCl7uE9iciVexXj3EdHor4yEVjIgi1tnHk2edImpRwVrvEmtdOgwRYrP2ClKk+aqju7lkXGG+S
 keLxJP+iFZy3Kq50jdHa/oQGS6NlNmnopfIM+VXhjq8hcM7E9j/aRKZQdVkeCdJk0TL7xswKnVUBmiIscnF70gceecKUexXUCqRRN9gwk+a89wHTrh8Gk7a6UyKQlqpS+shK6KNClGbKnHmrT91I8ujkY2BVTTi2I3ulRiF88zfka1KJHzz6mvePb7P3Zk5FiU61gprKYP1NKunmWkyESzObRQxWIkoY+RsDN3idt2nEtAe8GQyUcdYQS8mb2G9kzmo9kdblpkNxbRxs2uFtcZOel3NozASMUgE7gookQjbxFiEhonASgqPKP7mmElttAsH+34ZiEJYczC1kQXhvXaHQhHBaomtlSJgGm6n1DLsnMyEALx5But6QrXlLv32o/I+UhSaY3GMx0hhSKPqJLoNSZJYKtDQ5pxHp0MK3fYlzOYMyOfJcTjgTf4OT/J3hYgTC++ckPjpX8yYbzF97nmE7vbHYYsfhcdSGQ2sZTJAtktcs/ikiFdgM6boVgv7GRWtXNfNcymvlOR9EqtDDMzoN7gyfYNbM5uWWUCp0sDVAWaUCcFGGizNvZZHi3ekJpaJdkjCbV80ChEu333mcyxcmt3IkERBk4j2s8273JhcZ0qcol2h5XL/TVpLhrg7Mk5jaA5xR8LJMt8kXNTNZOMky3PXefn6OQ82N9FGVRK735czv/CUKDrLD++/4f2tuwwUDTBUPkf04WxKPSqojKohR3CkPkZHWaSGas8SQZl++gqGxc2meHbtNpu906gSmmlJbKQtq4sZEcluRSNXxqak+R7w6v++5NGfnvDy8SuJwHM0BWfh9LsLzPQt8H/++oMkglmq/SX65pmojFRT7F5OmWsZk2V61iWO/6gBS88lLo+aB+l45zGXo2MqQWU5MZVnAvHb6k3c6RQ6JQpcauikxi2H3LPppFjl0xBaIxl7mNuaPsaEkVZE4YbjS2l2kjgm/7YiALsQLYp5NIYG5zKm6ydYNYwyUTfEfN
 MEZQEZJB+LJUzilsF8d0ONnpWWCXqzTNR7NNHkXkTqmQQmhkVtDTP4fGGPza9dKBA3KzivIPKUxLZTHnRmqigRxTe1j9PXtshk5whLVd3MZ8m+pKjo8y1nSTXOzb5JVtUm5ksNmOT15ReUKD3EBWr7mcmuJNsmSn5nvrUmiZoDUeIQ4jK2mXSdjqH6RCorXdK0FUb6/WsYds3EcCad7pBO5jVXuasfw+SaIS4jLnE0wTJLx3guhZ7YaqbjajBaCbtZhWNwLWc4uJKuM1HiLFkM5HShckqzjHKcLJCiEucYPBfHsDCI8WQABvmslYIO5r1y6T+bgM5eHDiukQH52zbzfX7Cy0OBWdIcFYwJ12olPhefSKbxdCKtJxMYU1SiDa8ic1swcduDRASS6HYspUf4tl8csc1atsM7nwqPWhRHsig9lSsuWIbOOYk6ifttwSrm2yfE4Ssw+koBiavXukYTeySdceNNnr16w/tXL7ghzbdkmMaU0cdAz3Uu960QbheP1xf+FDhJYlFNMNWkwerjY4Is8ZTKuQ0/nWq5mbYoXC8NM0+ncooN4dmVoRUKvRrpr14Qt/kLs33zVAcpubgjmAt7onD41I3Ui0omdIvcnb3L22cvWZtdJ9O9jItfhOGyM5bx4SW+e/eMh+Ji4+3zlicjTQt+lIZp6Kmd4MrSJldmL3O9f0ocWs1Qy7K811UmGnoZrOugKb6OXLdyGsM7GCvqwphWZlkgUSKGNFnTznffPLcsBHgujd8VloPTJ46c/Y0jrWmlvLm6ybWRS1Q7C8NHVVEXWU+5YJjapcJy0Wu88e8iaItn3HLmfn8yTygYDRfVDSlEK6oQtNUd+499iLYJEbcLpMM5gdSTKRKjiuTkV5BtnUhjYgd93rkYbFLpdxTHlA/qdMylXVygwz6O9L3h9GQbeLR0n/sjl+kKL6E2IJvpgnoipFATz2TQEaNlOVPDqvBDf76Bjc4F+fduaoUb4w8kCvs1yo6
 kcvDnh7DZKTHtfCyntxzn1/+0lS9+5kSOsEnYtmiqUluYKqwm30ZBlV8Ng2auEecZM0+pqjbQEdlAvVMGVY4FckAKpLkzqb+QhTakEY/j2QQfTqNglw/KL0LoOCaMJ5m93jEDxZ44mvwqWCjRMBMmXGUeQnw+g4lk2dbeVV7MrzAVm0+bRFO9XQKNh0PRW0UyIoxhvhqqOp9Ew74oVPZZtNunYLKNpm1PKBMJPXJChG9PKYVDq7iUI6AeXkS/gzCjRFWTNOtGUb+4kTTzBeEum0xG5Rht1opj28ZIjG3BkJBHy4lIhswDjKV5m6Roih1kW4Tzsnb6ErA9hKgjkdQ4CYt41tPtVMyoKPNiXBlNp+JQfBlOlThWT3AdXedzMXlU0SrRP3JnHPn2BYxKVF7KExZ3T6PDJZWirxwsDyNZGLzCt+/e8Zc/fc/Lq3JuL29ILB1gcewKD9dWGJW/STudju02idSnk2mRgq5LqmfLP+0iQoQrzyGe8P1J9JeOc//eW14+krj44DnrbUvcEYY31EwJm83z9t5tFpoN+EhUdN3uQegBcdWP3Yk5lU+/8O4l7RSXmke5srLOYIEahU0aYQcjWZAI+vLNN9wTRrtx5RXff/eO1395wpXRdVZH17g+vM5G+yTaxHZGxBjevHhrEdi62CayfAvIl6hcFVSKLqMLdUovJXJcCk5FU3A4hQrnSjY7x5kvl2aWeo7aH4DbJ/6WOyIirIsZqJ7mwdI1cn0a6c4apy3GQKuIWqV9AjWeiWx0j/y4AWO/PLfs+Ecv0s9miXKbHzoSSLVwV7xtPnafe7L7V0dx+MxdeDCT0ouF1B8PI0FObrx/MwvVur/dNyZs1HYui1ZRYpV9Jr3yuzYHcYMkg8SDh7z8+hYdog41tgXSzBmUupSTKJxXJT9DD7hS7BZBZ3AZhugqBvLrmKydozm0mPAP7bnw4WliBf6P7VYQJ87q8dVFXLeE4HquioTAJvKtsvHcnYgx
 Y5B0m3xyrUvo9quyjGQfiVMzI9DeV9pLi0QhXdIsi43j0qjiPHKy2gPFcUpHJaaYH74SQd3+NAynUhlxEcfxLyLzZBwlvtXSfD3oLyTTeyYZ/flsJsWFpuMbuJEsPCRc23I2ln7Z/+aj4XQeDZLomsJC5QRGETLVsXh0Vmb2y0YrUVx/Ql4bXIM+Lo8iKdJGYbHeBBEyYblJK/OtR8nMRTYzF1vPVKgStYiU2jqXAXmvy7lyUiUyttrkMWFeXndYXPdEAlPhasbChQPN084kwpabv39zaydgXxDKs0oWszpYSawVp8xj2rWAGYnYcXsk4XziSoOT0jInx/z930zBKK0Z3WTL8esQAdFJ8+sclGhPRRD5iQPOzjVsrn/L/3nzgm/ev+DN2/fcGl2VxplCJ0I3rxLnOBEl6FGDWjlOun0hWSLY3cppjG2D2O2KwO5LV6KPp5IXqGF59Rnf/vCGF0+e8erlSx5eu4O2fIzHyw+Zrp5lWjsukbGUDGFgr88D8dwRSpLUYJwkFU1cL6PZHZR5FjOlm6ImIpeUsykEmb+qydFI4jHKedXxw59f8MO3f+GxcOOj+TXuL86yZBplKH8IY9G0NOhLnl+/zmzPNL1Vk6Q65BJ3KoN4OYcNijY2xsUxFy4zUHaJItsUmsy17V6AQuJ68pk8gg+JKGy5QGrEMJfmHvDwwdc8e3iXS+N3uLF2w8K0usQGLmyxxvoje7w/8/1xA9r85tyy165gqtwyJCrmEHk8haIzKZQ5ZJBuncruX9px8oNgap1KmC/sxOlYCiGipENZJXT5KDH5FdLrrEAvUUonB2BAIo15MllrQCVXxy7z5soGm7oBUeF0mk4mo7ERlxQrbonSMl45wxnzd4u/ccZ/ezBFzi0YS6a53ztJ4clgvvjfv+P3/7Cd3HwtizNXmG4dI/BgOtUJOhbVBpo9k0mzK6YxRMVaWSdGx3RG3JTCN6X0S76fzOxgTdXJnPn7rsEb3Ls6y
 XqrganAYtQeZdKYI8zlNdN8OoKkvXF0CtuOOBYyoeimy7uUqjPm6dwa+sKyaTgrMO5Tx4Q05ribqLdNOH2u1QwqeizL1QbOitufCEBz2JfeC1Lked30eWTRZSNMKE7aY4m3eYzmDLEmBev+lYswSypGr3ImXbMYlLjdKSfYFKllOFlij+xD28VcykUIzTe2DvqIoEjMMjmV03UhlyH/CsukuS7zcrU84bOkWiYiai0LrvuFNRcCkxlzln3xLBAXzaXXPgzFJxfJE7G6Wj5KhQjL8Q9cybDOYjqjV1LCGLfULdL41fI+1bLduSIKhVSeDidym68c9zSuXn3Ku9ePeP3uW168eck3z1/ywDgt6SKbcomVyfsjCLOV46YwCC/dY3P2Hj1S1FcWHvLw+jWKk9rxOVNFqXchJuUQf713n1ebV3j54AXvJM6+fP6EW5fXuTzczdLIquVu9nCJySHCvW67Y/DeEYRC4nWCVxP6Wjlvfski5oXMqqaoTKzDdosrZ/5gS/CBJMpslNRIhPzmwS3ev7/F2tAcmxNXma+YYrFllhHDIqPaCS53GWiPaWCkSs9YqQ5l0Qiqgkkahd0UVmEEfWZDzGlPTGV1qEOaxAmzKQvqFgxKRy3sHX0glYTzdcz3SFopLaQ7XUlfchGbgmeX1GusaQZRSKo48B/27P9PV4IDG3/cgHZf+S1HHA7G+VNrAsyrwSWShAsgZwkgl9uky+/iCdiRSrUUtr7ASGf1ONNJ1bRJsagu5tPjVUireeCrcIVBuMQQKvB+KJG+3EEerV2hKTwR5YlwGo4oyD0ahu/2QNrS6hlKauamRIjMkGqiDotzHRJ2Kh7m9e273CrVkCKfvf1frLCySmJ28SqrLQYmhKc6g1pZK9YSedARp08j6JOcvRRSz0hsLX3OSrolgvaZZ8sE5tJjl0pfkMB8t/lBlgNc3H0Sx0+c0Zj5RmLaWG4bc1U95Egk7DgZj+pwlHCbmuvqKdpdMh
 h0lmjtm0+BVSaNPmoGhMnM+6raFYDONo/lpnnW1YMMnI9i4Fg0pV954/1bZ3S+ahZjW+ixzRCWTMdgl4HpUATDnuVc6ZeIFZOD+++8aJB/W/JIw3TSk8HDIYIAGi4LIxg8cmk8HoPeo9qybM18L6TlcWtuBRJH0xn1a6Av4G/DpmZNY+z8cBdOosTmB6H0i3AYHMwrh7q5XnOJrqh6VuNbLMvqbHfGUOjZwGx4IZ0ikvaixtm26YwFVDEdVkjViTBST0nMcinAIM6oOBhG8oEE0vYHi2N38NfXL3nz7iV/fvnGsi7z/f37tAenELjdneS9gVQE6bk8eZ/HD9/x7PU74bDv+PbxUx5tznPNNMSI+WnEYWXkHEnAkNImqaWEma5pnr15x5tvn/Hu0VvmWvW47fGmI7malxub+NuF4rfTjyTXGkqSh0kJM2Ao7ydHBMXhywASxSRuGC9zybBGgmOtYEocYVZSu1LohiwVPYp6Bkol4jeP8eT2U8vApvt3RDxu3OHqxBXKAypIE8avze6nXZLN91cfWZ4xmCtM7/C5N1/8+xE+/hdbAs5VstxzDVWSlrnKHtrkeNTFdtOSNYZREkSDCMH5z2Jw3p2N3ZYgwgXFrk8vCctOoRY2tjuYQIwkxdEszY8bUOGetOz0W3c++pkVTruDafUpoFbstcIqnkZ502K7XOp8a1GHmcchSPOlivLYp1J6UlxPlHYwSiKUryi4txRIoY6W2GJi9kZZHijSkVvCNnHQUOs0ao/n4f5ZJB6f+1B+MYe+yEYGojRc108wEFNDb2gXl4TVliMSKdtxglZpoFbVouUhIWvlncIjrUymNDMbnWsZ7BqyU0HZSWmQ81l/i4wOiXSeDEK1LxKDebir+dmBdoVcb5rk1foi/RIDPLbFk3QsmRpp6kKXBq7MPeGmKFXnYWGhXf4Un66zLMIe81ZicP3bVLb2aC35ASrU3uZF5ZmkSkQs+DIQ1elIhpMqabYJoOJ
 YEl0SvYsPSeNcqGRRXHPUXcnMxVKMThLFpdn7hbmWcloZLWwj53AcjVLcvcfDGRBmHN0XyIJHKQv5nUwEZGAUbjI/FmDcv4Rhh2QmReRGfIroOin7djZRYn+uZbDVbISAfbkJFyvzGL50BnyFUYVpF1WzIljjrDcM0ONpfuBoIdH7ralIrWVZMyzbnk7x8VAqRCAa5Fz2SbFp3VMI2O9N4J4Yy8NutB5FpDrXUiHCm703mqXWZd6/ec93b763PCTl2+fvuaY3ErDtHP6/OsmZL90xtc1KfLzHS4mm3/3fF9wcNVIVoaDctYzmoDq6SwZR+tVTcKGBsaJOdMWjTHdu8uYbcdan73jw8BFD9S2c+Z0TZ75wZU47SpSkifM70tDm9vF4dYWHG9fpzW7j7CdBWG3xJOB4Er0FeroSqjHFyvlIUNOVoqMpop5iie2pNsWEHE+nPN98p8ITafLHPH1wj3mdiOfQMvVxncJuWbSEqUg768RQWiOdmQZUhUOc/DSEY18kEXS+ho4SAy+Wr0jaMFAiMX/MtMmDB2959/41t2bu0BwvrOjdSHOiljibDEIl7Sz1CnNOyzbfe85I3yb1wv1J5yt/3IBJbonLnp8ESjb3RrEtCmNoA/XRNThsPUe5VwXd5i+Cw8vpSW9Cm1FBpfBY8fEIak7F0iJO12UlhS6cY7RLxxRdTU9KBwHiEPFnIykIqyNJVDzc/Pgp5wwMUR3EH0kjySqaXBt/tMEFjIsidLrl0yaRaq6un9azaez4yRnCd/nxsMLAepHwjRTitDDhtH82U+7FljvvTXbidKdD6T7kh0F+Vu3yIGWLOx3CsuPB1TTL9rRIYQ2Zhy8l1kgslqJyzKLwYLJE3QZWZh/z/ukdZhXSrJ+7U3IkVCKIkf6QMjodcuiX4zCY0Mxy+ywdotrtEvUizpaIG0aTv0/2/0w6XR7FJH7hQoxVDg3nFZbRD9MJnbK9FejtE+mWWDpuHYphpwMjh8JZ
 SxygI1SizIlo9If90Z0IZVzcZ+BiNqtFRmnoPIwiIpOpeuarZjBJVDOejhZxk6YMLpQUEYXOJpku20R6xd31thIz/ZUY5dganYuoFb5r9MxhVT1peejobFqrxOcGWkLr+c1/nMLfL19if5ewnYaI7a4U7Amn9FgO9W5llDgXSCwOI1McsNVRtt8rh7GYCspFWMzfpz5ae8Lbd9/w/tV7Xgqvvd28RVVQGrv+9+f8+v/5JaeE6R/ffML7P73mzeYl4clK/PZ7cE4irzKoXo5fOa9Xb1lWzlzumKKnrIdXd57y5N5rXr19xbu/vuXrpXlSHROw+jgQ56/CUYbXolX2oC3qRROWjz5GxZxug/m2RaJOFeF6IoUUiYKaJD0JF4qJtpKUJQJpUrbTLmLdGNpMV+Yg9YkG5gev8er71zx7+oKrfTPMynld7psSXtTRoRReD1Ry5oMwkval0ZXYIuJxBW3bJq11YzSnDbBquiGxeA5jugl9Xj8Prt/iv979he/ef8ub779jae42bQXjKL0riZYaizidxmLvdZ6+eMXL91/z/MUTrojjzozf+HEDlocULae7V6Kzy5SDnsudkSkW9Iuc3hNJUWg5eWfjyQ2sJtsumQTrQpS2mSgPpNEunNghTjQoimuU+NkX0EFffDO6sBTcdtixY0sgHfVd5PgkcO7XNpz5rTXxx7xJOZ1K+GF5zYce5B6MEStvZDSlnUFhnnn5vJXsdtQS2VolGvVLPOsRh5sTxhsJLKHFKlaiUZawXir1e6Twt7jg/bEP6btDSd0aRJtXM5dztIxI41ULv1VYK6QJ0ml1SKHuuCjx8ThyvRpYW3rI++9+YKmkjqaDcZTuC6fbvZD+5BI6fevp9lRSJPl/VPZnMqcTtXsJC3m96GNLSd3lTeRWP9ROBQz7KvE4kk5zqDilNMawqzi7dxGmM1FMnAhi2CqIvp0+EhuzmRT1X2ueZDisgj5pwC6JI7PCg+PmFROB5ZjsQ
 +g+7ku/i5L71+9wrWcYvVu6iIFEWCfzvXsiIOIk7XbmCy+RjMv5qBBh0znm0+pifj5DOZlWwcSZL8oUdDEVWsOQIMFwQDXjdcMMTtxDl9tNpX0aylPuBO5NJ+JkOkP5RrpyW2kX8SgXzm3yqqfNr4pKm1TiD4bg8JuTFAepef71E15/95x3b77l6/sPWRLu9P/Cno/+YRu7P42lSTnAnbERZiuryXPLxGV/EtmBbYy3Xaa12kh9jpHN6U3GqqcZytagjSqmN7+W1/cu8+KbZzyaucpU3SD2+xNIPq3Az/yMxYgG5jtGaQgsIu9MMsVuNby69piHX79gqu8ug5oFqkPbMOToUSdWk2ArPLY/mWoxkPIQNTUJOmb6V7kj7Pr29SvevLjLtZkNNozL3L90m+XJNZbGzU/InaDAtpy2/HmG269iFBfrCirmSnM/t0Y3xaWnLKPyNweusCj7sy5//82D55IE3vDm1Ttemb+SePKce+sP2Zi9g7pshIrIBp5dv8mjOyu8kyb8+skrnl67y8Ppyz9uwOZI5fJQgBT6mSIW60YYreqlu9TEQF6PKGomTg4ST0rEVu0KqPOup0MaoEk4MccqkRIH4ZfAOubi1PSlDDCjrmP/r77ig//xCRl+eQyVtWD/pRMHRX2/+ue9/OF/fkakeThO5jCp3s0YGuZZLdMxn9zEgn8+ffbias4Kqo+JY4qa9XgWiQqruFTXR61LGqFbfejx11iuOkZI8+XsDydjawCFe+PoTRSmzGjFKHynluYrPZdF8dE4tDaZaJ1yBNYLKFOYuHbpPm9/eMXzjWWabcMxSRw2XyQxj11vl4g3mVBP6gEX3CVS6STHG7zLLcvO+h1jSdoTwLk/BlJ7Ioppe3F8cS7z1cseZ2FhUd5RF/NFjyL6DrkzIOrfLzF1MqmPG1M3pZCmaAkopPdiKp2n8rjct8zXpjm6AorpuZhFx5FgxiMkoiqaedg3htonDtUZcdVoDXoXBY
 nb/RiOk6hl5m7rKIYdE8k8Gk6TVR7t7gWo5X3dPnW3PA99UphxQNhmzDxCw6eK+TIjD8an0QZWWRDgFz/dhffxKCocFCzWS9yKKCX/eAJF4n51USoWUltIE2b13eLI5/96gQ7VCu9fvOPt2x/47ukzrrROEbbropznjzi6LYTp/kWWmrtIOuiF/25XgoT3a2Oq0Jf2saSfpLt8mG/uv+PR8iZrbUMsaOYocSsm64gkCadsVrT9zBZ0YFJU4bU9GPfdIYRKdOyIraEhoJQ8uyLKJPXMNErEXblteSrwixfvxT0fMNe5Qm+ZCcUZiX3HBBMyhyiN6cZUbKQlvAhjXjN35+Z49eA6N8bm6Uxr4PrkBq+fvbA86/398ycMVhrEbDQ8WFrh/tQyK+pRtBFqSoQ7Z5vGWZb9nW0w8XhxXfbhKsumFR7ceSJN+JgfJI5/J6L05x/+yvrULNeNU0zXmejM1qISUYi9kMFIlYpnyyvcGpxjuenvRtOXWEctG85nY/TOZ6Nxmkvto6woq5kSBjPJCRwuHqc+Rk1ZaAv6tA6BSDlQaZ10ZPVQ7N3EUI5JuGKZa3PrlMVWsf0Xzmz9hRNhuyMI3eeJ9a9OceLfT7P7X6z44z+fI8q/kfuLV3i5cIOnQ5fEdZPoOBWJ7rjE0n0BeG91lXiRRneIecBrLT3eaajERSP3+pIuRTPvnoxyhz+Ve/yo3BdCsRz4eaWJG0UD1NvkoJIi0llnoJLma9oXy7y4TGVEnZykRb6VAvr6+QPumC9R+2aiFnbrjmpltnTUcle5QaLfemUf/jv8CPsymgbrZJqc5fOPx5O1Mxx3afZW4coh22LGz6ZiOhnKjH0Mw+7mkRO5zJsnoZ1PZWjHRWnAQDbKZnj1+B7PHtzn9swmt0Xh+23j6PErZKW+XzirgOqTMQxZxTAaLIUyPsdYRjNtrgm4/tFRwL6A3ohaSmzicN0WQH9BJxqbBIwS043edVRL3K4Xlux2EJH
 xLiPssIJmt0aGJQJPp4j7JLUyEdfGpcJ+TIIR9Z61JInI/e4/HTh3yIULH7panhRV55jOWGQNOcKXcVah9OVrhdkTSNyfTkniKPfuPLc8BtvcgE+uXGewsIfwk7F88pOzFKVpWC7rpMA6Ec8twThuDyHgiHkSQrplQFRnYhsPJq9xb3WR1qw6ku3CmRFX29ROUGZfRLl/A3qFTra7CEN8CzneteT4Ci8K8xdZx5BnlUCdiFpTcBO5Hqk0Z6t4+eQx3/35O96L6zy//4rpngUqk3qoSu7l7vwaM3U9LIqRxO8PI3K3O/GHXcm/EM944ximlG6mh6/y4t0Lvn1xm9kaLaN9lzGqL9MvKac0oInVkXu8uvkXbq09ZKDYwHKjgeijCRhyJR0VdTOqG+fx1WssGY2MaVq5MTvPn57eZc3QQ3OUhiSbXMo8cgg8JcJ5MF3EPIFMSReGtBYWB27/uAE1/gXLbRdSKXEspEtRwUCkqP6ZJPJOR1DhrqDobLTwQZ7EsDJqnUTBQ1WsSmRcSaxiJEzJ5YYe7owvMG/oYjhPT3mgmuLwNmKlOI79xlVO0gkOfuqDs30usWEa+lpmeCBNPhMln+WZTYNNmrCIF95/vIDH4QzKUsZY1S/R42b+2iKUQbdcIvZHEvRZMIrdvpaLK1opWLU45GhkuzTMBGvmmSTJKrTmuS/meZXW6eisJMtfzGND2YgmooOHV1d5uDDDckIVbUcjaD0lAhNUx53pJctIwalENRs1JnoDMymSz1OfzqND4p/OqYiB4EaJnHm0upVbRvSPhNdLcTcycDaOMatwy0Al8/MkJi5E03fMk74drsyGVfO4e47ewlziz9kzHJ3DUkgJrUeTGU+QmBzbKs0XTdOBYNrNNxxL/J+Q92w8ryRtnxeHPvZFH1bPQkgF2eL05u9pO80cKK7aY35gTmIrPRKNm89l0BNcIw3XRL84xZAU64Dw65II43x6O1NxtYwmyr9JJJ3ObGE8
 uxPfw2kozmXjuy2M0tNJ1AtHdQdXSjpJ5PAHvhT75qBNGOCS8RbPJbp9I833zcu3vP76JbfHF5mWiFsv+xd8OlHYrFWEa4AstxRct7vjtNWDwIMJ1Ca109U4Tn/OCDfHbrA5vGgZG+i504/qdMGOagONse30FXbTGKAkVlyvyq2ekexuWs1TztPayD6aRNzhJOqSDCjDyoWv1KRczOD26gYv35iZapUrs4vcW77O2tBNFgY2mOkUZotUWZYUpp8N5cIWV47+3oFPf3oMr0MBDLWOcHf1Jq/XbjJTa6JBIfUrjfbk+n1G2pdpL53gsvlZkDNGntxcYyxfT6XE85gT+SgFO7olns6LY/blq0hzKyXkcLplpMmcuHBzeKVwZzm5jpnYf+5FYkANRZFtpApupJiXBhqv8/Ivz37cgNlOwcsRB8Isdy1nHEylyiobd3GYfR864PBVMEpn4QEpvNTtQVQI813K6kJzNgmTFOJAeJNl7ma5fwxnPzjLxQ9dUEqhNAWrmem+zHz3MuqKURa0y6yXSAySjdwo1lLvnEPGgWjyDkbI63MpPleALknH/SsP+NOrR+Ja1TScTKHzXCrjwnEd4gR+hxNEhdIpts2j06+RmfpZ7hvHmE2ulG0ps6yJ7DgVj/5UkuWq6GRAGePmuSJZnWxUGJjLbqBLXtciJ1UvrzN6lLEoBb+UXs+gj5JR5wx05yJoOaGgRaJZw7kEuvyrmVPPYchU0XIxlxFhvkFhysvxpUzElKE1Pxn4pIKp9B7Lc/qGzVc193ox6VjCenKjuHWkuHQ4R3/mQMbn3nRLwU5I0/cktFiav0eYRudQQpnsX9KZTDp8KsQViwncEYrrkURptDwmJaI1y7Ealv3pFgczRTcyElTBoDRjh3UavXbmOaQ59AdVs1I4xaWEVuakOZdiWkU4ZDtDK5nOaGdU2UlPdAVzsl0an0La5bPMzxCsPp9nGcdY6FyEWtwo1yGXWi8dt5ee8Jcf3
 vPn77/j+3fvhHWEea6Zh812YRQX6XJNIf9gMhOdy/zpv75jUDuFw5dxuB3IoETOz7TxCvcubWDK0jAqwrbUNkmORG3zza8NSQOMa6WR68eZb+pnSDVHtH0epux+VjsksmYKBsXUUO1fSVtcPVd7BinwEIa/mCA1do5c/xKmtIOCSQaGhG8f3n0q3HyLFeMAqoB6VNEGBms7pH4DcN4WytmPXTn+c3scdgcRcjyNsWIdfUm1FBxLpEu279XDZ7x8/Y4fZF8XeibxFIH2ORxNd3oX3QoD+jiJ6dGdNAo69dZNMlA2So9sqy6njbsz64yVaekQkTav+TzzkS3WH9pz/

<TRUNCATED>