Posted to commits@systemml.apache.org by du...@apache.org on 2017/07/18 00:20:47 UTC

[4/5] systemml git commit: [SYSTEMML-1185][SYSTEMML-1766] Merge experimental breast cancer updates

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/Preprocessing-Save-JPEGs.ipynb
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/Preprocessing-Save-JPEGs.ipynb b/projects/breast_cancer/Preprocessing-Save-JPEGs.ipynb
new file mode 100644
index 0000000..7e893f7
--- /dev/null
+++ b/projects/breast_cancer/Preprocessing-Save-JPEGs.ipynb
@@ -0,0 +1,610 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Imports"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "%load_ext autoreload\n",
+    "%autoreload 2\n",
+    "%matplotlib inline\n",
+    "\n",
+    "import math\n",
+    "import os\n",
+    "\n",
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "from PIL import Image\n",
+    "import tensorflow as tf\n",
+    "import pyspark.sql.functions as F\n",
+    "\n",
+    "from breastcancer import input_data\n",
+    "\n",
+    "plt.rcParams['figure.figsize'] = (10, 6)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# from pyspark.sql import SparkSession\n",
+    "# spark = (SparkSession.builder.appName(\"KerasResNet50\").getOrCreate())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Settings"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "size = 256\n",
+    "channels = 3\n",
+    "features = size * size * channels\n",
+    "classes = 3\n",
+    "p = 1\n",
+    "val_p = 1\n",
+    "use_caching = False\n",
+    "normalize_class_distribution = False\n",
+    "seed = 123"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Read in train & val data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Read and sample from full DataFrames\n",
+    "# TODO: Pull filenames out and simply pass them in as arguments.\n",
+    "# NOTE: ***Currently hacked read_* with updated data filenames.***\n",
+    "train_df = input_data.read_train_data(spark, size, channels, p, normalize_class_distribution, seed)\n",
+    "val_df = input_data.read_val_data(spark, size, channels, val_p, normalize_class_distribution, seed)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# # Save DataFrames (Optional)\n",
+    "# mode = \"error\"\n",
+    "# tr_sample_filename = os.path.join(\"data\", \"train_{}_sample_{}.parquet\".format(p, size))\n",
+    "# val_sample_filename = os.path.join(\"data\", \"val_{}_sample_{}.parquet\".format(val_p, size))\n",
+    "# train_df.write.mode(mode).save(tr_sample_filename, format=\"parquet\")\n",
+    "# val_df.write.mode(mode).save(val_sample_filename, format=\"parquet\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "if use_caching:\n",
+    "  train_df.cache()\n",
+    "  val_df.cache()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Explore class distributions.\n",
+    "for df in [train_df, val_df]:\n",
+    "  df.select(\"tumor_score\").groupBy(\"tumor_score\").count().show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "tc = train_df.count()\n",
+    "vc = val_df.count()\n",
+    "print(tc, vc)  # updated norm vs: 1801835 498183; original: 3560187 910918"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Sanity check that there are no duplicates.\n",
+    "if p < 1:\n",
+    "  assert train_df.dropDuplicates().count() == tc\n",
+    "if val_p < 1:\n",
+    "  assert val_df.dropDuplicates().count() == vc"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Normalize Staining"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def normalize_staining(x, beta=0.15, alpha=1, light_intensity=240):\n",
+    "  \"\"\"\n",
+    "  Normalize the staining of H&E histology slides.\n",
+    "  \n",
+    "  This function normalizes the staining of H&E histoloy slides.\n",
+    "  \n",
+    "  References:\n",
+    "    - Macenko, Marc, et al. \"A method for normalizing histology slides for\n",
+    "    quantitative analysis.\" Biomedical Imaging: From Nano to Macro, 2009.\n",
+    "    ISBI'09. IEEE International Symposium on. IEEE, 2009.\n",
+    "      - http://wwwx.cs.unc.edu/~mn/sites/default/files/macenko2009.pdf\n",
+    "    - https://github.com/mitkovetta/staining-normalization/blob/master/normalizeStaining.m\n",
+    "  \"\"\"\n",
+    "  # Setup.\n",
+    "  x = np.asarray(x)\n",
+    "  h, w, c = x.shape\n",
+    "  x = x.reshape(-1, c).astype(np.float64)  # shape (H*W, C)\n",
+    "  \n",
+    "  # Reference stain vectors and stain saturations.  We will normalize all slides\n",
+    "  # to these references.  To create these, grab the stain vectors and stain\n",
+    "  # saturations from a desirable slide.\n",
+    "  ## Values in reference implementation for use with eigendecomposition approach.\n",
+    "  stain_ref = np.array([0.5626, 0.2159, 0.7201, 0.8012, 0.4062, 0.5581]).reshape(3,2)\n",
+    "  max_sat_ref = np.array([1.9705, 1.0308]).reshape(2,1)\n",
+    "  ## Values for use with SVD approach.  These were computed by (1) running the\n",
+    "  ## the eigendecomposition approach to normalize an image, (2) running the\n",
+    "  ## SVD approach on the normalized image, and (3) recording the stain vectors\n",
+    "  ## and max saturations for this (ideal) normalized image.\n",
+    "#   stain_ref = np.array([0.20730702, 0.56170196, 0.80308092, 0.72012455, 0.55864554, 0.4073224]).reshape(3,2)\n",
+    "#   max_sat_ref = np.array([0.99818645, 1.96029115]).reshape(2,1)\n",
+    "  \n",
+    "  # Convert RGB to OD.\n",
+    "  OD = -np.log((x+1)/light_intensity)  # shape (H*W, C)\n",
+    "#   OD = -np.log(x/255 + 1e-8)\n",
+    "  \n",
+    "  # Remove data with OD intensity less than beta.\n",
+    "  # I.e. remove transparent pixels.\n",
+    "  # Note: This needs to be checked per channel, rather than\n",
+    "  # taking an average over all channels for a given pixel.\n",
+    "  #OD_thresh = OD[np.logical_not(np.any(OD < beta, 1)), :]\n",
+    "  OD_thresh = OD[np.all(OD >= beta, 1), :]  # shape (K, C)\n",
+    "  \n",
+    "  # Calculate eigenvectors.\n",
+    "  eigvals, eigvecs = np.linalg.eig(np.cov(OD_thresh.T))  # np.cov results in inf/nans\n",
+    "#   U, s, V = np.linalg.svd(OD_thresh, full_matrices=False)\n",
+    "  \n",
+    "  # Extract two largest eigenvectors.\n",
+    "  # Note: We swap the sign of the eigvecs here to be consistent\n",
+    "  # with other implementations.  Both +/- eigvecs are valid, with\n",
+    "  # the same eigenvalue, so this is okay.\n",
+    "  top_eigvecs = eigvecs[:, np.argsort(eigvals)[-2:]] * -1\n",
+    "#   top_eigvecs = V[0:2, :].T * -1  # shape (C, 2)\n",
+    "  \n",
+    "  # Project thresholded optical density values onto plane spanned by\n",
+    "  # 2 largest eigenvectors.\n",
+    "  proj = np.dot(OD_thresh, top_eigvecs)  # shape (K, 2)\n",
+    "  \n",
+    "  # Calculate angle of each point wrt the first plane direction.\n",
+    "  # Note: the parameters are `np.arctan2(y, x)`\n",
+    "  angles = np.arctan2(proj[:, 1], proj[:, 0])  # shape (K,)\n",
+    "  \n",
+    "  # Find robust extremes (a and 100-a percentiles) of the angle.\n",
+    "  min_angle = np.percentile(angles, alpha)\n",
+    "  max_angle = np.percentile(angles, 100-alpha)\n",
+    "  \n",
+    "  # Convert min/max vectors (extremes) back to OD space.\n",
+    "#   extreme_angles = np.array(\n",
+    "#     [np.cos(min_angle), np.cos(max_angle), np.sin(min_angle), np.sin(max_angle)]\n",
+    "#   ).reshape(2,2)\n",
+    "#   stains = np.dot(top_eigvecs, extreme_angles)  # shape (C, 2)\n",
+    "  min_vec = np.dot(top_eigvecs, np.array([np.cos(min_angle), np.sin(min_angle)]).reshape(2,1))\n",
+    "  max_vec = np.dot(top_eigvecs, np.array([np.cos(max_angle), np.sin(max_angle)]).reshape(2,1))\n",
+    "  \n",
+    "  # Merge vectors with hematoxylin first, and eosin second, as a heuristic.\n",
+    "  if min_vec[0] > max_vec[0]:\n",
+    "    stains = np.hstack((min_vec, max_vec))\n",
+    "  else:\n",
+    "    stains = np.hstack((max_vec, min_vec))\n",
+    "\n",
+    "  # Calculate saturations of each stain.\n",
+    "  # Note: Here, we solve\n",
+    "  #    OD = VS\n",
+    "  #     S = V^{-1}OD\n",
+    "  # where `OD` is the matrix of optical density values of our image,\n",
+    "  # `V` is the matrix of stain vectors, and `S` is the matrix of stain\n",
+    "  # saturations.  Since this is an overdetermined system, we use the\n",
+    "  # least squares solver, rather than a direct solve.\n",
+    "  sats, _, _, _ = np.linalg.lstsq(stains, OD.T)\n",
+    "  \n",
+    "  # Normalize stain saturations.\n",
+    "  max_sat = np.percentile(sats, 99, axis=1, keepdims=True)\n",
+    "  sats = sats / max_sat * max_sat_ref\n",
+    "  \n",
+    "  # Recreate image.\n",
+    "  # Note: If the image is immediately converted to uint8 with `.astype(np.uint8)`, it will\n",
+    "  # not return the correct values due to the initital values being outside of [0,255].\n",
+    "  # To fix this, we round to the nearest integer, and then clip to [0,255], which is the\n",
+    "  # same behavior as Matlab.\n",
+    "  x_norm = np.exp(np.dot(-stain_ref, sats)) * light_intensity #- 1\n",
+    "#   x_norm = np.exp(np.dot(-stain_ref, sats)) * 255 - 1e-8\n",
+    "  x_norm = np.clip(np.round(x_norm), 0, 255).astype(np.uint8)\n",
+    "  x_norm = x_norm.T.reshape(h,w,c)\n",
+    "  \n",
+    "  # Debug.\n",
+    "#   print(\"OD shape: \", OD.shape)\n",
+    "#   print(\"OD_thresh shape: \", OD_thresh.shape)\n",
+    "#   print(\"eigvals: \", eigvals)\n",
+    "#   print(\"sorted eigvals: \", np.argsort(eigvals))\n",
+    "#   print(\"top_eigvecs shape: \", top_eigvecs.shape)\n",
+    "#   print(\"top_eigvecs: \", top_eigvecs)\n",
+    "#   print(\"top 2 eigval indices: \", np.argsort(eigvals)[-2:])\n",
+    "#   print(\"proj shape: \", proj.shape)\n",
+    "#   print(\"proj mean: \", np.mean(proj, axis=0))\n",
+    "#   print(\"angles shape: \", angles.shape)\n",
+    "#   print(\"angles mean: \", np.mean(angles))\n",
+    "#   print(\"min/max angles: \", min_angle, max_angle)\n",
+    "#   print(\"min_vec shape: \", min_vec.shape)\n",
+    "#   print(\"min_vec mean: \", np.mean(min_vec))\n",
+    "#   print(\"max_vec mean: \", np.mean(max_vec))\n",
+    "#   print(\"stains shape: \", stains.shape)\n",
+    "#   print(\"stains: \", stains)\n",
+    "#   print(\"sats shape: \", sats.shape)\n",
+    "#   print(\"sats mean: \", np.mean(sats, axis=1))\n",
+    "#   print(\"max_sat shape: \", max_sat.shape)\n",
+    "#   print(\"max_sat: \", max_sat)\n",
+    "#   print(\"x_norm shape: \", x_norm.shape)\n",
+    "#   print(\"x_norm mean: \", np.mean(x_norm, axis=(0,1)))\n",
+    "#   print(\"x_norm min: \", np.min(x_norm, axis=(0,1)))\n",
+    "#   print(\"x_norm max: \", np.max(x_norm, axis=(0,1)))\n",
+    "#   print(x_norm.dtype)\n",
+    "#   print()\n",
+    "# #   x = x.reshape(h,w,c).astype(np.uint8)\n",
+    "  \n",
+    "  return x_norm"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Compute image channel means"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# tr_means = input_data.compute_channel_means(train_df.rdd, channels, size)\n",
+    "# val_means = input_data.compute_channel_means(val_df.rdd, channels, size)\n",
+    "# print(tr_means.shape)\n",
+    "# print(tr_means, val_means)\n",
+    "# # Train: [ 194.27633667  145.3067627   181.27861023]\n",
+    "# # Val: [ 192.92971802  142.83534241  180.18870544]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def array_to_img(x, channels, size):\n",
+    "  x = x.reshape((channels,size,size)).transpose((1,2,0))  # shape (H,W,C)\n",
+    "  img = Image.fromarray(x.astype(np.uint8), 'RGB')\n",
+    "  return img\n",
+    "\n",
+    "def img_to_array(img):\n",
+    "  x = np.asarray(img).astype(np.float64)  # shape (H,W,C)\n",
+    "  x = x.transpose(2,0,1).ravel()  # shape (C*H*W)\n",
+    "  return x"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def filter_empty(row, beta=0.15, light_intensity=240):\n",
+    "  x = row.sample.values\n",
+    "#   x = array_to_img(x, channels, size)\n",
+    "  x = x.reshape((channels,size,size)).transpose((1,2,0))  # shape (H,W,C)\n",
+    "  h, w, c = x.shape\n",
+    "  x = x.reshape(-1, c)  # shape (H*W, C)\n",
+    "  OD = -np.log((x+1)/light_intensity)  # shape (H*W, C)\n",
+    "  # Remove data with OD intensity less than beta.\n",
+    "  # I.e. remove transparent pixels.\n",
+    "  OD_thresh = OD[np.all(OD >= beta, 1), :]\n",
+    "  return OD_thresh.size > 2*c"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Filter ~empty samples.\n",
+    "train_rdd = train_df.rdd.filter(filter_empty)\n",
+    "val_rdd = val_df.rdd.filter(filter_empty)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Sanity checks\n",
+    "\n",
+    "# first = train_df.first()\n",
+    "# s = first.sample.values\n",
+    "# i = array_to_img(s, channels, size)\n",
+    "# s2 = img_to_array(i)\n",
+    "# assert np.allclose(s, s2)\n",
+    "\n",
+    "# def assert_finite(row):\n",
+    "#   x = row.sample.values\n",
+    "#   x = x.reshape((channels,size,size)).transpose((1,2,0)) \n",
+    "#   h, w, c = x.shape\n",
+    "#   x = x.reshape(-1, c).astype(np.float64)\n",
+    "#   OD = -np.log((x+1)/240)\n",
+    "#   OD_thresh = OD[np.all(OD >= 0.15, 1), :]\n",
+    "#   assert np.all(np.isfinite(OD_thresh.T))\n",
+    "# train_df.rdd.foreach(assert_finite)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def compute_channel_means(rdd, channels, size):\n",
+    "  \"\"\"Compute the means of each color channel across the dataset.\"\"\"\n",
+    "  def helper(x):\n",
+    "    x = x.sample.values\n",
+    "#     x = array_to_img(x, channels, size)\n",
+    "    x = x.reshape((channels,size,size)).transpose((1,2,0))  # shape (H,W,C)\n",
+    "    x = normalize_staining(x)\n",
+    "    x = np.asarray(x).astype(np.float64)  # shape (H,W,C)\n",
+    "    mu = np.mean(x, axis=(0,1))\n",
+    "    return mu\n",
+    "\n",
+    "  means = rdd.map(helper).collect()\n",
+    "  means = np.array(means)\n",
+    "  means = np.mean(means, axis=0)\n",
+    "  return means"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true,
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "tr_means = compute_channel_means(train_rdd, channels, size)\n",
+    "val_means = compute_channel_means(val_rdd, channels, size)\n",
+    "print(tr_means.shape)\n",
+    "print(tr_means, val_means)\n",
+    "# Means: [194.27633667  145.3067627  181.27861023]\n",
+    "# Means with norm: train [189.54944625  152.73427159  176.89543273] val [187.45282379  150.25695602  175.23754894]\n",
+    "# Means with norm on updated data:\n",
+    "#    [ 177.27269518  136.06809866  165.07305029] [ 176.21991047  134.39199187  163.81433421]\n",
+    "# Means with norm on updated data v3:\n",
+    "#    [ 183.36777842  138.81743141  166.07406199] [ 182.41870536  137.15523608  164.81227273]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Save every image as a JPEG"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def helper(row, channels, size, save_dir):\n",
+    "  tumor_score = row.tumor_score\n",
+    "  sample = row.sample.values\n",
+    "#   img = array_to_img(sample, channels, size)\n",
+    "  x = sample.reshape((channels,size,size)).transpose((1,2,0))  # shape (H,W,C)\n",
+    "  x = normalize_staining(x)\n",
+    "  img = Image.fromarray(x.astype(np.uint8), 'RGB')\n",
+    "  filename = '{index}_{slide_num}_{hash}.jpeg'.format(\n",
+    "      index=row[\"__INDEX\"], slide_num=row.slide_num, hash=np.random.randint(1e4))\n",
+    "  class_dir = os.path.join(save_dir, str(tumor_score))\n",
+    "  path = os.path.join(class_dir, filename)\n",
+    "  img.save(path)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "tr_save_dir = \"images/{stage}/{p}\".format(stage=\"train_updated_norm_v3\", p=p)\n",
+    "val_save_dir = \"images/{stage}/{p}\".format(stage=\"val_updated_norm_v3\", p=val_p)\n",
+    "print(tr_save_dir, val_save_dir)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "%%bash -s \"$tr_save_dir\" \"$val_save_dir\"\n",
+    "for i in 1 2 3\n",
+    "do\n",
+    "  sudo mkdir -p $1/$i\n",
+    "  sudo mkdir -p $2/$i\n",
+    "done\n",
+    "sudo chmod 777 -R $1\n",
+    "sudo chmod 777 -R $2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "# Note: Use this if the DataFrame doesn't have an __INDEX column yet.\n",
+    "# train_df = train_df.withColumn(\"__INDEX\", F.monotonically_increasing_id())\n",
+    "# val_df = val_df.withColumn(\"__INDEX\", F.monotonically_increasing_id())"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "train_df.rdd.filter(filter_empty).foreach(lambda row: helper(row, channels, size, tr_save_dir))\n",
+    "val_df.rdd.filter(filter_empty).foreach(lambda row: helper(row, channels, size, val_save_dir))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "---"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "def show_random_image(save_dir):\n",
+    "  c = np.random.randint(1, 4)\n",
+    "  class_dir = os.path.join(save_dir, str(c))\n",
+    "  files = os.listdir(class_dir)\n",
+    "  i = np.random.randint(0, len(files))\n",
+    "  fname = os.path.join(class_dir, files[i])\n",
+    "  print(fname)\n",
+    "  img = Image.open(fname)\n",
+    "  plt.imshow(img)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "show_random_image(tr_save_dir)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 + Spark 2.x + SystemML",
+   "language": "python",
+   "name": "pyspark3_2.x"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
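For reference, a minimal sketch of applying the `normalize_staining` function defined in the notebook above to a single saved tile. This is not part of the commit; the tile path is hypothetical, and the function is assumed to already be available in the current Python session (e.g. pasted from the cell above).

```python
# Sketch: apply the Macenko-style normalize_staining() from the notebook above to one tile.
# "tile.jpeg" is a hypothetical local 256x256 RGB file; normalize_staining must already be defined.
import numpy as np
from PIL import Image

tile = Image.open("tile.jpeg").convert("RGB")
x = np.asarray(tile)                       # uint8 array of shape (H, W, C)
x_norm = normalize_staining(x)             # stain-normalized uint8 array of shape (H, W, C)
Image.fromarray(x_norm, "RGB").save("tile_normalized.jpeg")
```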

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/Preprocessing.ipynb
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/Preprocessing.ipynb b/projects/breast_cancer/Preprocessing.ipynb
index 9c6850b..f7cd104 100644
--- a/projects/breast_cancer/Preprocessing.ipynb
+++ b/projects/breast_cancer/Preprocessing.ipynb
@@ -2,10 +2,7 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "# Predicting Breast Cancer Proliferation Scores with Apache Spark and Apache SystemML\n",
     "## Preprocessing\n",
@@ -14,10 +11,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "# Setup"
    ]
@@ -26,9 +20,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -58,10 +50,7 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "# Execute Preprocessing & Save"
    ]
@@ -70,9 +59,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": true,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -86,9 +73,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -105,23 +90,23 @@
     "add_row_indices = True\n",
     "train_frac = 0.8\n",
     "split_seed = 24\n",
-    "folder = \"/home/MDM/breast_cancer/data\"\n",
+    "folder = \"data\"  # Linux-filesystem directory to read raw data\n",
     "save_folder = \"data\"  # Hadoop-supported directory in which to save DataFrames\n",
     "df_path = os.path.join(save_folder, \"samples_{}_{}{}.parquet\".format(\n",
     "    \"labels\" if training else \"testing\", sample_size, \"_grayscale\" if grayscale else \"\"))\n",
     "train_df_path = os.path.join(save_folder, \"train_{}{}.parquet\".format(sample_size,\n",
     "    \"_grayscale\" if grayscale else \"\"))\n",
     "val_df_path = os.path.join(save_folder, \"val_{}{}.parquet\".format(sample_size,\n",
-    "    \"_grayscale\" if grayscale else \"\"))"
+    "    \"_grayscale\" if grayscale else \"\"))\n",
+    "\n",
+    "df_path, train_df_path, val_df_path"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -135,9 +120,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -149,9 +132,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": true,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -163,9 +144,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -178,9 +157,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -192,9 +169,7 @@
   {
    "cell_type": "markdown",
    "metadata": {
-    "collapsed": true,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "source": [
     "---"
@@ -202,20 +177,14 @@
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "# Sample Data"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "deletable": true,
-    "editable": true
-   },
+   "metadata": {},
    "source": [
     "### TODO: Wrap this in a function with appropriate default arguments"
    ]
@@ -224,9 +193,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -239,9 +206,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -249,6 +214,7 @@
     "p=0.01\n",
     "train_sample = train.drop(\"__INDEX\").sampleBy(\"tumor_score\", fractions={1: p, 2: p, 3: p}, seed=42)\n",
     "val_sample = val.drop(\"__INDEX\").sampleBy(\"tumor_score\", fractions={1: p, 2: p, 3: p}, seed=42)\n",
+    "\n",
     "train_sample, val_sample"
    ]
   },
@@ -256,9 +222,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": false,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
@@ -270,7 +234,7 @@
     "              .map(lambda r: (r[1] + 1, *r[0]))\n",
     "              .toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))\n",
     "train_sample = train_sample.select(train_sample[\"__INDEX\"].astype(\"int\"),\n",
-    "                                   train_sample.slide_num.astype(\"int\"), \n",
+    "                                   train_sample.slide_num.astype(\"int\"),\n",
     "                                   train_sample.tumor_score.astype(\"int\"),\n",
     "                                   train_sample.molecular_score,\n",
     "                                   train_sample[\"sample\"])\n",
@@ -281,7 +245,7 @@
     "            .map(lambda r: (r[1] + 1, *r[0]))\n",
     "            .toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))\n",
     "val_sample = val_sample.select(val_sample[\"__INDEX\"].astype(\"int\"),\n",
-    "                               val_sample.slide_num.astype(\"int\"), \n",
+    "                               val_sample.slide_num.astype(\"int\"),\n",
     "                               val_sample.tumor_score.astype(\"int\"),\n",
     "                               val_sample.molecular_score,\n",
     "                               val_sample[\"sample\"])\n",
@@ -293,23 +257,24 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {
-    "collapsed": true,
-    "deletable": true,
-    "editable": true
+    "collapsed": true
    },
    "outputs": [],
    "source": [
     "# Save train and validation DataFrames.\n",
-    "tr_sample_filename = \"train_{}_sample_{}{}.parquet\".format(p, sample_size, \"_grayscale\" if grayscale else \"\")\n",
-    "val_sample_filename = \"val_{}_sample_{}{}.parquet\".format(p, sample_size, \"_grayscale\" if grayscale else \"\")\n",
-    "train_sample_path = os.path.join(\"save_folder\", tr_sample_filename)\n",
-    "val_sample_path = os.path.join(\"save_folder\", val_sample_filename)\n",
+    "tr_sample_filename = \"train_{}_sample_{}{}.parquet\".format(p, sample_size,\n",
+    "    \"_grayscale\" if grayscale else \"\")\n",
+    "val_sample_filename = \"val_{}_sample_{}{}.parquet\".format(p, sample_size,\n",
+    "    \"_grayscale\" if grayscale else \"\")\n",
+    "train_sample_path = os.path.join(save_folder, tr_sample_filename)\n",
+    "val_sample_path = os.path.join(save_folder, val_sample_filename)\n",
     "save(train_sample, train_sample_path, sample_size, grayscale)\n",
     "save(val_sample, val_sample_path, sample_size, grayscale)"
    ]
   }
  ],
  "metadata": {
+  "anaconda-cloud": {},
   "kernelspec": {
    "display_name": "Python 3 + Spark 2.x + SystemML",
    "language": "python",
@@ -325,7 +290,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.6.0"
+   "version": "3.6.1"
   }
  },
  "nbformat": 4,

http://git-wip-us.apache.org/repos/asf/systemml/blob/532da1bc/projects/breast_cancer/README.md
----------------------------------------------------------------------
diff --git a/projects/breast_cancer/README.md b/projects/breast_cancer/README.md
index 179eb0f..b7402e8 100644
--- a/projects/breast_cancer/README.md
+++ b/projects/breast_cancer/README.md
@@ -19,33 +19,35 @@ limitations under the License.
 
 # Predicting Breast Cancer Proliferation Scores with Apache Spark and Apache SystemML
 
-Note: This project is still a **work in progress**.
+Note: This project is still a **work in progress**.  There is also an [experimental branch](https://github.com/dusenberrymw/systemml/tree/breast_cancer_experimental2/projects/breast_cancer) with additional files and experiments.
 
 ## Overview
 The [Tumor Proliferation Assessment Challenge 2016 (TUPAC16)](http://tupac.tue-image.nl/) is a "Grand Challenge" that was created for the [2016 Medical Image Computing and Computer Assisted Intervention (MICCAI 2016)](http://miccai2016.org/en/) conference.  In this challenge, the goal is to develop state-of-the-art algorithms for automatic prediction of tumor proliferation scores from whole-slide histopathology images of breast tumors.
 
 ## Background
-Breast cancer is the leading cause of cancerous death in women in less-developed countries, and is the second leading cause of cancerous deaths in developed countries, accounting for 29% of all cancers in women within the U.S. [1]. Survival rates increase as early detection increases, giving incentive for pathologists and the medical world at large to develop improved methods for even earlier detection [2].  There are many forms of breast cancer including Ductal Carcinoma in Situ (DCIS), Invasive Ductal Carcinoma (IDC), Tubular Carcinoma of the Breast, Medullary Carcinoma of the Breast, Invasive Lobular Carcinoma, Inflammatory Breast Cancer and several others [3]. Within all of these forms of breast cancer, the rate in which breast cancer cells grow (proliferation), is a strong indicator of a patient’s prognosis. Although there are many means of determining the presence of breast cancer, tumor proliferation speed has been proven to help pathologists determine the treatment for the patient. The most common technique for determining the proliferation speed is through mitotic count (mitotic index) estimates, in which a pathologist counts the dividing cell nuclei in hematoxylin and eosin (H&E) stained slide preparations to determine the number of mitotic bodies.  Given this, the pathologist produces a proliferation score of either 1, 2, or 3, ranging from better to worse prognosis [4]. Unfortunately, this approach is known to have reproducibility problems due to the variability in counting, as well as the difficulty in distinguishing between different grades.
+Breast cancer is the leading cause of cancerous death in women in less-developed countries, and is the second leading cause of cancerous deaths in developed countries, accounting for 29% of all cancers in women within the U.S. [1]. Survival rates increase as early detection increases, giving incentive for pathologists and the medical world at large to develop improved methods for even earlier detection [2].  There are many forms of breast cancer including Ductal Carcinoma in Situ (DCIS), Invasive Ductal Carcinoma (IDC), Tubular Carcinoma of the Breast, Medullary Carcinoma of the Breast, Invasive Lobular Carcinoma, Inflammatory Breast Cancer and several others [3]. Within all of these forms of breast cancer, the rate in which breast cancer cells grow (proliferation), is a strong indicator of a patient’s prognosis. Although there are many means of determining the presence of breast cancer, tumor proliferation speed has been proven to help pathologists determine the best treatment for the patient. The most common technique for determining the proliferation speed is through mitotic count (mitotic index) estimates, in which a pathologist counts the dividing cell nuclei in hematoxylin and eosin (H&E) stained slide preparations to determine the number of mitotic bodies.  Given this, the pathologist produces a proliferation score of either 1, 2, or 3, ranging from better to worse prognosis [4]. Unfortunately, this approach is known to have reproducibility problems due to the variability in counting, as well as the difficulty in distinguishing between different grades.
 
-References:
-[1] http://emedicine.medscape.com/article/1947145-overview#a3
-[2] http://emedicine.medscape.com/article/1947145-overview#a7
-[3] http://emedicine.medscape.com/article/1954658-overview
-[4] http://emedicine.medscape.com/article/1947145-workup#c12
+References: <br />
+[1] http://emedicine.medscape.com/article/1947145-overview#a3 <br />
+[2] http://emedicine.medscape.com/article/1947145-overview#a7 <br />
+[3] http://emedicine.medscape.com/article/1954658-overview <br />
+[4] http://emedicine.medscape.com/article/1947145-workup#c12 <br />
 
 ## Goal & Approach
-In an effort to automate the process of classification, this project aims to develop a large-scale deep learning approach for predicting tumor scores directly from the pixels of whole-slide histopathology images.  Our proposed approach is based on a recent research paper from Stanford [1].  Starting with 500 extremely high-resolution tumor slide images with accompanying score labels, we aim to make use of Apache Spark in a preprocessing step to cut and filter the images into smaller square samples, generating 4.7 million samples for a total of ~7TB of data [2].  We then utilize Apache SystemML on top of Spark to develop and train a custom, large-scale, deep convolutional neural network on these samples, making use of the familiar linear algebra syntax and automatically-distributed execution of SystemML [3].  Our model takes as input the pixel values of the individual samples, and is trained to predict the correct tumor score classification for each one.  In addition to distributed linear algebra, we aim to exploit task-parallelism via parallel for-loops for hyperparameter optimization, as well as hardware acceleration for faster training via a GPU-backed runtime.  Ultimately, we aim to develop a model that is sufficiently stronger than existing approaches for the task of breast cancer tumor proliferation score classification.
+In an effort to automate the process of classification, this project aims to develop a large-scale deep learning approach for predicting tumor scores directly from the pixels of whole-slide histopathology images.  Our proposed approach is based on a recent research paper from Stanford [1].  Starting with 500 extremely high-resolution tumor slide images with accompanying score labels, we aim to make use of Apache Spark in a preprocessing step to cut and filter the images into smaller square samples, generating 4.7 million samples for a total of ~7TB of data [2].  We then utilize Apache SystemML on top of Spark to develop and train a custom, large-scale, deep convolutional neural network on these samples, making use of the familiar linear algebra syntax and automatically-distributed execution of SystemML [3].  Our model takes as input the pixel values of the individual samples, and is trained to predict the correct tumor score classification for each one.  In addition to distributed linear algebra, we aim to exploit task-parallelism via parallel for-loops for hyperparameter optimization, as well as hardware acceleration for faster training via a GPU-backed runtime.  We also explore a hybrid setup of using Keras for model training (currently transfer learning by fine-tuning a modified ResNet50 model) [4], and SystemML for distributed scoring of exported models.  Ultimately, we aim to develop a model that is sufficiently stronger than existing approaches for the task of breast cancer tumor proliferation score classification.
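
As a rough illustration of the data flow described above, the following sketch loads the preprocessed tile samples back into Spark and inspects the label balance. It is illustrative only: the Parquet path `data/train_256.parquet` and the column names are assumptions based on the preprocessing notebooks and may differ for a given run.

```python
# Sketch only: load preprocessed tile samples and check the tumor-score distribution.
# The path and column names are assumptions taken from Preprocessing.ipynb.
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("BreastCancerExplore").getOrCreate()
train_df = spark.read.parquet("data/train_256.parquet")
train_df.printSchema()                           # expected: __INDEX, slide_num, tumor_score, molecular_score, sample
train_df.groupBy("tumor_score").count().show()   # class balance of the sampled tiles
```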
 
-References:
-[1] https://web.stanford.edu/group/rubinlab/pubs/2243353.pdf
-[2] See [`Preprocessing.ipynb`](Preprocessing.ipynb), and [`breastcancer/preprocessing.py`](breastcancer/preprocessing.py).
-[3] See [`MachineLearning.ipynb`](MachineLearning.ipynb), [`softmax_clf.dml`](softmax_clf.dml), and [`convnet.dml`](convnet.dml).
+References: <br />
+[1] https://web.stanford.edu/group/rubinlab/pubs/2243353.pdf <br />
+[2] [`Preprocessing.ipynb`](Preprocessing.ipynb), [`preprocess.py`](preprocess.py), [`breastcancer/preprocessing.py`](breastcancer/preprocessing.py) <br />
+[3] [`MachineLearning.ipynb`](MachineLearning.ipynb), [`softmax_clf.dml`](breastcancer/softmax_clf.dml), [`convnet.dml`](breastcancer/convnet.dml) <br />
+[4] [`MachineLearning-Keras-ResNet50.ipynb`](MachineLearning-Keras-ResNet50.ipynb)
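
For context on [4], here is a minimal, hedged sketch of Keras transfer learning with a ResNet50 backbone. The 3-class softmax head, 224x224 input size, optimizer choice, and the random placeholder batch are illustrative assumptions, not the notebook's actual configuration.

```python
# Hedged sketch of ResNet50 fine-tuning for 3 tumor-score classes; see
# MachineLearning-Keras-ResNet50.ipynb for the project's real model and training loop.
import numpy as np
from keras.applications.resnet50 import ResNet50
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model

base = ResNet50(weights="imagenet", include_top=False, input_shape=(224, 224, 3))
features = GlobalAveragePooling2D()(base.output)
predictions = Dense(3, activation="softmax")(features)   # tumor scores 1, 2, 3
model = Model(inputs=base.input, outputs=predictions)

for layer in base.layers:                                 # freeze the pretrained backbone first
    layer.trainable = False
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])

# Random placeholder batch purely to show the call signature.
x_batch = np.random.rand(8, 224, 224, 3).astype("float32")
y_batch = np.eye(3)[np.random.randint(0, 3, size=8)]
model.fit(x_batch, y_batch, epochs=1, batch_size=4)
```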
 
 ![Approach](https://apache.github.io/systemml/img/projects/breast_cancer/approach.svg)
 
 ---
 
## Setup (*All nodes* unless otherwise specified):
+* Spark 2.x (ideally bleeding-edge)
 * System Packages:
   * `sudo yum update`
   * `sudo yum install gcc ruby`
@@ -60,11 +62,14 @@ References:
   * `sudo yum install openslide`
 * Python packages:
   * `pip3 install -U matplotlib numpy pandas scipy jupyter ipython scikit-learn scikit-image flask openslide-python`
-* SystemML (only driver):
+* SystemML (bleeding-edge; only driver):
   * `git clone https://github.com/apache/systemml.git`
   * `cd systemml`
   * `mvn clean package`
   * `pip3 install -e src/main/python`
+* Keras (bleeding-edge; only driver):
+  * `pip3 install git+https://github.com/fchollet/keras.git`
+  * `pip3 install tensorflow-gpu` (or `pip3 install tensorflow` for CPU-only)
 * Add the following to the `data` folder (same location on *all* nodes):
   * `training_image_data` folder with the training slides.
   * `testing_image_data` folder with the testing slides.
@@ -72,12 +77,13 @@ References:
 * Layout:
   ```
   - MachineLearning.ipynb
+  - MachineLearning-Keras-ResNet50.ipynb
   - Preprocessing.ipynb
   - breastcancer/
+    - convnet.dml
+    - softmax_clf.dml
     - preprocessing.py
     - visualization.py
-  - convnet.dml
-  - nn/
   - ...
   - data/
     - training_ground_truth.csv
@@ -117,14 +123,14 @@ References:
     spark.executor.memory 50g
     ```
 
-  * Machine Learning:
+  * Machine Learning (SystemML):
     ```
     # Use all executor memory for JVM
     spark.executor.memory 100g
     ```
 
 * `cd` to this `breast_cancer` folder.
-* Start Jupyter + PySpark with the following command (could also use Yarn in client mode with `--master yarn --deploy-mode`):
+* Start Jupyter + PySpark with the following command (could also use Yarn in client mode with `--master yarn --deploy-mode client`):
   ```
   PYSPARK_PYTHON=python3 PYSPARK_DRIVER_PYTHON=jupyter PYSPARK_DRIVER_PYTHON_OPTS="notebook" pyspark --master spark://MASTER_URL:7077 --driver-class-path $SYSTEMML_HOME/target/SystemML.jar --jars $SYSTEMML_HOME/target/SystemML.jar
   ```