Posted to commits@systemml.apache.org by ni...@apache.org on 2017/04/19 22:08:32 UTC

[1/3] incubator-systemml git commit: [SYSTEMML-692] Added initial version of DML generator for Caffe

Repository: incubator-systemml
Updated Branches:
  refs/heads/master ad3e78a28 -> cc7993fc8


http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/scala/org/apache/sysml/api/dl/CaffeSolver.scala
----------------------------------------------------------------------
diff --git a/src/main/scala/org/apache/sysml/api/dl/CaffeSolver.scala b/src/main/scala/org/apache/sysml/api/dl/CaffeSolver.scala
new file mode 100644
index 0000000..755949d
--- /dev/null
+++ b/src/main/scala/org/apache/sysml/api/dl/CaffeSolver.scala
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.sysml.api.dl
+
+import caffe.Caffe.SolverParameter
+import org.apache.sysml.runtime.DMLRuntimeException
+import caffe.Caffe
+
+trait CaffeSolver {
+  def sourceFileName:String;
+  def update(dmlScript:StringBuilder, layer:CaffeLayer):Unit;
+  def init(dmlScript:StringBuilder, layer:CaffeLayer):Unit;
+  
+  // ----------------------------------------------------------------
+  // Used for Fine-tuning
+  private def getLayerLr(layer:CaffeLayer, paramIndex:Int):String = {
+    val param = layer.param.getParamList
+    if(param == null || param.size() <= paramIndex)
+      return "lr"
+    else
+      // TODO: Ignoring param.get(index).getDecayMult for now
+      return "(lr * " + param.get(paramIndex).getLrMult + ")"
+  }
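+  // e.g., if the layer's first param block sets lr_mult: 0.1, getWeightLr(layer) returns the DML expression "(lr * 0.1)";
+  // without a param block the plain learning rate "lr" is used.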
+  // the first param { } is for the weights and the second is for the biases.
+  def getWeightLr(layer:CaffeLayer):String = getLayerLr(layer, 0)
+  def getBiasLr(layer:CaffeLayer):String = getLayerLr(layer, 1)
+  // ----------------------------------------------------------------
+  
+  def commaSep(arr:String*):String = {
+	  if(arr.length == 1) arr(0) else {
+	    var ret = arr(0)
+	    for(i <- 1 until arr.length) {
+	      ret = ret + "," + arr(i)
+	    }
+	    ret
+	  }
+	}
+  
+  def l2reg_update(lambda:Double, dmlScript:StringBuilder, layer:CaffeLayer):Unit = {
+    // val donotRegularizeLayers:Boolean = layer.isInstanceOf[BatchNorm] || layer.isInstanceOf[Scale];
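+    // When lambda != 0 and the layer has weights, appends DML of the form:
+    //   dW_reg = l2_reg::backward(W, <lambda>);  dW = dW + dW_reg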
+    if(lambda != 0 && layer.shouldUpdateWeight) {
+      dmlScript.append("\t").append(layer.dWeight + "_reg = l2_reg::backward(" + layer.weight + ", " + lambda + ")\n")
+      dmlScript.append("\t").append(layer.dWeight + " = " + layer.dWeight + " + " + layer.dWeight + "_reg\n")
+    }
+  }
+}
+
+class LearningRatePolicy(lr_policy:String="exp", base_lr:Double=0.01) {
+  def this(solver:Caffe.SolverParameter) {
+    this(solver.getLrPolicy, solver.getBaseLr)
+    if(solver.hasGamma) setGamma(solver.getGamma)
+    if(solver.hasStepsize) setStepsize(solver.getStepsize)
+    if(solver.hasPower()) setPower(solver.getPower)
+  }
+  var gamma:Double = 0.95
+  var step:Double = 100000
+  var power:Double = 0.75
+  def setGamma(gamma1:Double):Unit = { gamma = gamma1 } 
+  def setStepsize(step1:Double):Unit = { step = step1 } 
+  def setPower(power1:Double): Unit = { power = power1 }
+  def updateLearningRate(dmlScript:StringBuilder):Unit = {
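+    // With the defaults (lr_policy="exp", base_lr=0.01, gamma=0.95) this emits the DML statement: lr = (0.01 * 0.95^e)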
+    val new_lr = lr_policy.toLowerCase match {
+      case "fixed" => base_lr.toString
+      case "step" => "(" + base_lr + " * " +  gamma + " ^ " + " floor(e/" + step + "))"
+      case "exp" => "(" + base_lr + " * " + gamma + "^e)"
+      case "inv" =>  "(" + base_lr + "* (1 + " + gamma + " * e) ^ (-" + power + "))"
+      case "poly" => "(" + base_lr  + " * (1 - e/ max_epochs) ^ " + power + ")"
+      case "sigmoid" => "(" + base_lr + "( 1/(1 + exp(-" + gamma + "* (e - " + step + "))))"
+      case _ => throw new DMLRuntimeException("The lr policy is not supported:" + lr_policy)
+    }
+    dmlScript.append("lr = " + new_lr + "\n")
+  }
+}
+
+/**
+ * lambda: regularization parameter
+ * momentum: Momentum value. Typical values are in the range of [0.5, 0.99], usually started at the lower end and annealed towards the higher end.
+ */
+class SGD(lambda:Double=5e-04, momentum:Double=0.9) extends CaffeSolver {
+  def update(dmlScript:StringBuilder, layer:CaffeLayer):Unit = {
+    l2reg_update(lambda, dmlScript, layer)
+    if(momentum == 0) {
+      // Use sgd
+      if(layer.shouldUpdateWeight) dmlScript.append("\t").append(layer.weight + " = sgd::update(" + commaSep(layer.weight, layer.dWeight, getWeightLr(layer)) + ")\n")
+      if(layer.shouldUpdateBias) dmlScript.append("\t").append(layer.bias + " = sgd::update(" + commaSep(layer.bias, layer.dBias, getBiasLr(layer)) + ")\n")
+    }
+    else {
+      // Use sgd_momentum
+      if(layer.shouldUpdateWeight) dmlScript.append("\t").append("["+ commaSep(layer.weight, layer.weight+"_v") + "] " + 
+          "= sgd_momentum::update(" + commaSep(layer.weight, layer.dWeight, getWeightLr(layer), momentum.toString, layer.weight+"_v") + ")\n")
+      if(layer.shouldUpdateBias) dmlScript.append("\t").append("["+ commaSep(layer.bias, layer.bias+"_v") + "] " + 
+          "= sgd_momentum::update(" + commaSep(layer.bias, layer.dBias, getBiasLr(layer), momentum.toString, layer.bias+"_v") + ")\n")
+    }
+  }
+  def init(dmlScript:StringBuilder, layer:CaffeLayer):Unit = {
+    if(momentum != 0) {
+      if(layer.shouldUpdateWeight) dmlScript.append(layer.weight+"_v = sgd_momentum::init(" + layer.weight + ")\n")
+      if(layer.shouldUpdateBias) dmlScript.append(layer.bias+"_v = sgd_momentum::init(" + layer.bias + ")\n")
+    }
+  }
+  def sourceFileName:String = if(momentum == 0) "sgd" else "sgd_momentum" 
+}
+
+/**
+ * lambda: regularization parameter
+ * epsilon: Smoothing term to avoid divide by zero errors. Typical values are in the range of [1e-8, 1e-4].
+ * 
+ * See Adaptive Subgradient Methods for Online Learning and Stochastic Optimization, Duchi et al.
+ */
+class AdaGrad(lambda:Double=5e-04, epsilon:Double=1e-6) extends CaffeSolver {
+  def update(dmlScript:StringBuilder, layer:CaffeLayer):Unit = {
+    l2reg_update(lambda, dmlScript, layer)
+    if(layer.shouldUpdateWeight) dmlScript.append("\t").append("["+ commaSep(layer.weight, layer.weight+"_cache") + "] " + 
+        "= adagrad::update(" + commaSep(layer.weight, layer.dWeight, getWeightLr(layer), epsilon.toString, layer.weight+"_cache") + ")\n")
+    if(layer.shouldUpdateBias) dmlScript.append("\t").append("["+ commaSep(layer.bias, layer.bias+"_cache") + "] " + 
+        "= adagrad::update(" + commaSep(layer.bias, layer.dBias, getBiasLr(layer), epsilon.toString, layer.bias+"_cache") + ")\n")
+  }
+  def init(dmlScript:StringBuilder, layer:CaffeLayer):Unit = {
+    if(layer.shouldUpdateWeight) dmlScript.append(layer.weight+"_cache = adagrad::init(" + layer.weight + ")\n")
+    if(layer.shouldUpdateBias) dmlScript.append(layer.bias+"_cache = adagrad::init(" + layer.bias + ")\n")
+  }
+  def sourceFileName:String = "adagrad"
+}
+
+/**
+ * lambda: regularization parameter
+ * momentum: Momentum value. Typical values are in the range of [0.5, 0.99], usually started at the lower end and annealed towards the higher end.
+ */
+class Nesterov(lambda:Double=5e-04, momentum:Double=0.9) extends CaffeSolver {
+  def update(dmlScript:StringBuilder, layer:CaffeLayer):Unit = {
+    l2reg_update(lambda, dmlScript, layer)
+    if(layer.shouldUpdateWeight) dmlScript.append("\t").append("["+ commaSep(layer.weight, layer.weight+"_v") + "] " + 
+        "= sgd_nesterov::update(" + commaSep(layer.weight, layer.dWeight, getWeightLr(layer), momentum.toString, layer.weight+"_v") + ")\n")
+    if(layer.shouldUpdateBias) dmlScript.append("\t").append("["+ commaSep(layer.bias, layer.bias+"_v") + "] " + 
+        "= sgd_nesterov::update(" + commaSep(layer.bias, layer.dBias, getBiasLr(layer), momentum.toString, layer.bias+"_v") + ")\n")
+  }
+  def init(dmlScript:StringBuilder, layer:CaffeLayer):Unit = {
+    if(layer.shouldUpdateWeight) dmlScript.append(layer.weight+"_v = sgd_nesterov::init(" + layer.weight + ")\n")
+    if(layer.shouldUpdateBias) dmlScript.append(layer.bias+"_v = sgd_nesterov::init(" + layer.bias + ")\n")
+  }
+  def sourceFileName:String = "sgd_nesterov"
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/scala/org/apache/sysml/api/dl/DMLGenerator.scala
----------------------------------------------------------------------
diff --git a/src/main/scala/org/apache/sysml/api/dl/DMLGenerator.scala b/src/main/scala/org/apache/sysml/api/dl/DMLGenerator.scala
new file mode 100644
index 0000000..ec4269a
--- /dev/null
+++ b/src/main/scala/org/apache/sysml/api/dl/DMLGenerator.scala
@@ -0,0 +1,311 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.sysml.api.dl
+
+import java.util.HashSet
+import caffe.Caffe.LayerParameter;
+import caffe.Caffe.NetParameter;
+import caffe.Caffe.SolverParameter;
+import org.apache.sysml.runtime.DMLRuntimeException;
+import scala.collection.JavaConversions._
+import caffe.Caffe
+
+trait BaseDMLGenerator {
+  def commaSep(arr:List[String]):String = {
+	  if(arr.length == 1) arr(0) else {
+	    var ret = arr(0)
+	    for(i <- 1 until arr.length) {
+	      ret = ret + "," + arr(i)
+	    }
+	    ret
+	  }
+	}
+  def commaSep(arr:String*):String = {
+	  if(arr.length == 1) arr(0) else {
+	    var ret = arr(0)
+	    for(i <- 1 until arr.length) {
+	      ret = ret + "," + arr(i)
+	    }
+	    ret
+	  }
+	}
+  def int_add(v1:String, v2:String):String = {
+    try { (v1.toDouble + v2.toDouble).toInt.toString } catch { case _:Throwable => "("+v1+"+"+v2+")"}
+  }
+  def int_mult(v1:String, v2:String, v3:String):String = {
+    try { (v1.toDouble * v2.toDouble * v3.toDouble).toInt.toString } catch { case _:Throwable => "("+v1+"*"+v2+"*"+v3+")"}
+  }
+  def isNumber(x: String):Boolean = x forall Character.isDigit
+  def transpose(x:String):String = "t(" + x + ")"
+  def write(varName:String, fileName:String, format:String):String = "write(" + varName + ", \"" + fileName + "\", format=\"" + format + "\")\n"
+  def read(varName:String, fileName:String, sep:String="/"):String =  varName + " = read(weights + \"" + sep + fileName + "\")\n"
+  def asDMLString(str:String):String = "\"" + str + "\""
+  def assign(dmlScript:StringBuilder, lhsVar:String, rhsVar:String):Unit = {
+    dmlScript.append(lhsVar).append(" = ").append(rhsVar).append("\n")
+  }
+  def sum(dmlScript:StringBuilder, variables:List[String]):StringBuilder = {
+    if(variables.length > 1) dmlScript.append("(")
+    dmlScript.append(variables(0))
+    for(i <- 1 until variables.length) {
+      dmlScript.append(" + ").append(variables(i))
+    }
+    if(variables.length > 1) dmlScript.append(")")
+    return dmlScript
+  }
+  def addAndAssign(dmlScript:StringBuilder, lhsVar:String, rhsVars:List[String]):Unit = {
+    dmlScript.append(lhsVar).append(" = ")
+    sum(dmlScript, rhsVars)
+    dmlScript.append("\n")
+  }
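+  // Appends a DML function call of the form: [r1,r2] = <namespace><functionName>(arg1,arg2,...)
+  // (the surrounding brackets are omitted when there is a single return variable)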
+  def invoke(dmlScript:StringBuilder, namespace1:String, returnVariables:List[String], functionName:String, arguments:List[String]):Unit = {
+    if(returnVariables.length == 0) throw new DMLRuntimeException("User-defined functions should have at least one return value")
+    if(returnVariables.length > 1) dmlScript.append("[")
+    dmlScript.append(returnVariables(0))
+    if(returnVariables.length > 1) {
+      for(i <- 1 until returnVariables.length) {
+	      dmlScript.append(",").append(returnVariables(i))
+	    }
+      dmlScript.append("]")
+    }
+    dmlScript.append(" = ")
+    dmlScript.append(namespace1)
+    dmlScript.append(functionName)
+    dmlScript.append("(")
+    if(arguments != null) {
+      if(arguments.length != 0) 
+        dmlScript.append(arguments(0))
+      if(arguments.length > 1) {
+        for(i <- 1 until arguments.length) {
+  	      dmlScript.append(",").append(arguments(i))
+  	    }
+      }
+    }
+    dmlScript.append(")\n")
+  }
+  def invoke(dmlScript:StringBuilder, namespace1:String, returnVariables:List[String], functionName:String, arguments:String*):Unit = {
+    invoke(dmlScript, namespace1, returnVariables, functionName, arguments.toList)
+  }
+  def rightIndexing(dmlScript:StringBuilder, varName:String, rl:String, ru:String, cl:String, cu:String):StringBuilder = {
+    dmlScript.append(varName).append("[")
+    if(rl != null && ru != null) dmlScript.append(rl).append(":").append(ru)
+    dmlScript.append(",")
+    if(cl != null && cu != null) dmlScript.append(cl).append(":").append(cu)
+    dmlScript.append("]")
+  }
+  // Performs assignVar = ceil(lhsVar/rhsVar)
+  def ceilDivide(dmlScript:StringBuilder, assignVar:String, lhsVar:String, rhsVar:String):Unit = 
+    dmlScript.append(assignVar).append(" = ").append("ceil(").append(lhsVar).append(" / ").append(rhsVar).append(")\n")
+  def print(arg:String):String = "print(" + arg + ")\n"
+  def dmlConcat(arg:String*):String = {
+    val ret = new StringBuilder
+    ret.append(arg(0))
+    for(i <- 1 until arg.length) {
+      ret.append(" + ").append(arg(i))
+    }
+    ret.toString
+  }
+  def matrix(init:String, rows:String, cols:String):String = "matrix(" + init + ", rows=" + rows + ", cols=" + cols + ")" 
+  def nrow(m:String):String = "nrow(" + m + ")"
+  def ncol(m:String):String = "ncol(" + m + ")"
+  def customAssert(cond:Boolean, msg:String) = if(!cond) throw new DMLRuntimeException(msg)
+}
+
+trait TabbedDMLGenerator extends BaseDMLGenerator {
+  def tabDMLScript(dmlScript:StringBuilder, numTabs:Int):StringBuilder =  tabDMLScript(dmlScript, numTabs, false)
+  def tabDMLScript(dmlScript:StringBuilder, numTabs:Int, prependNewLine:Boolean):StringBuilder =  {
+    if(prependNewLine) dmlScript.append("\n")
+	  for(i <- 0 until numTabs) dmlScript.append("\t")
+	  dmlScript
+  }
+}
+
+trait SourceDMLGenerator extends TabbedDMLGenerator {
+  val alreadyImported:HashSet[String] = new HashSet[String]
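+  // Emits a DML source statement of the form: source("<dir><sourceFileName>.dml") as <sourceFileName>, at most once per file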
+  def source(dmlScript:StringBuilder, numTabs:Int, sourceFileName:String, dir:String):Unit = {
+	  if(sourceFileName != null && !alreadyImported.contains(sourceFileName)) {
+      tabDMLScript(dmlScript, numTabs).append("source(\"" + dir +  sourceFileName + ".dml\") as " + sourceFileName + "\n")
+      alreadyImported.add(sourceFileName)
+	  }
+  }
+  def source(dmlScript:StringBuilder, numTabs:Int, net:CaffeNetwork, solver:CaffeSolver, otherFiles:Array[String]):Unit = {
+	  // Add layers with multiple source files
+	  if(net.getLayers.filter(layer => net.getCaffeLayer(layer).isInstanceOf[SoftmaxWithLoss]).length > 0) {
+	    source(dmlScript, numTabs, "softmax", Caffe2DML.layerDir)
+	    source(dmlScript, numTabs, "cross_entropy_loss", Caffe2DML.layerDir)
+	  }
+	  net.getLayers.map(layer =>  source(dmlScript, numTabs, net.getCaffeLayer(layer).sourceFileName, Caffe2DML.layerDir))
+	  if(solver != null)
+	    source(dmlScript, numTabs, solver.sourceFileName, Caffe2DML.optimDir)
+	  if(otherFiles != null)
+	    otherFiles.map(sourceFileName => source(dmlScript, numTabs, sourceFileName, Caffe2DML.layerDir))
+	}
+}
+
+trait NextBatchGenerator extends TabbedDMLGenerator {
+	def min(lhs:String, rhs:String): String = "min(" + lhs + ", " + rhs + ")"
+	
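+	// Emits one line of DML that slices out the next mini-batch, e.g. (with an empty indexPrefix):
+	//   beg = ((i-1) * <batchSize>) %% N + 1; end = min(beg + <batchSize> - 1, N); Xb = X[beg:end,]; yb = y[beg:end,];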
+	def assignBatch(dmlScript:StringBuilder, Xb:String, X:String, yb:String, y:String, indexPrefix:String, N:String, i:String):StringBuilder = {
+	  dmlScript.append(indexPrefix).append("beg = ((" + i + "-1) * " + Caffe2DML.batchSize + ") %% " + N + " + 1; ")
+	  dmlScript.append(indexPrefix).append("end = min(beg + " +  Caffe2DML.batchSize + " - 1, " + N + "); ")
+	  dmlScript.append(Xb).append(" = ").append(X).append("[").append(indexPrefix).append("beg:").append(indexPrefix).append("end,]; ")
+	  if(yb != null && y != null)
+	    dmlScript.append(yb).append(" = ").append(y).append("[").append(indexPrefix).append("beg:").append(indexPrefix).append("end,]; ")
+	  dmlScript.append("\n")
+	}
+	def getTestBatch(tabDMLScript:StringBuilder):Unit = {
+    assignBatch(tabDMLScript, "Xb", Caffe2DML.X, null, null, "", Caffe2DML.numImages, "i")
+  } 
+  def getTrainingBatch(tabDMLScript:StringBuilder):Unit = {
+    assignBatch(tabDMLScript, "Xb", Caffe2DML.X, "yb", Caffe2DML.y, "", Caffe2DML.numImages, "i")
+  }
+	def getTrainingBatch(tabDMLScript:StringBuilder, X:String, y:String, numImages:String):Unit = {
+	  assignBatch(tabDMLScript, "Xb", X, "yb", y, "", numImages, "i")
+  }
+  def getTrainingMaxiBatch(tabDMLScript:StringBuilder):Unit = {
+    assignBatch(tabDMLScript, "X_group_batch", Caffe2DML.X, "y_group_batch", Caffe2DML.y, "group_", Caffe2DML.numImages, "g")
+  }
+  def getValidationBatch(tabDMLScript:StringBuilder):Unit = {
+    assignBatch(tabDMLScript, "Xb", Caffe2DML.XVal, "yb", Caffe2DML.yVal, "", Caffe2DML.numValidationImages, "iVal")
+  }
+}
+
+trait VisualizeDMLGenerator extends TabbedDMLGenerator {
+  var doVisualize = false
+  var _tensorboardLogDir:String = null
+  def setTensorBoardLogDir(log:String): Unit = { _tensorboardLogDir = log }
+  def tensorboardLogDir:String = {
+    if(_tensorboardLogDir == null) {
+      _tensorboardLogDir = java.io.File.createTempFile("temp", System.nanoTime().toString()).getAbsolutePath
+    }
+    _tensorboardLogDir
+  }
+  def visualizeLoss(): Unit = {
+	   checkTensorBoardDependency()
+	   doVisualize = true
+	   // Visualize for both training and validation
+	   visualize(" ", " ", "training_loss", "iter", "training_loss", true)
+	   visualize(" ", " ", "training_accuracy", "iter", "training_accuracy", true)
+	   visualize(" ", " ", "validation_loss", "iter", "validation_loss", false)
+	   visualize(" ", " ", "validation_accuracy", "iter", "validation_accuracy", false)
+	}
+  val visTrainingDMLScript: StringBuilder = new StringBuilder 
+  val visValidationDMLScript: StringBuilder = new StringBuilder
+	def checkTensorBoardDependency():Unit = {
+	  try {
+	    if(!doVisualize)
+	      Class.forName( "com.google.protobuf.GeneratedMessageV3")
+	  } catch {
+	    case _:ClassNotFoundException => throw new DMLRuntimeException("To use visualize() feature, you will have to include protobuf-java-3.2.0.jar in your classpath. Hint: you can download the jar from http://central.maven.org/maven2/com/google/protobuf/protobuf-java/3.2.0/protobuf-java-3.2.0.jar")   
+	  }
+	}
+  private def visualize(layerName:String, varType:String, aggFn:String, x:String, y:String,  isTraining:Boolean) = {
+    val dmlScript = if(isTraining) visTrainingDMLScript else visValidationDMLScript
+    dmlScript.append("viz_counter1 = visualize(" + 
+        commaSep(asDMLString(layerName), asDMLString(varType), asDMLString(aggFn), x, y, asDMLString(tensorboardLogDir))
+        + ");\n")
+    dmlScript.append("viz_counter = viz_counter + viz_counter1\n")
+  }
+  def appendVisualizationHeaders(dmlScript:StringBuilder, numTabs:Int): Unit = {
+    if(doVisualize) {
+	    tabDMLScript(dmlScript, numTabs).append("visualize = externalFunction(String layerName, String varType, String aggFn, Double x, Double y, String logDir) return (Double B) " +
+	        "implemented in (classname=\"org.apache.sysml.udf.lib.Caffe2DMLVisualizeWrapper\",exectype=\"mem\"); \n")
+	    tabDMLScript(dmlScript, numTabs).append("viz_counter = 0\n")
+	    System.out.println("Please use the following command for visualizing: tensorboard --logdir=" + tensorboardLogDir)
+	  }
+  }
+  def visualizeLayer(net:CaffeNetwork, layerName:String, varType:String, aggFn:String): Unit = {
+	  // 'weight', 'bias', 'dweight', 'dbias', 'output' or 'doutput'
+	  // 'sum', 'mean', 'var' or 'sd'
+	  checkTensorBoardDependency()
+	  doVisualize = true
+	  if(net.getLayers.filter(_.equals(layerName)).size == 0)
+	    throw new DMLRuntimeException("Cannot visualize the layer:" + layerName)
+	  val dmlVar = {
+	    val l = net.getCaffeLayer(layerName)
+	    varType match {
+	      case "weight" => l.weight
+	      case "bias" => l.bias
+	      case "dweight" => l.dWeight
+	      case "dbias" => l.dBias
+	      case "output" => l.out
+	      case "doutput" => l.dX
+	      case _ => throw new DMLRuntimeException("Cannot visualize the variable of type:" + varType)
+	    }
+	   }
+	  if(dmlVar == null)
+	    throw new DMLRuntimeException("Cannot visualize the variable of type:" + varType)
+	  // Visualize for both training and validation
+	  visualize(layerName, varType, aggFn, "iter", aggFn + "(" + dmlVar + ")", true)
+	  visualize(layerName, varType, aggFn, "iter", aggFn + "(" + dmlVar + ")", false)
+	}
+  
+  def appendTrainingVisualizationBody(dmlScript:StringBuilder, numTabs:Int): Unit = {
+    if(doVisualize)
+        tabDMLScript(dmlScript, numTabs).append(visTrainingDMLScript.toString)
+  }
+  def appendValidationVisualizationBody(dmlScript:StringBuilder, numTabs:Int): Unit = {
+    if(doVisualize)
+        tabDMLScript(dmlScript, numTabs).append(visValidationDMLScript.toString)
+  }
+}
+
+trait DMLGenerator extends SourceDMLGenerator with NextBatchGenerator with VisualizeDMLGenerator {
+  // Also makes "code reading" possible for Caffe2DML :)
+	var dmlScript = new StringBuilder
+	var numTabs = 0
+	def reset():Unit = {
+	  dmlScript.clear()
+	  alreadyImported.clear()
+	  numTabs = 0
+	  visTrainingDMLScript.clear()
+	  visValidationDMLScript.clear()
+	  doVisualize = false
+	}
+	// -------------------------------------------------------------------------------------------------
+	// Helper functions that calls super class methods and simplifies the code of this trait
+	def tabDMLScript():StringBuilder = tabDMLScript(dmlScript, numTabs, false)
+	def tabDMLScript(prependNewLine:Boolean):StringBuilder = tabDMLScript(dmlScript, numTabs, prependNewLine)
+	def source(net:CaffeNetwork, solver:CaffeSolver, otherFiles:Array[String]):Unit = {
+	  source(dmlScript, numTabs, net, solver, otherFiles)
+	}
+	// -------------------------------------------------------------------------------------------------
+	
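+	// Block helpers: open a DML control-flow block, increment numTabs while the body is generated by op, then close it.
+	// e.g. forBlock("i", "1", "10") { ... } emits: for(i in 1:10) { <body indented one tab deeper> }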
+	def ifBlock(cond:String)(op: => Unit) {
+	  tabDMLScript.append("if(" + cond + ") {\n")
+	  numTabs += 1
+	  op
+	  numTabs -= 1
+	  tabDMLScript.append("}\n")
+	}
+	def forBlock(iterVarName:String, startVal:String, endVal:String)(op: => Unit) {
+	  tabDMLScript.append("for(" + iterVarName + " in " + startVal + ":" + endVal + ") {\n")
+	  numTabs += 1
+	  op
+	  numTabs -= 1
+	  tabDMLScript.append("}\n")
+	}
+	def parForBlock(iterVarName:String, startVal:String, endVal:String)(op: => Unit) {
+	  tabDMLScript.append("parfor(" + iterVarName + " in " + startVal + ":" + endVal + ") {\n")
+	  numTabs += 1
+	  op
+	  numTabs -= 1
+	  tabDMLScript.append("}\n")
+	}
+	
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/scala/org/apache/sysml/api/dl/Utils.scala
----------------------------------------------------------------------
diff --git a/src/main/scala/org/apache/sysml/api/dl/Utils.scala b/src/main/scala/org/apache/sysml/api/dl/Utils.scala
new file mode 100644
index 0000000..b9d6d33
--- /dev/null
+++ b/src/main/scala/org/apache/sysml/api/dl/Utils.scala
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.sysml.api.dl
+import scala.collection.JavaConversions._
+import caffe.Caffe.LayerParameter;
+import caffe.Caffe.NetParameter;
+import org.apache.sysml.parser.LanguageException;
+import com.google.protobuf.TextFormat;
+import org.apache.sysml.conf.ConfigurationManager;
+import org.apache.sysml.runtime.util.LocalFileUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import caffe.Caffe.SolverParameter;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import org.apache.sysml.runtime.DMLRuntimeException
+import java.io.StringReader
+import java.io.BufferedReader
+
+object Utils {
+  // ---------------------------------------------------------------------------------------------
+  // Helper methods for DML generation
+  
+  // Returns number of classes if inferred from the network
+  def numClasses(net:CaffeNetwork):String = {
+  	try {
+  		return "" + net.getCaffeLayer(net.getLayers().last).outputShape._1.toLong
+  	} catch {
+  		case _:Throwable => {
+  			Caffe2DML.LOG.warn("Cannot infer the number of classes from network definition. User needs to pass it via set(num_classes=...) method.")
+  			return "$num_classes" // Expect users to provide it
+  		}
+  	}
+  }
+  def prettyPrintDMLScript(script:String) {
+	  val bufReader = new BufferedReader(new StringReader(script))
+	  var line = bufReader.readLine();
+	  var lineNum = 1
+	  while( line != null ) {
+		  System.out.println( "%03d".format(lineNum) + "|" + line)
+		  lineNum = lineNum + 1
+		  line = bufReader.readLine()
+	  }
+  }
+  
+  // ---------------------------------------------------------------------------------------------
+  def parseSolver(solverFilePath:String): CaffeSolver = parseSolver(readCaffeSolver(solverFilePath))
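+  // Maps the Caffe solver 'type' to the corresponding SystemML-NN optimizer (sgd/sgd_momentum, adagrad or sgd_nesterov)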
+	def parseSolver(solver:SolverParameter): CaffeSolver = {
+	  val momentum = if(solver.hasMomentum) solver.getMomentum else 0.0
+	  val lambda = if(solver.hasWeightDecay) solver.getWeightDecay else 0.0
+	  val delta = if(solver.hasDelta) solver.getDelta else 0.0
+	  
+	  solver.getType.toLowerCase match {
+	    case "sgd" => new SGD(lambda, momentum)
+	    case "adagrad" => new AdaGrad(lambda, delta)
+	    case "nesterov" => new Nesterov(lambda, momentum)
+	    case _ => throw new DMLRuntimeException("The solver type is not supported: " + solver.getType + ". Try: SGD, AdaGrad or Nesterov.")
+	  }
+    
+  }
+	
+  def getPrefix():String = {
+    val f = new File("nn")
+    if(f.exists() && f.isDirectory()) {
+      Caffe2DML.LOG.info("Since nn directory exists in current folder, using it.")
+      return "nn"
+    }
+    else {
+      // TODO: Extract from the jar
+      throw new RuntimeException("In current version, we require that you download the nn folder into current directory from https://github.com/apache/incubator-systemml/tree/master/scripts/staging/SystemML-NN")
+    }
+  }
+  
+	// --------------------------------------------------------------
+	// Caffe utility functions
+	def readCaffeNet(netFilePath:String):NetParameter = {
+		val reader:InputStreamReader = getInputStreamReader(netFilePath); 
+  	val builder:NetParameter.Builder =  NetParameter.newBuilder();
+  	TextFormat.merge(reader, builder);
+  	return builder.build();
+	}
+	
+	def readCaffeSolver(solverFilePath:String):SolverParameter = {
+		val reader = getInputStreamReader(solverFilePath);
+		val builder =  SolverParameter.newBuilder();
+		TextFormat.merge(reader, builder);
+		return builder.build();
+	}
+	
+	// --------------------------------------------------------------
+	// File IO utility functions
+	def getInputStreamReader(filePath:String ):InputStreamReader = {
+		//read solver script from file
+		if(filePath == null)
+			throw new LanguageException("file path was not specified!");
+		if(filePath.startsWith("hdfs:")  || filePath.startsWith("gpfs:")) { 
+			if( !LocalFileUtils.validateExternalFilename(filePath, true) )
+				throw new LanguageException("Invalid (non-trustworthy) hdfs filename.");
+			val fs = FileSystem.get(ConfigurationManager.getCachedJobConf());
+			return new InputStreamReader(fs.open(new Path(filePath)));
+		}
+		else { 
+			if( !LocalFileUtils.validateExternalFilename(filePath, false) )
+				throw new LanguageException("Invalid (non-trustworthy) local filename.");
+			return new InputStreamReader(new FileInputStream(new File(filePath)), "ASCII");
+		}
+	}
+	// --------------------------------------------------------------
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala
----------------------------------------------------------------------
diff --git a/src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala b/src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala
index fb9697d..a104c5c 100644
--- a/src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala
+++ b/src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala
@@ -33,6 +33,8 @@ import org.apache.sysml.api.mlcontext._
 import org.apache.sysml.api.mlcontext.ScriptFactory._
 import org.apache.spark.sql._
 import org.apache.sysml.api.mlcontext.MLContext.ExplainLevel
+import java.util.HashMap
+import scala.collection.JavaConversions._
 
 trait HasLaplace extends Params {
   final val laplace: Param[Double] = new Param[Double](this, "laplace", "Laplace smoothing specified by the user to avoid creation of 0 probabilities.")
@@ -65,8 +67,25 @@ trait HasRegParam extends Params {
   final def getRegParam: Double = $(regParam)
 }
 
-trait BaseSystemMLEstimator {
-  
+trait BaseSystemMLEstimatorOrModel {
+  var enableGPU:Boolean = false
+  var explain:Boolean = false
+  var statistics:Boolean = false
+  val config:HashMap[String, String] = new HashMap[String, String]()
+  def setGPU(enableGPU1:Boolean):BaseSystemMLEstimatorOrModel = { enableGPU = enableGPU1; this}
+  def setExplain(explain1:Boolean):BaseSystemMLEstimatorOrModel = { explain = explain1; this}
+  def setStatistics(statistics1:Boolean):BaseSystemMLEstimatorOrModel = { statistics = statistics1; this}
+  def setConfigProperty(key:String, value:String):BaseSystemMLEstimatorOrModel = { config.put(key, value); this}
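+  // Pushes these settings into the MLContext used for training/prediction; called from baseFit/baseTransform below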
+  def updateML(ml:MLContext):Unit = {
+    ml.setGPU(enableGPU); ml.setExplain(explain); ml.setStatistics(statistics); config.map(x => ml.setConfigProperty(x._1, x._2))
+  }
+  def copyProperties(other:BaseSystemMLEstimatorOrModel):BaseSystemMLEstimatorOrModel = {
+    other.setGPU(enableGPU); other.setExplain(explain); other.setStatistics(statistics); config.map(x => other.setConfigProperty(x._1, x._2))
+    return other
+  }
+}
+
+trait BaseSystemMLEstimator extends BaseSystemMLEstimatorOrModel {
   def transformSchema(schema: StructType): StructType = schema
   
   // Returns the script and variables for X and y
@@ -79,9 +98,10 @@ trait BaseSystemMLEstimator {
   def toDouble(d:Double): java.lang.Double = {
     double2Double(d)
   }
+  
 }
 
-trait BaseSystemMLEstimatorModel {
+trait BaseSystemMLEstimatorModel extends BaseSystemMLEstimatorOrModel {
   def toDouble(i:Int): java.lang.Double = {
     double2Double(i.toDouble)
   }
@@ -96,19 +116,19 @@ trait BaseSystemMLEstimatorModel {
 }
 
 trait BaseSystemMLClassifier extends BaseSystemMLEstimator {
-  
   def baseFit(X_mb: MatrixBlock, y_mb: MatrixBlock, sc: SparkContext): MLResults = {
     val isSingleNode = true
     val ml = new MLContext(sc)
+    updateML(ml)
     y_mb.recomputeNonZeros();
     val ret = getTrainingScript(isSingleNode)
     val script = ret._1.in(ret._2, X_mb).in(ret._3, y_mb)
     ml.execute(script)
   }
-  
   def baseFit(df: ScriptsUtils.SparkDataType, sc: SparkContext): MLResults = {
     val isSingleNode = false
     val ml = new MLContext(df.rdd.sparkContext)
+    updateML(ml)
     val mcXin = new MatrixCharacteristics()
     val Xin = RDDConverterUtils.dataFrameToBinaryBlock(sc, df.asInstanceOf[DataFrame].select("features"), mcXin, false, true)
     val revLabelMapping = new java.util.HashMap[Int, String]
@@ -121,10 +141,11 @@ trait BaseSystemMLClassifier extends BaseSystemMLEstimator {
 }
 
 trait BaseSystemMLClassifierModel extends BaseSystemMLEstimatorModel {
-  
+
   def baseTransform(X: MatrixBlock, mloutput: MLResults, sc: SparkContext, probVar:String): MatrixBlock = {
     val isSingleNode = true
     val ml = new MLContext(sc)
+    updateML(ml)
     val script = getPredictionScript(mloutput, isSingleNode)
     // Uncomment for debugging
     // ml.setExplainLevel(ExplainLevel.RECOMPILE_RUNTIME)
@@ -137,11 +158,12 @@ trait BaseSystemMLClassifierModel extends BaseSystemMLEstimatorModel {
     }
     return ret
   }
-  
+
   def baseTransform(df: ScriptsUtils.SparkDataType, mloutput: MLResults, sc: SparkContext, 
       probVar:String, outputProb:Boolean=true): DataFrame = {
     val isSingleNode = false
     val ml = new MLContext(sc)
+    updateML(ml)
     val mcXin = new MatrixCharacteristics()
     val Xin = RDDConverterUtils.dataFrameToBinaryBlock(df.rdd.sparkContext, df.asInstanceOf[DataFrame].select("features"), mcXin, false, true)
     val script = getPredictionScript(mloutput, isSingleNode)
@@ -161,4 +183,4 @@ trait BaseSystemMLClassifierModel extends BaseSystemMLEstimatorModel {
     }
     
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/scala/org/apache/sysml/api/ml/BaseSystemMLRegressor.scala
----------------------------------------------------------------------
diff --git a/src/main/scala/org/apache/sysml/api/ml/BaseSystemMLRegressor.scala b/src/main/scala/org/apache/sysml/api/ml/BaseSystemMLRegressor.scala
index 08154bb..5dd23e0 100644
--- a/src/main/scala/org/apache/sysml/api/ml/BaseSystemMLRegressor.scala
+++ b/src/main/scala/org/apache/sysml/api/ml/BaseSystemMLRegressor.scala
@@ -38,6 +38,7 @@ trait BaseSystemMLRegressor extends BaseSystemMLEstimator {
   def baseFit(X_mb: MatrixBlock, y_mb: MatrixBlock, sc: SparkContext): MLResults = {
     val isSingleNode = true
     val ml = new MLContext(sc)
+    updateML(ml)
     val ret = getTrainingScript(isSingleNode)
     val script = ret._1.in(ret._2, X_mb).in(ret._3, y_mb)
     ml.execute(script)
@@ -46,6 +47,7 @@ trait BaseSystemMLRegressor extends BaseSystemMLEstimator {
   def baseFit(df: ScriptsUtils.SparkDataType, sc: SparkContext): MLResults = {
     val isSingleNode = false
     val ml = new MLContext(df.rdd.sparkContext)
+    updateML(ml)
     val mcXin = new MatrixCharacteristics()
     val Xin = RDDConverterUtils.dataFrameToBinaryBlock(sc, df.asInstanceOf[DataFrame], mcXin, false, true)
     val yin = df.select("label")
@@ -61,6 +63,7 @@ trait BaseSystemMLRegressorModel extends BaseSystemMLEstimatorModel {
   def baseTransform(X: MatrixBlock, mloutput: MLResults, sc: SparkContext, predictionVar:String): MatrixBlock = {
     val isSingleNode = true
     val ml = new MLContext(sc)
+    updateML(ml)
     val script = getPredictionScript(mloutput, isSingleNode)
     val modelPredict = ml.execute(script._1.in(script._2, X))
     val ret = modelPredict.getBinaryBlockMatrix(predictionVar).getMatrixBlock
@@ -74,6 +77,7 @@ trait BaseSystemMLRegressorModel extends BaseSystemMLEstimatorModel {
   def baseTransform(df: ScriptsUtils.SparkDataType, mloutput: MLResults, sc: SparkContext, predictionVar:String): DataFrame = {
     val isSingleNode = false
     val ml = new MLContext(sc)
+    updateML(ml)
     val mcXin = new MatrixCharacteristics()
     val Xin = RDDConverterUtils.dataFrameToBinaryBlock(df.rdd.sparkContext, df.asInstanceOf[DataFrame], mcXin, false, true)
     val script = getPredictionScript(mloutput, isSingleNode)

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/scala/org/apache/sysml/api/ml/LogisticRegression.scala
----------------------------------------------------------------------
diff --git a/src/main/scala/org/apache/sysml/api/ml/LogisticRegression.scala b/src/main/scala/org/apache/sysml/api/ml/LogisticRegression.scala
index ce89502..9f3d844 100644
--- a/src/main/scala/org/apache/sysml/api/ml/LogisticRegression.scala
+++ b/src/main/scala/org/apache/sysml/api/ml/LogisticRegression.scala
@@ -142,4 +142,4 @@ object LogisticRegressionExample {
 
     lrmodel.transform(testing.toDF).show
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/scala/org/apache/sysml/api/ml/NaiveBayes.scala
----------------------------------------------------------------------
diff --git a/src/main/scala/org/apache/sysml/api/ml/NaiveBayes.scala b/src/main/scala/org/apache/sysml/api/ml/NaiveBayes.scala
index a7b3a74..9161a8f 100644
--- a/src/main/scala/org/apache/sysml/api/ml/NaiveBayes.scala
+++ b/src/main/scala/org/apache/sysml/api/ml/NaiveBayes.scala
@@ -106,4 +106,4 @@ class NaiveBayesModel(override val uid: String)
   def transform(X: MatrixBlock): MatrixBlock = baseTransform(X, mloutput, sc, "probs")
   def transform(df: ScriptsUtils.SparkDataType): DataFrame = baseTransform(df, mloutput, sc, "probs")
   
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/scala/org/apache/sysml/api/ml/SVM.scala
----------------------------------------------------------------------
diff --git a/src/main/scala/org/apache/sysml/api/ml/SVM.scala b/src/main/scala/org/apache/sysml/api/ml/SVM.scala
index ea24de6..db8ce3a 100644
--- a/src/main/scala/org/apache/sysml/api/ml/SVM.scala
+++ b/src/main/scala/org/apache/sysml/api/ml/SVM.scala
@@ -110,4 +110,4 @@ class SVMModel (override val uid: String)(val mloutput: MLResults, val sc: Spark
   
   def transform(X: MatrixBlock): MatrixBlock = baseTransform(X, mloutput, sc, "scores")
   def transform(df: ScriptsUtils.SparkDataType): DataFrame = baseTransform(df, mloutput, sc, "scores")
-}
\ No newline at end of file
+}



[3/3] incubator-systemml git commit: [SYSTEMML-692] Added initial version of DML generator for Caffe

Posted by ni...@apache.org.
[SYSTEMML-692] Added initial version of DML generator for Caffe

This experimental interface is called Caffe2DML and does not affect other functionality.

- Updated the interface to match the Caffe specification as per
  @bertholdreinwald's suggestion.
- Added support for fine-tuning.
- Added support for explain, statistics and gpu (a brief usage sketch follows below).

Closes #422.

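A brief usage sketch of the new explain/statistics/GPU toggles from Python (a minimal sketch only; `setStatistics` appears in the updated docs, while `setExplain` and `setGPU` are assumed to mirror the Scala setters added in this commit):

    from systemml.mllearn import Caffe2DML
    lenet = Caffe2DML(sqlCtx, solver='lenet_solver.proto')  # placeholder solver file
    lenet.setStatistics(True)   # print SystemML statistics after execution
    lenet.setExplain(True)      # assumed setter, mirrors BaseSystemMLEstimatorOrModel.setExplain
    lenet.setGPU(True)          # assumed setter, mirrors BaseSystemMLEstimatorOrModel.setGPU
    lenet.fit(X_train, y_train)
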

Project: http://git-wip-us.apache.org/repos/asf/incubator-systemml/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-systemml/commit/cc7993fc
Tree: http://git-wip-us.apache.org/repos/asf/incubator-systemml/tree/cc7993fc
Diff: http://git-wip-us.apache.org/repos/asf/incubator-systemml/diff/cc7993fc

Branch: refs/heads/master
Commit: cc7993fc87ccf7d404bc8802f9529aee7da5de5e
Parents: ad3e78a
Author: Niketan Pansare <np...@us.ibm.com>
Authored: Wed Apr 19 14:07:44 2017 -0800
Committer: Niketan Pansare <np...@us.ibm.com>
Committed: Wed Apr 19 15:07:43 2017 -0700

----------------------------------------------------------------------
 docs/beginners-guide-caffe2dml.md               |  124 ++
 docs/devdocs/deep-learning.md                   |   84 ++
 pom.xml                                         |   47 +-
 .../cp/AggregateUnaryCPInstruction.java         |    2 +-
 .../sysml/runtime/util/ConvolutionUtils.java    |   12 +
 .../udf/lib/Caffe2DMLVisualizeWrapper.java      |   66 +
 .../apache/sysml/utils/TensorboardLogger.java   |  177 +++
 src/main/proto/caffe/caffe.proto                | 1424 ++++++++++++++++++
 src/main/proto/tensorflow/event.proto           |  102 ++
 src/main/proto/tensorflow/summary.proto         |  123 ++
 src/main/python/setup.py                        |    4 +-
 src/main/python/systemml/converters.py          |   31 +-
 src/main/python/systemml/mllearn/estimators.py  |  168 ++-
 .../org/apache/sysml/api/dl/Caffe2DML.scala     |  510 +++++++
 .../org/apache/sysml/api/dl/CaffeLayer.scala    |  357 +++++
 .../org/apache/sysml/api/dl/CaffeNetwork.scala  |  180 +++
 .../org/apache/sysml/api/dl/CaffeSolver.scala   |  158 ++
 .../org/apache/sysml/api/dl/DMLGenerator.scala  |  311 ++++
 .../scala/org/apache/sysml/api/dl/Utils.scala   |  127 ++
 .../sysml/api/ml/BaseSystemMLClassifier.scala   |   38 +-
 .../sysml/api/ml/BaseSystemMLRegressor.scala    |    4 +
 .../sysml/api/ml/LogisticRegression.scala       |    2 +-
 .../org/apache/sysml/api/ml/NaiveBayes.scala    |    2 +-
 .../scala/org/apache/sysml/api/ml/SVM.scala     |    2 +-
 24 files changed, 4036 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/docs/beginners-guide-caffe2dml.md
----------------------------------------------------------------------
diff --git a/docs/beginners-guide-caffe2dml.md b/docs/beginners-guide-caffe2dml.md
new file mode 100644
index 0000000..cfcc0cb
--- /dev/null
+++ b/docs/beginners-guide-caffe2dml.md
@@ -0,0 +1,124 @@
+---
+layout: global
+title: Beginner's Guide for Caffe2DML users
+description: Beginner's Guide for Caffe2DML users
+---
+<!--
+{% comment %}
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to you under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+{% endcomment %}
+-->
+
+* This will become a table of contents (this text will be scraped).
+{:toc}
+
+<br/>
+
+## Introduction
+
+Caffe2DML is an experimental API that converts a Caffe specification to DML.
+
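+A minimal usage sketch (assuming a solver file named `lenet_solver.proto`; see the FAQ below and the Caffe2DML examples in `docs/devdocs/deep-learning.md` for complete end-to-end scripts):
+
+	from systemml.mllearn import Caffe2DML
+	lenet = Caffe2DML(sqlCtx, solver='lenet_solver.proto')
+	lenet.fit(X_train, y_train)      # X_train/y_train: numpy arrays or a Spark DataFrame with 'features'/'label'
+	y_pred = lenet.predict(X_test)
+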
+## Frequently asked questions
+
+- How to set the batch size?
+
+Batch size is set in `data_param` of the Data layer:
+
+	layer {
+	  name: "mnist"
+	  type: "Data"
+	  top: "data"
+	  top: "label"
+	  data_param {
+	    source: "mnist_train"
+	    batch_size: 64
+	    backend: LMDB
+	  }
+	}
+	
+- How to set the maximum number of iterations for training?
+
+Caffe allows you to set the maximum number of iterations in the solver specification:
+
+	# The maximum number of iterations
+	max_iter: 2000
+	
+- How to set the size of the validation dataset?
+
+The size of the validation dataset is determined by the parameter `test_iter` and the batch size. For example, if the batch size is 64 and
+`test_iter` is 10, then the validation set size is 640. This setting internally generates the following DML code:
+
+	num_images = nrow(y_full)
+	BATCH_SIZE = 64
+	num_validation = 10 * BATCH_SIZE
+	X = X_full[(num_validation+1):num_images,]; y = y_full[(num_validation+1):num_images,]
+	X_val = X_full[1:num_validation,]; y_val = y_full[1:num_validation,]
+	num_images = nrow(y) 
+
+- How to monitor loss via the command line?
+
+To monitor loss, set the following parameters in the solver specification:
+
+	# Display training loss and accuracy every 100 iterations
+	display: 100
+	# Carry out validation every 500 training iterations and display validation loss and accuracy.
+	test_iter: 10
+	test_interval: 500
+	
+- How to pass a single JPEG image to Caffe2DML for prediction?
+ 
+	from PIL import Image
+	import systemml as sml
+	from systemml.mllearn import Caffe2DML
+	img_shape = (3, 224, 224)
+	input_image = sml.convertImageToNumPyArr(Image.open(img_file_path), img_shape=img_shape)
+	resnet = Caffe2DML(sqlCtx, solver='ResNet_50_solver.proto', weights='ResNet_50_pretrained_weights', input_shape=img_shape)
+	resnet.predict(input_image)
+
+- How to prepare a directory of JPEG images for training with Caffe2DML?
+
+The example below assumes that the input dataset has two labels, `cat` and `dog`, and that each filename has its label as a prefix.
+We iterate through the directory and convert each JPEG image into a `pyspark.ml.linalg.Vector` using PySpark.
+These vectors are stored in a DataFrame and shuffled using Spark SQL's `orderBy(rand())` function.
+The DataFrame is then saved in Parquet format to reduce the cost of preprocessing for repeated training.
+
+	from systemml.mllearn import Caffe2DML
+	from pyspark.sql import SQLContext
+	import numpy as np
+	import urllib, os, scipy.ndimage
+	from pyspark.ml.linalg import Vectors
+	from pyspark import StorageLevel
+	import systemml as sml
+	from pyspark.sql.functions import rand 
+	# ImageNet specific parameters
+	img_shape = (3, 224, 224)
+	train_dir = '/home/biuser/dogs_vs_cats/train'
+	def getLabelFeatures(filename):
+		from PIL import Image
+		vec = Vectors.dense(sml.convertImageToNumPyArr(Image.open(os.path.join(train_dir, filename)), img_shape=img_shape)[0,:])
+		if filename.lower().startswith('cat'):
+			return (1, vec)
+		elif filename.lower().startswith('dog'):
+			return (2, vec)
+		else:
+			raise ValueError('Expected the filename to start with either cat or dog')
+	
+	list_jpeg_files = os.listdir(train_dir)
+	# 10 files per partition
+	train_df = sc.parallelize(list_jpeg_files, int(len(list_jpeg_files)/10)).map(lambda filename : getLabelFeatures(filename)).toDF(['label', 'features']).orderBy(rand())
+	# Optional, but helps separate the conversion-related preprocessing from training
+	# Alternatively, this dataframe can be passed directly to `caffe2dml_model.fit(train_df)`
+	train_df.write.parquet('kaggle-cats-dogs.parquet')
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/docs/devdocs/deep-learning.md
----------------------------------------------------------------------
diff --git a/docs/devdocs/deep-learning.md b/docs/devdocs/deep-learning.md
index 1fb951a..329c6c8 100644
--- a/docs/devdocs/deep-learning.md
+++ b/docs/devdocs/deep-learning.md
@@ -139,3 +139,87 @@ updates for the image:
 |-----------------|---------------------------------|-----------------|
 | `w3*y1 + w1*y3` | `w4*y1 + w3*y2 + w2*y3 + w1*y4` | `w4*y2 + w2*y4` |
 | `w3*y3`         | `w4*y3 + w3*y4`                 | `w4*y4`         |
+
+# Caffe2DML examples
+
+## Training a Lenet network on MNIST using Caffe2DML
+
+The script below also demonstrates how to save the trained model.
+
+```python
+# Download the MNIST dataset
+from mlxtend.data import mnist_data
+import numpy as np
+from sklearn.utils import shuffle
+X, y = mnist_data()
+X, y = shuffle(X, y)
+num_classes = np.unique(y).shape[0]
+img_shape = (1, 28, 28)
+
+# Split the data into training and test
+n_samples = len(X)
+X_train = X[:int(.9 * n_samples)]
+y_train = y[:int(.9 * n_samples)]
+X_test = X[int(.9 * n_samples):]
+y_test = y[int(.9 * n_samples):]
+
+# Download the Lenet network
+import urllib
+urllib.urlretrieve('https://raw.githubusercontent.com/niketanpansare/model_zoo/master/caffe/vision/lenet/mnist/lenet.proto', 'lenet.proto')
+urllib.urlretrieve('https://raw.githubusercontent.com/niketanpansare/model_zoo/master/caffe/vision/lenet/mnist/lenet_solver.proto', 'lenet_solver.proto')
+
+# Train Lenet On MNIST using scikit-learn like API
+from systemml.mllearn import Caffe2DML
+lenet = Caffe2DML(sqlCtx, solver='lenet_solver.proto').set(max_iter=500, debug=True).setStatistics(True)
+print('Lenet score: %f' % lenet.fit(X_train, y_train).score(X_test, y_test))
+
+# Save the trained model
+lenet.save('lenet_model')
+```
+
+## Load the trained model and retrain (i.e., fine-tuning)
+
+```python
+# Fine-tune the existing trained model
+new_lenet = Caffe2DML(sqlCtx, solver='lenet_solver.proto', weights='lenet_model').set(max_iter=500, debug=True)
+new_lenet.fit(X_train, y_train)
+new_lenet.save('lenet_model')
+```
+
+## Perform prediction using the above trained model
+
+```python
+# Use the new model for prediction
+predict_lenet = Caffe2DML(sqlCtx, solver='lenet_solver.proto', weights='lenet_model')
+print('Lenet score: %f' % predict_lenet.score(X_test, y_test))
+```
+
+Similarly, you can perform prediction using the pre-trained ResNet network:
+
+```python
+from systemml.mllearn import Caffe2DML
+from pyspark.sql import SQLContext
+import numpy as np
+import urllib, os, scipy.ndimage
+from PIL import Image
+import systemml as sml
+
+# ImageNet specific parameters
+img_shape = (3, 224, 224)
+
+# Download a jpg image, resize it to 224 x 224, and return it as a numpy array in N x CHW format
+url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/5/58/MountainLion.jpg/312px-MountainLion.jpg'
+outFile = 'test.jpg'
+urllib.urlretrieve(url, outFile)
+input_image = sml.convertImageToNumPyArr(Image.open(outFile), img_shape=img_shape)
+
+# Download the ResNet network
+import urllib
+urllib.urlretrieve('https://raw.githubusercontent.com/niketanpansare/model_zoo/master/caffe/vision/resnet/ilsvrc12/ResNet_50_network.proto', 'ResNet_50_network.proto')
+urllib.urlretrieve('https://raw.githubusercontent.com/niketanpansare/model_zoo/master/caffe/vision/resnet/ilsvrc12/ResNet_50_solver.proto', 'ResNet_50_solver.proto')
+
+# Assumes that you have cloned the model_zoo repository
+# git clone https://github.com/niketanpansare/model_zoo.git
+resnet = Caffe2DML(sqlCtx, solver='ResNet_50_solver.proto', weights='~/model_zoo/caffe/vision/resnet/ilsvrc12/ResNet_50_pretrained_weights').set(input_shape=img_shape)
+resnet.predict(input_image)
+```
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index eba7f57..d107f64 100644
--- a/pom.xml
+++ b/pom.xml
@@ -324,6 +324,46 @@
 					</execution>
 				</executions>
 			</plugin>
+			
+			<plugin>
+			    <groupId>com.github.os72</groupId>
+			    <artifactId>protoc-jar-maven-plugin</artifactId>
+			    <version>3.0.0-b2.1</version>
+			    <executions>
+			        <execution>
+			        	<id>caffe-sources</id>
+			            <phase>generate-sources</phase>
+			            <goals>
+			                <goal>run</goal>
+			            </goals>
+			            <configuration>
+			                <protocVersion>2.5.0</protocVersion> <!-- 2.4.1, 2.5.0, 2.6.1, 3.0.0 -->
+			                <inputDirectories>
+			                    <include>src/main/proto/caffe</include>
+			                </inputDirectories>
+			                <outputDirectories>
+			                    <include>src/main/java</include>
+			                </outputDirectories>
+			            </configuration>
+			        </execution>
+			        <execution>
+			        	<id>tf-sources</id>
+			            <phase>generate-sources</phase>
+			            <goals>
+			                <goal>run</goal>
+			            </goals>
+			            <configuration>
+			                <protocVersion>3.0.0</protocVersion> <!-- 2.4.1, 2.5.0, 2.6.1, 3.0.0 -->
+			                <inputDirectories>
+			                    <include>src/main/proto/tensorflow</include>
+			                </inputDirectories>
+			                <outputDirectories>
+			                    <include>src/main/java</include>
+			                </outputDirectories>
+			            </configuration>
+			        </execution>
+			    </executions>
+			</plugin>
 
 			<!-- Currently, all tests are integration tests. -->
 			<plugin>
@@ -1076,7 +1116,12 @@
 
 
 	<dependencies>
-
+		<dependency>
+	  		<groupId>com.google.protobuf</groupId>
+	  		<artifactId>protobuf-java</artifactId>
+	  		<version>3.2.0</version>
+	  		<scope>provided</scope>
+	  	</dependency>
 		<dependency>
 			<groupId>org.jcuda</groupId>
 			<artifactId>jcuda</artifactId>

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/java/org/apache/sysml/runtime/instructions/cp/AggregateUnaryCPInstruction.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/runtime/instructions/cp/AggregateUnaryCPInstruction.java b/src/main/java/org/apache/sysml/runtime/instructions/cp/AggregateUnaryCPInstruction.java
index 8790a53..8dd372a 100644
--- a/src/main/java/org/apache/sysml/runtime/instructions/cp/AggregateUnaryCPInstruction.java
+++ b/src/main/java/org/apache/sysml/runtime/instructions/cp/AggregateUnaryCPInstruction.java
@@ -121,7 +121,7 @@ public class AggregateUnaryCPInstruction extends UnaryCPInstruction
 						rval = mc.getRows() * mc.getCols();
 				}
 				else {
-					throw new DMLRuntimeException("Invalid meta data returned by '"+opcode+"': "+rval);
+					throw new DMLRuntimeException("Invalid meta data returned by '"+opcode+"': "+rval + ":" + instString);
 				}
 			}
 			

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/java/org/apache/sysml/runtime/util/ConvolutionUtils.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/runtime/util/ConvolutionUtils.java b/src/main/java/org/apache/sysml/runtime/util/ConvolutionUtils.java
index 80b20cd..814cf22 100644
--- a/src/main/java/org/apache/sysml/runtime/util/ConvolutionUtils.java
+++ b/src/main/java/org/apache/sysml/runtime/util/ConvolutionUtils.java
@@ -22,6 +22,18 @@ package org.apache.sysml.runtime.util;
 
 public class ConvolutionUtils {
 	
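+	// Returns a DML expression for the convolution output dimension: (H + 2*heightPadding - R) / verticalStride + 1;
+	// the expression is constant-folded (via getP) when all arguments are numeric literals.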
+	public static String getConv2dOutputMap(String H, String R, String verticalStride, String heightPadding) {
+		long padX2 = -1;
+		try {
+			padX2 = Long.parseLong(heightPadding)*2;
+			return "" + getP(Long.parseLong(H), Long.parseLong(R), Long.parseLong(verticalStride), Long.parseLong(heightPadding));
+		} catch(Exception e) {
+			  if(padX2 == -1) 			return "((" + H + " + 2*" + heightPadding + " - " + R + ") / " + verticalStride + "+ 1)";
+			  else if(padX2 == 0) 	return "((" + H + " - " + R + ") / " + verticalStride + "+ 1)";
+			  else 									return "((" + H + " + " + padX2 + " - " + R + ") / " + verticalStride + "+ 1)";
+		}
+	}
+	
 	public static long getP(long H, long R, long verticalStride, long heightPadding) {
 		long ret = (H + 2 * heightPadding - R) / verticalStride + 1;
 		if(ret <= 0) {

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/java/org/apache/sysml/udf/lib/Caffe2DMLVisualizeWrapper.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/udf/lib/Caffe2DMLVisualizeWrapper.java b/src/main/java/org/apache/sysml/udf/lib/Caffe2DMLVisualizeWrapper.java
new file mode 100644
index 0000000..15c867b
--- /dev/null
+++ b/src/main/java/org/apache/sysml/udf/lib/Caffe2DMLVisualizeWrapper.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.sysml.udf.lib;
+
+import org.apache.sysml.udf.FunctionParameter;
+import org.apache.sysml.udf.PackageFunction;
+import org.apache.sysml.udf.Scalar;
+import org.apache.sysml.udf.Scalar.ScalarValueType;
+import org.apache.sysml.utils.TensorboardLogger;
+
+public class Caffe2DMLVisualizeWrapper extends PackageFunction 
+{
+	private static final long serialVersionUID = 1L;
+	private Scalar _ret;
+
+	@Override
+	public int getNumFunctionOutputs() {
+		return 1;
+	}
+
+	@Override
+	public FunctionParameter getFunctionOutput(int pos) {
+		if (pos == 0)
+			return _ret;
+
+		throw new RuntimeException(
+				"Invalid function output being requested");
+	}
+
+	@Override
+	public void execute() {
+		String layerName = ((Scalar) this.getFunctionInput(0)).getValue();
+		String varType = ((Scalar) this.getFunctionInput(1)).getValue();
+		String aggFn = ((Scalar) this.getFunctionInput(2)).getValue();
+		double x = Double.parseDouble(((Scalar) this.getFunctionInput(3)).getValue());
+		double y = Double.parseDouble(((Scalar) this.getFunctionInput(4)).getValue());
+		String logDir = ((Scalar) this.getFunctionInput(5)).getValue();
+
+		String key = null;
+		if(aggFn.equals("training_loss") || aggFn.equals("validation_loss") ||
+				aggFn.equals("training_accuracy") || aggFn.equals("validation_accuracy"))
+			key = aggFn;
+		else
+			key = aggFn + "_" + varType + "_" + layerName;
+		TensorboardLogger.writeScalar(logDir, key, (long)x, (float)y);
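+		// The UDF is called for its side effect of logging; a constant 1.0 scalar is returned.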
+		_ret = new Scalar(ScalarValueType.Double, String.valueOf(1));
+	}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/java/org/apache/sysml/utils/TensorboardLogger.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/utils/TensorboardLogger.java b/src/main/java/org/apache/sysml/utils/TensorboardLogger.java
new file mode 100644
index 0000000..245d757
--- /dev/null
+++ b/src/main/java/org/apache/sysml/utils/TensorboardLogger.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.sysml.utils;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.zip.Checksum;
+import org.tensorflow.framework.Summary;
+import org.tensorflow.util.Event;
+
+import com.google.common.primitives.Ints;
+import com.google.common.primitives.Longs;
+
+public class TensorboardLogger {
+	private static Crc32c crc32 = new Crc32c();
+	
+	/**
+	 * Writes a scalar summary with the given value in the TensorBoard event-file format.
+	 * 
+	 * @param logDir log directory read by TensorBoard
+	 * @param tag scalar tag (for example: training_loss, validation_loss, ...)
+	 * @param step usually the iteration number
+	 * @param value value of the scalar
+	 */
+	public static void writeScalar(String logDir, String tag, long step, float value) {
+		String filePath = logDir + File.separator + "tfevents.event_systemml_scalar"; 
+		try {
+			FileOutputStream outputStream = new FileOutputStream(filePath, true);
+			Event event = Event.newBuilder()
+					.setWallTime(System.currentTimeMillis() / 1e3)
+					.setStep(step)
+					.setSummary(Summary.newBuilder().addValue(
+							Summary.Value.newBuilder().setTag(tag).setSimpleValue(value)
+							).build())
+							.build();
+			byte[] eventString = event.toByteArray();
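+			// Record framing follows the TensorFlow event-file (TFRecord) layout:
+			// 8-byte little-endian length, masked CRC32C of the length bytes,
+			// the serialized Event, and a masked CRC32C of the Event bytes.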
+			byte[] header = reverse(Longs.toByteArray((long)eventString.length));
+			write(outputStream, header);
+			write(outputStream, eventString);
+			outputStream.close();
+		}
+		catch(IOException e) {
+			throw new RuntimeException("Error writing event in tensorboard directory:" + filePath, e);
+		}
+	}
+
+	private static void write(FileOutputStream outputStream, byte[] byteString) throws IOException {
+		outputStream.write(byteString);
+		outputStream.write(reverse(Ints.toByteArray((int)maskedCRC32(byteString))));
+	}
+
+	private static byte[] reverse(byte[] nums) {
+		byte[] reversed = new byte[nums.length];
+		for (int i=0; i<nums.length; i++) {
+			reversed[i] = nums[nums.length - 1 - i];
+		}
+		return reversed;
+	}
+
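+	// CRC masking as used by the TensorFlow event-file format: rotate the
+	// 32-bit CRC right by 15 bits and add a fixed constant.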
+	private static long maskedCRC32(byte[] data){
+		crc32.reset();
+		crc32.update(data, 0, data.length);
+		long x = u32(crc32.getValue());
+		return u32(((x >> 15) | u32(x << 17)) + 0xa282ead8);
+	}
+
+	private static long u32(long x){
+		return x & 0xffffffffL; // the L suffix keeps the mask from sign-extending to all ones
+	}
+}
+
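+// Minimal table-driven CRC-32C (Castagnoli) implementation backing the masked
+// checksums above; only the byte-array update path is implemented.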
+class Crc32c implements Checksum {
+	private static final int[] crcTable = {
+		0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
+		0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
+		0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
+		0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
+		0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
+		0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
+		0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
+		0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
+		0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
+		0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
+		0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
+		0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
+		0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
+		0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
+		0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
+		0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
+		0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
+		0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
+		0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
+		0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
+		0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
+		0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
+		0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
+		0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
+		0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
+		0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
+		0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
+		0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
+		0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
+		0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
+		0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
+		0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
+		0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
+		0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
+		0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
+		0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
+		0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
+		0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
+		0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
+		0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
+		0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
+		0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
+		0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
+		0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
+		0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
+		0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
+		0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
+		0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
+		0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
+		0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
+		0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
+		0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
+		0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
+		0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
+		0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
+		0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
+		0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
+		0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
+		0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
+		0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
+		0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
+		0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
+		0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
+		0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351,
+	};
+
+	private int crc = ~0;
+
+	public void update(byte[] buffer, int offset, int length) {
+		for (int i = offset; i < offset + length; i++) {
+			crc = crc32c(crc, buffer[i]);
+		}
+	}
+	public long getValue() {
+		return (crc ^ 0xFFFFFFFFL) & 0xFFFFFFFFL;
+	}
+	public void reset() {
+		crc = ~0;
+	}
+	private static int crc32c(int crc, int b) {
+		return crc >>> 8 ^ crcTable[(crc ^ b & 0xFF) & 0xFF];
+	}
+	public void update(int arg0) {
+		throw new RuntimeException("Not implemented");
+	}
+}

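As a usage sketch (class name, directory, and values below are illustrative; only TensorboardLogger.writeScalar comes from this patch), the logger can be exercised directly and the resulting event file rendered by pointing TensorBoard at the directory (tensorboard --logdir /tmp/systemml_tb):

    import java.io.File;
    import org.apache.sysml.utils.TensorboardLogger;

    // Hypothetical example class writing a toy loss curve.
    public class TensorboardLoggerExample {
        public static void main(String[] args) {
            String logDir = "/tmp/systemml_tb";
            new File(logDir).mkdirs(); // writeScalar appends to a file inside logDir
            for (int iter = 1; iter <= 5; iter++) {
                float loss = 1.0f / iter; // toy values standing in for the training loss
                TensorboardLogger.writeScalar(logDir, "training_loss", iter, loss);
            }
        }
    }
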
http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/proto/caffe/caffe.proto
----------------------------------------------------------------------
diff --git a/src/main/proto/caffe/caffe.proto b/src/main/proto/caffe/caffe.proto
new file mode 100644
index 0000000..cf53e17
--- /dev/null
+++ b/src/main/proto/caffe/caffe.proto
@@ -0,0 +1,1424 @@
+//-------------------------------------------------------------
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+//-------------------------------------------------------------
+
+syntax = "proto2";
+
+package caffe;
+
+// Specifies the shape (dimensions) of a Blob.
+message BlobShape {
+  repeated int64 dim = 1 [packed = true];
+}
+
+message BlobProto {
+  optional BlobShape shape = 7;
+  repeated float data = 5 [packed = true];
+  repeated float diff = 6 [packed = true];
+  repeated double double_data = 8 [packed = true];
+  repeated double double_diff = 9 [packed = true];
+
+  // 4D dimensions -- deprecated.  Use "shape" instead.
+  optional int32 num = 1 [default = 0];
+  optional int32 channels = 2 [default = 0];
+  optional int32 height = 3 [default = 0];
+  optional int32 width = 4 [default = 0];
+}
+
+// The BlobProtoVector is simply a way to pass multiple blobproto instances
+// around.
+message BlobProtoVector {
+  repeated BlobProto blobs = 1;
+}
+
+message Datum {
+  optional int32 channels = 1;
+  optional int32 height = 2;
+  optional int32 width = 3;
+  // the actual image data, in bytes
+  optional bytes data = 4;
+  optional int32 label = 5;
+  // Optionally, the datum could also hold float data.
+  repeated float float_data = 6;
+  // If true, data contains an encoded image that needs to be decoded
+  optional bool encoded = 7 [default = false];
+}
+
+message FillerParameter {
+  // The filler type.
+  optional string type = 1 [default = 'constant'];
+  optional float value = 2 [default = 0]; // the value in constant filler
+  optional float min = 3 [default = 0]; // the min value in uniform filler
+  optional float max = 4 [default = 1]; // the max value in uniform filler
+  optional float mean = 5 [default = 0]; // the mean value in Gaussian filler
+  optional float std = 6 [default = 1]; // the std value in Gaussian filler
+  // The expected number of non-zero output weights for a given input in
+  // Gaussian filler -- the default -1 means don't perform sparsification.
+  optional int32 sparse = 7 [default = -1];
+  // Normalize the filler variance by fan_in, fan_out, or their average.
+  // Applies to 'xavier' and 'msra' fillers.
+  enum VarianceNorm {
+    FAN_IN = 0;
+    FAN_OUT = 1;
+    AVERAGE = 2;
+  }
+  optional VarianceNorm variance_norm = 8 [default = FAN_IN];
+}
+
+message NetParameter {
+  optional string name = 1; // consider giving the network a name
+  // DEPRECATED. See InputParameter. The input blobs to the network.
+  repeated string input = 3;
+  // DEPRECATED. See InputParameter. The shape of the input blobs.
+  repeated BlobShape input_shape = 8;
+
+  // 4D input dimensions -- deprecated.  Use "input_shape" instead.
+  // If specified, for each input blob there should be four
+  // values specifying the num, channels, height and width of the input blob.
+  // Thus, there should be a total of (4 * #input) numbers.
+  repeated int32 input_dim = 4;
+
+  // Whether the network will force every layer to carry out backward operation.
+  // If set False, then whether to carry out backward is determined
+  // automatically according to the net structure and learning rates.
+  optional bool force_backward = 5 [default = false];
+  // The current "state" of the network, including the phase, level, and stage.
+  // Some layers may be included/excluded depending on this state and the states
+  // specified in the layers' include and exclude fields.
+  optional NetState state = 6;
+
+  // Print debugging information about results while running Net::Forward,
+  // Net::Backward, and Net::Update.
+  optional bool debug_info = 7 [default = false];
+
+  // The layers that make up the net.  Each of their configurations, including
+  // connectivity and behavior, is specified as a LayerParameter.
+  repeated LayerParameter layer = 100;  // ID 100 so layers are printed last.
+
+  // DEPRECATED: use 'layer' instead.
+  repeated V1LayerParameter layers = 2;
+}
+
+// NOTE
+// Update the next available ID when you add a new SolverParameter field.
+//
+// SolverParameter next available ID: 43 (last added: test_algo)
+message SolverParameter {
+  //////////////////////////////////////////////////////////////////////////////
+  // Specifying the train and test networks
+  //
+  // Exactly one train net must be specified using one of the following fields:
+  //     train_net_param, train_net, net_param, net
+  // One or more test nets may be specified using any of the following fields:
+  //     test_net_param, test_net, net_param, net
+  // If more than one test net field is specified (e.g., both net and
+  // test_net are specified), they will be evaluated in the field order given
+  // above: (1) test_net_param, (2) test_net, (3) net_param/net.
+  // A test_iter must be specified for each test_net.
+  // A test_level and/or a test_stage may also be specified for each test_net.
+  //////////////////////////////////////////////////////////////////////////////
+  
+  // SystemML extension
+  optional string train_algo = 41 [default = "minibatch"];
+  optional string test_algo = 42 [default = "minibatch"];
+
+  // Proto filename for the train net, possibly combined with one or more
+  // test nets.
+  optional string net = 24;
+  // Inline train net param, possibly combined with one or more test nets.
+  optional NetParameter net_param = 25;
+
+  optional string train_net = 1; // Proto filename for the train net.
+  repeated string test_net = 2; // Proto filenames for the test nets.
+  optional NetParameter train_net_param = 21; // Inline train net params.
+  repeated NetParameter test_net_param = 22; // Inline test net params.
+
+  // The states for the train/test nets. Must be unspecified or
+  // specified once per net.
+  //
+  // By default, all states will have solver = true;
+  // train_state will have phase = TRAIN,
+  // and all test_state's will have phase = TEST.
+  // Other defaults are set according to the NetState defaults.
+  optional NetState train_state = 26;
+  repeated NetState test_state = 27;
+
+  // The number of iterations for each test net.
+  repeated int32 test_iter = 3;
+
+  // The number of iterations between two testing phases.
+  optional int32 test_interval = 4 [default = 0];
+  optional bool test_compute_loss = 19 [default = false];
+  // If true, run an initial test pass before the first iteration,
+  // ensuring memory availability and printing the starting value of the loss.
+  optional bool test_initialization = 32 [default = true];
+  optional float base_lr = 5; // The base learning rate
+  // the number of iterations between displaying info. If display = 0, no info
+  // will be displayed.
+  optional int32 display = 6;
+  // Display the loss averaged over the last average_loss iterations
+  optional int32 average_loss = 33 [default = 1];
+  optional int32 max_iter = 7; // the maximum number of iterations
+  // accumulate gradients over `iter_size` x `batch_size` instances
+  optional int32 iter_size = 36 [default = 1];
+
+  // The learning rate decay policy. The currently implemented learning rate
+  // policies are as follows:
+  //    - fixed: always return base_lr.
+  //    - step: return base_lr * gamma ^ (floor(iter / step))
+  //    - exp: return base_lr * gamma ^ iter
+  //    - inv: return base_lr * (1 + gamma * iter) ^ (- power)
+  //    - multistep: similar to step but it allows non uniform steps defined by
+  //      stepvalue
+  //    - poly: the effective learning rate follows a polynomial decay, reaching
+  //      zero at max_iter. return base_lr * (1 - iter/max_iter) ^ (power)
+  //    - sigmoid: the effective learning rate follows a sigmoid decay
+  //      return base_lr * ( 1/(1 + exp(-gamma * (iter - stepsize))))
+  //
+  // where base_lr, max_iter, gamma, step, stepvalue and power are defined
+  // in the solver parameter protocol buffer, and iter is the current iteration.
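+  // For example (illustrative values): with lr_policy = "step", base_lr = 0.01,
+  // gamma = 0.1 and stepsize = 10000, the learning rate is 0.01 for iterations
+  // 0-9999, 0.001 for iterations 10000-19999, and 0.0001 from iteration 20000 on.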
+  optional string lr_policy = 8;
+  optional float gamma = 9; // The parameter to compute the learning rate.
+  optional float power = 10; // The parameter to compute the learning rate.
+  optional float momentum = 11; // The momentum value.
+  optional float weight_decay = 12; // The weight decay.
+  // regularization types supported: L1 and L2
+  // controlled by weight_decay
+  optional string regularization_type = 29 [default = "L2"];
+  // the stepsize for learning rate policy "step"
+  optional int32 stepsize = 13;
+  // the stepsize for learning rate policy "multistep"
+  repeated int32 stepvalue = 34;
+
+  // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm,
+  // whenever their actual L2 norm is larger.
+  optional float clip_gradients = 35 [default = -1];
+
+  optional int32 snapshot = 14 [default = 0]; // The snapshot interval
+  optional string snapshot_prefix = 15; // The prefix for the snapshot.
+  // whether to snapshot diff in the results or not. Snapshotting diff will help
+  // debugging but the final protocol buffer size will be much larger.
+  optional bool snapshot_diff = 16 [default = false];
+  enum SnapshotFormat {
+    HDF5 = 0;
+    BINARYPROTO = 1;
+  }
+  optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO];
+  // the mode the solver will use: 0 for CPU and 1 for GPU. Uses GPU by default.
+  enum SolverMode {
+    CPU = 0;
+    GPU = 1;
+  }
+  optional SolverMode solver_mode = 17 [default = GPU];
+  // the device_id that will be used in GPU mode. Uses device_id = 0 by default.
+  optional int32 device_id = 18 [default = 0];
+  // If non-negative, the seed with which the Solver will initialize the Caffe
+  // random number generator -- useful for reproducible results. Otherwise,
+  // (and by default) initialize using a seed derived from the system clock.
+  optional int64 random_seed = 20 [default = -1];
+
+  // type of the solver
+  optional string type = 40 [default = "SGD"];
+
+  // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam
+  optional float delta = 31 [default = 1e-8];
+  // parameters for the Adam solver
+  optional float momentum2 = 39 [default = 0.999];
+
+  // RMSProp decay value
+  // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t)
+  optional float rms_decay = 38 [default = 0.99];
+
+  // If true, print information about the state of the net that may help with
+  // debugging learning problems.
+  optional bool debug_info = 23 [default = false];
+
+  // If false, don't save a snapshot after training finishes.
+  optional bool snapshot_after_train = 28 [default = true];
+
+  // DEPRECATED: old solver enum types, use string instead
+  enum SolverType {
+    SGD = 0;
+    NESTEROV = 1;
+    ADAGRAD = 2;
+    RMSPROP = 3;
+    ADADELTA = 4;
+    ADAM = 5;
+  }
+  // DEPRECATED: use type instead of solver_type
+  optional SolverType solver_type = 30 [default = SGD];
+}
+
+// A message that stores the solver snapshots
+message SolverState {
+  optional int32 iter = 1; // The current iteration
+  optional string learned_net = 2; // The file that stores the learned net.
+  repeated BlobProto history = 3; // The history for sgd solvers
+  optional int32 current_step = 4 [default = 0]; // The current step for learning rate
+}
+
+enum Phase {
+   TRAIN = 0;
+   TEST = 1;
+}
+
+message NetState {
+  optional Phase phase = 1 [default = TEST];
+  optional int32 level = 2 [default = 0];
+  repeated string stage = 3;
+}
+
+message NetStateRule {
+  // Set phase to require the NetState have a particular phase (TRAIN or TEST)
+  // to meet this rule.
+  optional Phase phase = 1;
+
+  // Set the minimum and/or maximum levels in which the layer should be used.
+  // Leave undefined to meet the rule regardless of level.
+  optional int32 min_level = 2;
+  optional int32 max_level = 3;
+
+  // Customizable sets of stages to include or exclude.
+  // The net must have ALL of the specified stages and NONE of the specified
+  // "not_stage"s to meet the rule.
+  // (Use multiple NetStateRules to specify conjunctions of stages.)
+  repeated string stage = 4;
+  repeated string not_stage = 5;
+}
+
+// Specifies training parameters (multipliers on global learning constants,
+// and the name and other settings used for weight sharing).
+message ParamSpec {
+  // The names of the parameter blobs -- useful for sharing parameters among
+  // layers, but never required otherwise.  To share a parameter between two
+  // layers, give it a (non-empty) name.
+  optional string name = 1;
+
+  // Whether to require shared weights to have the same shape, or just the same
+  // count -- defaults to STRICT if unspecified.
+  optional DimCheckMode share_mode = 2;
+  enum DimCheckMode {
+    // STRICT (default) requires that num, channels, height, width each match.
+    STRICT = 0;
+    // PERMISSIVE requires only the count (num*channels*height*width) to match.
+    PERMISSIVE = 1;
+  }
+
+  // The multiplier on the global learning rate for this parameter.
+  optional float lr_mult = 3 [default = 1.0];
+
+  // The multiplier on the global weight decay for this parameter.
+  optional float decay_mult = 4 [default = 1.0];
+}
+
+// NOTE
+// Update the next available ID when you add a new LayerParameter field.
+//
+// LayerParameter next available layer-specific ID: 147 (last added: recurrent_param)
+message LayerParameter {
+  optional string name = 1; // the layer name
+  optional string type = 2; // the layer type
+  repeated string bottom = 3; // the name of each bottom blob
+  repeated string top = 4; // the name of each top blob
+
+  // The train / test phase for computation.
+  optional Phase phase = 10;
+
+  // The amount of weight to assign each top blob in the objective.
+  // Each layer assigns a default value, usually of either 0 or 1,
+  // to each top blob.
+  repeated float loss_weight = 5;
+
+  // Specifies training parameters (multipliers on global learning constants,
+  // and the name and other settings used for weight sharing).
+  repeated ParamSpec param = 6;
+
+  // The blobs containing the numeric parameters of the layer.
+  repeated BlobProto blobs = 7;
+
+  // Specifies whether to backpropagate to each bottom. If unspecified,
+  // Caffe will automatically infer whether each input needs backpropagation
+  // to compute parameter gradients. If set to true for some inputs,
+  // backpropagation to those inputs is forced; if set false for some inputs,
+  // backpropagation to those inputs is skipped.
+  //
+  // The size must be either 0 or equal to the number of bottoms.
+  repeated bool propagate_down = 11;
+
+  // Rules controlling whether and when a layer is included in the network,
+  // based on the current NetState.  You may specify a non-zero number of rules
+  // to include OR exclude, but not both.  If no include or exclude rules are
+  // specified, the layer is always included.  If the current NetState meets
+  // ANY (i.e., one or more) of the specified rules, the layer is
+  // included/excluded.
+  repeated NetStateRule include = 8;
+  repeated NetStateRule exclude = 9;
+
+  // Parameters for data pre-processing.
+  optional TransformationParameter transform_param = 100;
+
+  // Parameters shared by loss layers.
+  optional LossParameter loss_param = 101;
+
+  // Layer type-specific parameters.
+  //
+  // Note: certain layers may have more than one computational engine
+  // for their implementation. These layers include an Engine type and
+  // engine parameter for selecting the implementation.
+  // The default for the engine is set by the ENGINE switch at compile-time.
+  optional AccuracyParameter accuracy_param = 102;
+  optional ArgMaxParameter argmax_param = 103;
+  optional BatchNormParameter batch_norm_param = 139;
+  optional BiasParameter bias_param = 141;
+  optional ConcatParameter concat_param = 104;
+  optional ContrastiveLossParameter contrastive_loss_param = 105;
+  optional ConvolutionParameter convolution_param = 106;
+  optional CropParameter crop_param = 144;
+  optional DataParameter data_param = 107;
+  optional DropoutParameter dropout_param = 108;
+  optional DummyDataParameter dummy_data_param = 109;
+  optional EltwiseParameter eltwise_param = 110;
+  optional ELUParameter elu_param = 140;
+  optional EmbedParameter embed_param = 137;
+  optional ExpParameter exp_param = 111;
+  optional FlattenParameter flatten_param = 135;
+  optional HDF5DataParameter hdf5_data_param = 112;
+  optional HDF5OutputParameter hdf5_output_param = 113;
+  optional HingeLossParameter hinge_loss_param = 114;
+  optional ImageDataParameter image_data_param = 115;
+  optional InfogainLossParameter infogain_loss_param = 116;
+  optional InnerProductParameter inner_product_param = 117;
+  optional InputParameter input_param = 143;
+  optional LogParameter log_param = 134;
+  optional LRNParameter lrn_param = 118;
+  optional MemoryDataParameter memory_data_param = 119;
+  optional MVNParameter mvn_param = 120;
+  optional ParameterParameter parameter_param = 145;
+  optional PoolingParameter pooling_param = 121;
+  optional PowerParameter power_param = 122;
+  optional PReLUParameter prelu_param = 131;
+  optional PythonParameter python_param = 130;
+  optional RecurrentParameter recurrent_param = 146;
+  optional ReductionParameter reduction_param = 136;
+  optional ReLUParameter relu_param = 123;
+  optional ReshapeParameter reshape_param = 133;
+  optional ScaleParameter scale_param = 142;
+  optional SigmoidParameter sigmoid_param = 124;
+  optional SoftmaxParameter softmax_param = 125;
+  optional SPPParameter spp_param = 132;
+  optional SliceParameter slice_param = 126;
+  optional TanHParameter tanh_param = 127;
+  optional ThresholdParameter threshold_param = 128;
+  optional TileParameter tile_param = 138;
+  optional WindowDataParameter window_data_param = 129;
+}
+
+// Message that stores parameters used to apply transformation
+// to the data layer's data
+message TransformationParameter {
+  // For data pre-processing, we can do simple scaling and subtracting the
+  // data mean, if provided. Note that the mean subtraction is always carried
+  // out before scaling.
+  optional float scale = 1 [default = 1];
+  // Specify if we want to randomly mirror data.
+  optional bool mirror = 2 [default = false];
+  // Specify if we would like to randomly crop an image.
+  optional uint32 crop_size = 3 [default = 0];
+  // mean_file and mean_value cannot be specified at the same time
+  optional string mean_file = 4;
+  // if specified can be repeated once (would subtract it from all the channels)
+  // or can be repeated the same number of times as channels
+  // (would subtract them from the corresponding channel)
+  repeated float mean_value = 5;
+  // Force the decoded image to have 3 color channels.
+  optional bool force_color = 6 [default = false];
+  // Force the decoded image to have 1 color channel.
+  optional bool force_gray = 7 [default = false];
+}
+
+// Message that stores parameters shared by loss layers
+message LossParameter {
+  // If specified, ignore instances with the given label.
+  optional int32 ignore_label = 1;
+  // How to normalize the loss for loss layers that aggregate across batches,
+  // spatial dimensions, or other dimensions.  Currently only implemented in
+  // SoftmaxWithLoss layer.
+  enum NormalizationMode {
+    // Divide by the number of examples in the batch times spatial dimensions.
+    // Outputs that receive the ignore label will NOT be ignored in computing
+    // the normalization factor.
+    FULL = 0;
+    // Divide by the total number of output locations that do not take the
+    // ignore_label.  If ignore_label is not set, this behaves like FULL.
+    VALID = 1;
+    // Divide by the batch size.
+    BATCH_SIZE = 2;
+    // Do not normalize the loss.
+    NONE = 3;
+  }
+  optional NormalizationMode normalization = 3 [default = VALID];
+  // Deprecated.  Ignored if normalization is specified.  If normalization
+  // is not specified, then setting this to false will be equivalent to
+  // normalization = BATCH_SIZE to be consistent with previous behavior.
+  optional bool normalize = 2;
+}
+
+// Messages that store parameters used by individual layer types follow, in
+// alphabetical order.
+
+message AccuracyParameter {
+  // When computing accuracy, count as correct by comparing the true label to
+  // the top k scoring classes.  By default, only compare to the top scoring
+  // class (i.e. argmax).
+  optional uint32 top_k = 1 [default = 1];
+
+  // The "label" axis of the prediction blob, whose argmax corresponds to the
+  // predicted label -- may be negative to index from the end (e.g., -1 for the
+  // last axis).  For example, if axis == 1 and the predictions are
+  // (N x C x H x W), the label blob is expected to contain N*H*W ground truth
+  // labels with integer values in {0, 1, ..., C-1}.
+  optional int32 axis = 2 [default = 1];
+
+  // If specified, ignore instances with the given label.
+  optional int32 ignore_label = 3;
+}
+
+message ArgMaxParameter {
+  // If true produce pairs (argmax, maxval)
+  optional bool out_max_val = 1 [default = false];
+  optional uint32 top_k = 2 [default = 1];
+  // The axis along which to maximise -- may be negative to index from the
+  // end (e.g., -1 for the last axis).
+  // By default ArgMaxLayer maximizes over the flattened trailing dimensions
+  // for each index of the first / num dimension.
+  optional int32 axis = 3;
+}
+
+message ConcatParameter {
+  // The axis along which to concatenate -- may be negative to index from the
+  // end (e.g., -1 for the last axis).  Other axes must have the
+  // same dimension for all the bottom blobs.
+  // By default, ConcatLayer concatenates blobs along the "channels" axis (1).
+  optional int32 axis = 2 [default = 1];
+
+  // DEPRECATED: alias for "axis" -- does not support negative indexing.
+  optional uint32 concat_dim = 1 [default = 1];
+}
+
+message BatchNormParameter {
+  // If false, accumulate global mean/variance values via a moving average. If
+  // true, use those accumulated values instead of computing mean/variance
+  // across the batch.
+  optional bool use_global_stats = 1;
+  // How much does the moving average decay each iteration?
+  optional float moving_average_fraction = 2 [default = .999];
+  // Small value to add to the variance estimate so that we don't divide by
+  // zero.
+  optional float eps = 3 [default = 1e-5];
+}
+
+message BiasParameter {
+  // The first axis of bottom[0] (the first input Blob) along which to apply
+  // bottom[1] (the second input Blob).  May be negative to index from the end
+  // (e.g., -1 for the last axis).
+  //
+  // For example, if bottom[0] is 4D with shape 100x3x40x60, the output
+  // top[0] will have the same shape, and bottom[1] may have any of the
+  // following shapes (for the given value of axis):
+  //    (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60
+  //    (axis == 1 == -3)          3;     3x40;     3x40x60
+  //    (axis == 2 == -2)                   40;       40x60
+  //    (axis == 3 == -1)                                60
+  // Furthermore, bottom[1] may have the empty shape (regardless of the value of
+  // "axis") -- a scalar bias.
+  optional int32 axis = 1 [default = 1];
+
+  // (num_axes is ignored unless just one bottom is given and the bias is
+  // a learned parameter of the layer.  Otherwise, num_axes is determined by the
+  // number of axes of the second bottom.)
+  // The number of axes of the input (bottom[0]) covered by the bias
+  // parameter, or -1 to cover all axes of bottom[0] starting from `axis`.
+  // Set num_axes := 0, to add a zero-axis Blob: a scalar.
+  optional int32 num_axes = 2 [default = 1];
+
+  // (filler is ignored unless just one bottom is given and the bias is
+  // a learned parameter of the layer.)
+  // The initialization for the learned bias parameter.
+  // Default is the zero (0) initialization, resulting in the BiasLayer
+  // initially performing the identity operation.
+  optional FillerParameter filler = 3;
+}
+
+message ContrastiveLossParameter {
+  // margin for dissimilar pair
+  optional float margin = 1 [default = 1.0];
+  // The first implementation of this cost did not exactly match the cost of
+  // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2.
+  // legacy_version = false (the default) uses (margin - d)^2 as proposed in the
+  // Hadsell paper. New models should probably use this version.
+  // legacy_version = true uses (margin - d^2). This is kept to support /
+  // reproduce existing models and results
+  optional bool legacy_version = 2 [default = false];
+}
+
+message ConvolutionParameter {
+  optional uint32 num_output = 1; // The number of outputs for the layer
+  optional bool bias_term = 2 [default = true]; // whether to have bias terms
+
+  // Pad, kernel size, and stride are all given as a single value for equal
+  // dimensions in all spatial dimensions, or once per spatial dimension.
+  repeated uint32 pad = 3; // The padding size; defaults to 0
+  repeated uint32 kernel_size = 4; // The kernel size
+  repeated uint32 stride = 6; // The stride; defaults to 1
+  // Factor used to dilate the kernel, (implicitly) zero-filling the resulting
+  // holes. (Kernel dilation is sometimes referred to by its use in the
+  // algorithme à trous from Holschneider et al. 1987.)
+  repeated uint32 dilation = 18; // The dilation; defaults to 1
+
+  // For 2D convolution only, the *_h and *_w versions may also be used to
+  // specify both spatial dimensions.
+  optional uint32 pad_h = 9 [default = 0]; // The padding height (2D only)
+  optional uint32 pad_w = 10 [default = 0]; // The padding width (2D only)
+  optional uint32 kernel_h = 11; // The kernel height (2D only)
+  optional uint32 kernel_w = 12; // The kernel width (2D only)
+  optional uint32 stride_h = 13; // The stride height (2D only)
+  optional uint32 stride_w = 14; // The stride width (2D only)
+
+  optional uint32 group = 5 [default = 1]; // The group size for group conv
+
+  optional FillerParameter weight_filler = 7; // The filler for the weight
+  optional FillerParameter bias_filler = 8; // The filler for the bias
+  enum Engine {
+    DEFAULT = 0;
+    CAFFE = 1;
+    CUDNN = 2;
+  }
+  optional Engine engine = 15 [default = DEFAULT];
+
+  // The axis to interpret as "channels" when performing convolution.
+  // Preceding dimensions are treated as independent inputs;
+  // succeeding dimensions are treated as "spatial".
+  // With (N, C, H, W) inputs, and axis == 1 (the default), we perform
+  // N independent 2D convolutions, sliding C-channel (or (C/g)-channels, for
+  // groups g>1) filters across the spatial axes (H, W) of the input.
+  // With (N, C, D, H, W) inputs, and axis == 1, we perform
+  // N independent 3D convolutions, sliding (C/g)-channels
+  // filters across the spatial axes (D, H, W) of the input.
+  optional int32 axis = 16 [default = 1];
+
+  // Whether to force use of the general ND convolution, even if a specific
+  // implementation for blobs of the appropriate number of spatial dimensions
+  // is available. (Currently, there is only a 2D-specific convolution
+  // implementation; for input blobs with num_axes != 2, this option is
+  // ignored and the ND implementation will be used.)
+  optional bool force_nd_im2col = 17 [default = false];
+}
+
+message CropParameter {
+  // To crop, elements of the first bottom are selected to fit the dimensions
+  // of the second, reference bottom. The crop is configured by
+  // - the crop `axis` to pick the dimensions for cropping
+  // - the crop `offset` to set the shift for all/each dimension
+  // to align the cropped bottom with the reference bottom.
+  // All dimensions up to but excluding `axis` are preserved, while
+  // the dimensions including and trailing `axis` are cropped.
+  // If only one `offset` is set, then all dimensions are offset by this amount.
+  // Otherwise, the number of offsets must equal the number of cropped axes to
+  // shift the crop in each dimension accordingly.
+  // Note: standard dimensions are N,C,H,W so the default is a spatial crop,
+  // and `axis` may be negative to index from the end (e.g., -1 for the last
+  // axis).
+  optional int32 axis = 1 [default = 2];
+  repeated uint32 offset = 2;
+}
+
+message DataParameter {
+  enum DB {
+    LEVELDB = 0;
+    LMDB = 1;
+  }
+  // Specify the data source.
+  optional string source = 1;
+  // Specify the batch size.
+  optional uint32 batch_size = 4;
+  // The rand_skip variable is for the data layer to skip a few data points
+  // to avoid all asynchronous sgd clients to start at the same point. The skip
+  // point would be set as rand_skip * rand(0,1). Note that rand_skip should not
+  // be larger than the number of keys in the database.
+  // DEPRECATED. Each solver accesses a different subset of the database.
+  optional uint32 rand_skip = 7 [default = 0];
+  optional DB backend = 8 [default = LEVELDB];
+  // DEPRECATED. See TransformationParameter. For data pre-processing, we can do
+  // simple scaling and subtracting the data mean, if provided. Note that the
+  // mean subtraction is always carried out before scaling.
+  optional float scale = 2 [default = 1];
+  optional string mean_file = 3;
+  // DEPRECATED. See TransformationParameter. Specify if we would like to randomly
+  // crop an image.
+  optional uint32 crop_size = 5 [default = 0];
+  // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror
+  // data.
+  optional bool mirror = 6 [default = false];
+  // Force the encoded image to have 3 color channels
+  optional bool force_encoded_color = 9 [default = false];
+  // Prefetch queue (Number of batches to prefetch to host memory, increase if
+  // data access bandwidth varies).
+  optional uint32 prefetch = 10 [default = 4];
+}
+
+message DropoutParameter {
+  optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio
+}
+
+// DummyDataLayer fills any number of arbitrarily shaped blobs with random
+// (or constant) data generated by "Fillers" (see "message FillerParameter").
+message DummyDataParameter {
+  // This layer produces N >= 1 top blobs.  DummyDataParameter must specify 1 or N
+  // shape fields, and 0, 1 or N data_fillers.
+  //
+  // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used.
+  // If 1 data_filler is specified, it is applied to all top blobs.  If N are
+  // specified, the ith is applied to the ith top blob.
+  repeated FillerParameter data_filler = 1;
+  repeated BlobShape shape = 6;
+
+  // 4D dimensions -- deprecated.  Use "shape" instead.
+  repeated uint32 num = 2;
+  repeated uint32 channels = 3;
+  repeated uint32 height = 4;
+  repeated uint32 width = 5;
+}
+
+message EltwiseParameter {
+  enum EltwiseOp {
+    PROD = 0;
+    SUM = 1;
+    MAX = 2;
+  }
+  optional EltwiseOp operation = 1 [default = SUM]; // element-wise operation
+  repeated float coeff = 2; // blob-wise coefficient for SUM operation
+
+  // Whether to use an asymptotically slower (for >2 inputs) but stabler method
+  // of computing the gradient for the PROD operation. (No effect for SUM op.)
+  optional bool stable_prod_grad = 3 [default = true];
+}
+
+// Message that stores parameters used by ELULayer
+message ELUParameter {
+  // Described in:
+  // Clevert, D.-A., Unterthiner, T., & Hochreiter, S. (2015). Fast and Accurate
+  // Deep Network Learning by Exponential Linear Units (ELUs). arXiv
+  optional float alpha = 1 [default = 1];
+}
+
+// Message that stores parameters used by EmbedLayer
+message EmbedParameter {
+  optional uint32 num_output = 1; // The number of outputs for the layer
+  // The input is given as integers to be interpreted as one-hot
+  // vector indices with dimension num_input.  Hence num_input should be
+  // 1 greater than the maximum possible input value.
+  optional uint32 input_dim = 2;
+
+  optional bool bias_term = 3 [default = true]; // Whether to use a bias term
+  optional FillerParameter weight_filler = 4; // The filler for the weight
+  optional FillerParameter bias_filler = 5; // The filler for the bias
+
+}
+
+// Message that stores parameters used by ExpLayer
+message ExpParameter {
+  // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0.
+  // Or if base is set to the default (-1), base is set to e,
+  // so y = exp(shift + scale * x).
+  optional float base = 1 [default = -1.0];
+  optional float scale = 2 [default = 1.0];
+  optional float shift = 3 [default = 0.0];
+}
+
+/// Message that stores parameters used by FlattenLayer
+message FlattenParameter {
+  // The first axis to flatten: all preceding axes are retained in the output.
+  // May be negative to index from the end (e.g., -1 for the last axis).
+  optional int32 axis = 1 [default = 1];
+
+  // The last axis to flatten: all following axes are retained in the output.
+  // May be negative to index from the end (e.g., the default -1 for the last
+  // axis).
+  optional int32 end_axis = 2 [default = -1];
+}
+
+// Message that stores parameters used by HDF5DataLayer
+message HDF5DataParameter {
+  // Specify the data source.
+  optional string source = 1;
+  // Specify the batch size.
+  optional uint32 batch_size = 2;
+
+  // Specify whether to shuffle the data.
+  // If shuffle == true, the ordering of the HDF5 files is shuffled,
+  // and the ordering of data within any given HDF5 file is shuffled,
+  // but data between different files are not interleaved; all of a file's
+  // data are output (in a random order) before moving onto another file.
+  optional bool shuffle = 3 [default = false];
+}
+
+message HDF5OutputParameter {
+  optional string file_name = 1;
+}
+
+message HingeLossParameter {
+  enum Norm {
+    L1 = 1;
+    L2 = 2;
+  }
+  // Specify the Norm to use L1 or L2
+  optional Norm norm = 1 [default = L1];
+}
+
+message ImageDataParameter {
+  // Specify the data source.
+  optional string source = 1;
+  // Specify the batch size.
+  optional uint32 batch_size = 4 [default = 1];
+  // The rand_skip variable is for the data layer to skip a few data points
+  // to avoid all asynchronous sgd clients to start at the same point. The skip
+  // point would be set as rand_skip * rand(0,1). Note that rand_skip should not
+  // be larger than the number of keys in the database.
+  optional uint32 rand_skip = 7 [default = 0];
+  // Whether or not ImageLayer should shuffle the list of files at every epoch.
+  optional bool shuffle = 8 [default = false];
+  // It will also resize images if new_height or new_width are not zero.
+  optional uint32 new_height = 9 [default = 0];
+  optional uint32 new_width = 10 [default = 0];
+  // Specify if the images are color or gray
+  optional bool is_color = 11 [default = true];
+  // DEPRECATED. See TransformationParameter. For data pre-processing, we can do
+  // simple scaling and subtracting the data mean, if provided. Note that the
+  // mean subtraction is always carried out before scaling.
+  optional float scale = 2 [default = 1];
+  optional string mean_file = 3;
+  // DEPRECATED. See TransformationParameter. Specify if we would like to randomly
+  // crop an image.
+  optional uint32 crop_size = 5 [default = 0];
+  // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror
+  // data.
+  optional bool mirror = 6 [default = false];
+  optional string root_folder = 12 [default = ""];
+}
+
+message InfogainLossParameter {
+  // Specify the infogain matrix source.
+  optional string source = 1;
+}
+
+message InnerProductParameter {
+  optional uint32 num_output = 1; // The number of outputs for the layer
+  optional bool bias_term = 2 [default = true]; // whether to have bias terms
+  optional FillerParameter weight_filler = 3; // The filler for the weight
+  optional FillerParameter bias_filler = 4; // The filler for the bias
+
+  // The first axis to be lumped into a single inner product computation;
+  // all preceding axes are retained in the output.
+  // May be negative to index from the end (e.g., -1 for the last axis).
+  optional int32 axis = 5 [default = 1];
+  // Specify whether to transpose the weight matrix or not.
+  // If transpose == true, any operations will be performed on the transpose
+  // of the weight matrix. The weight matrix itself is not going to be transposed
+  // but rather the transfer flag of operations will be toggled accordingly.
+  optional bool transpose = 6 [default = false];
+}
+
+message InputParameter {
+  // This layer produces N >= 1 top blob(s) to be assigned manually.
+  // Define N shapes to set a shape for each top.
+  // Define 1 shape to set the same shape for every top.
+  // Define no shape to defer to reshaping manually.
+  repeated BlobShape shape = 1;
+}
+
+// Message that stores parameters used by LogLayer
+message LogParameter {
+  // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0.
+  // Or if base is set to the default (-1), base is set to e,
+  // so y = ln(shift + scale * x) = log_e(shift + scale * x)
+  optional float base = 1 [default = -1.0];
+  optional float scale = 2 [default = 1.0];
+  optional float shift = 3 [default = 0.0];
+}
+
+// Message that stores parameters used by LRNLayer
+message LRNParameter {
+  optional uint32 local_size = 1 [default = 5];
+  optional float alpha = 2 [default = 1.];
+  optional float beta = 3 [default = 0.75];
+  enum NormRegion {
+    ACROSS_CHANNELS = 0;
+    WITHIN_CHANNEL = 1;
+  }
+  optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS];
+  optional float k = 5 [default = 1.];
+  enum Engine {
+    DEFAULT = 0;
+    CAFFE = 1;
+    CUDNN = 2;
+  }
+  optional Engine engine = 6 [default = DEFAULT];
+}
+
+message MemoryDataParameter {
+  optional uint32 batch_size = 1;
+  optional uint32 channels = 2;
+  optional uint32 height = 3;
+  optional uint32 width = 4;
+}
+
+message MVNParameter {
+  // This parameter can be set to false to normalize mean only
+  optional bool normalize_variance = 1 [default = true];
+
+  // This parameter can be set to true to perform DNN-like MVN
+  optional bool across_channels = 2 [default = false];
+
+  // Epsilon for not dividing by zero while normalizing variance
+  optional float eps = 3 [default = 1e-9];
+}
+
+message ParameterParameter {
+  optional BlobShape shape = 1;
+}
+
+message PoolingParameter {
+  enum PoolMethod {
+    MAX = 0;
+    AVE = 1;
+    STOCHASTIC = 2;
+  }
+  optional PoolMethod pool = 1 [default = MAX]; // The pooling method
+  // Pad, kernel size, and stride are all given as a single value for equal
+  // dimensions in height and width or as Y, X pairs.
+  optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X)
+  optional uint32 pad_h = 9 [default = 0]; // The padding height
+  optional uint32 pad_w = 10 [default = 0]; // The padding width
+  optional uint32 kernel_size = 2; // The kernel size (square)
+  optional uint32 kernel_h = 5; // The kernel height
+  optional uint32 kernel_w = 6; // The kernel width
+  optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X)
+  optional uint32 stride_h = 7; // The stride height
+  optional uint32 stride_w = 8; // The stride width
+  enum Engine {
+    DEFAULT = 0;
+    CAFFE = 1;
+    CUDNN = 2;
+  }
+  optional Engine engine = 11 [default = DEFAULT];
+  // If global_pooling then it will pool over the size of the bottom by doing
+  // kernel_h = bottom->height and kernel_w = bottom->width
+  optional bool global_pooling = 12 [default = false];
+}
+
+message PowerParameter {
+  // PowerLayer computes outputs y = (shift + scale * x) ^ power.
+  optional float power = 1 [default = 1.0];
+  optional float scale = 2 [default = 1.0];
+  optional float shift = 3 [default = 0.0];
+}
+
+message PythonParameter {
+  optional string module = 1;
+  optional string layer = 2;
+  // This value is set to the attribute `param_str` of the `PythonLayer` object
+  // in Python before calling the `setup()` method. This could be a number,
+  // string, dictionary in Python dict format, JSON, etc. You may parse this
+  // string in `setup` method and use it in `forward` and `backward`.
+  optional string param_str = 3 [default = ''];
+  // Whether this PythonLayer is shared among worker solvers during data parallelism.
+  // If true, each worker solver sequentially runs forward from this layer.
+  // This value should be set true if you are using it as a data layer.
+  optional bool share_in_parallel = 4 [default = false];
+}
+
+// Message that stores parameters used by RecurrentLayer
+message RecurrentParameter {
+  // The dimension of the output (and usually hidden state) representation --
+  // must be explicitly set to non-zero.
+  optional uint32 num_output = 1 [default = 0];
+
+  optional FillerParameter weight_filler = 2; // The filler for the weight
+  optional FillerParameter bias_filler = 3; // The filler for the bias
+
+  // Whether to enable displaying debug_info in the unrolled recurrent net.
+  optional bool debug_info = 4 [default = false];
+
+  // Whether to add as additional inputs (bottoms) the initial hidden state
+  // blobs, and add as additional outputs (tops) the final timestep hidden state
+  // blobs.  The number of additional bottom/top blobs required depends on the
+  // recurrent architecture -- e.g., 1 for RNNs, 2 for LSTMs.
+  optional bool expose_hidden = 5 [default = false];
+}
+
+// Message that stores parameters used by ReductionLayer
+message ReductionParameter {
+  enum ReductionOp {
+    SUM = 1;
+    ASUM = 2;
+    SUMSQ = 3;
+    MEAN = 4;
+  }
+
+  optional ReductionOp operation = 1 [default = SUM]; // reduction operation
+
+  // The first axis to reduce to a scalar -- may be negative to index from the
+  // end (e.g., -1 for the last axis).
+  // (Currently, only reduction along ALL "tail" axes is supported; reduction
+  // of axis M through N, where N < num_axes - 1, is unsupported.)
+  // Suppose we have an n-axis bottom Blob with shape:
+  //     (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)).
+  // If axis == m, the output Blob will have shape
+  //     (d0, d1, d2, ..., d(m-1)),
+  // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1))
+  // times, each including (dm * d(m+1) * ... * d(n-1)) individual data.
+  // If axis == 0 (the default), the output Blob always has the empty shape
+  // (count 1), performing reduction across the entire input --
+  // often useful for creating new loss functions.
+  optional int32 axis = 2 [default = 0];
+
+  optional float coeff = 3 [default = 1.0]; // coefficient for output
+}
+
+// Message that stores parameters used by ReLULayer
+message ReLUParameter {
+  // Allow non-zero slope for negative inputs to speed up optimization
+  // Described in:
+  // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). Rectifier nonlinearities
+  // improve neural network acoustic models. In ICML Workshop on Deep Learning
+  // for Audio, Speech, and Language Processing.
+  optional float negative_slope = 1 [default = 0];
+  enum Engine {
+    DEFAULT = 0;
+    CAFFE = 1;
+    CUDNN = 2;
+  }
+  optional Engine engine = 2 [default = DEFAULT];
+}
+
+message ReshapeParameter {
+  // Specify the output dimensions. If some of the dimensions are set to 0,
+  // the corresponding dimension from the bottom layer is used (unchanged).
+  // Exactly one dimension may be set to -1, in which case its value is
+  // inferred from the count of the bottom blob and the remaining dimensions.
+  // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8:
+  //
+  //   layer {
+  //     type: "Reshape" bottom: "input" top: "output"
+  //     reshape_param { ... }
+  //   }
+  //
+  // If "input" is 2D with shape 2 x 8, then the following reshape_param
+  // specifications are all equivalent, producing a 3D blob "output" with shape
+  // 2 x 2 x 4:
+  //
+  //   reshape_param { shape { dim:  2  dim: 2  dim:  4 } }
+  //   reshape_param { shape { dim:  0  dim: 2  dim:  4 } }
+  //   reshape_param { shape { dim:  0  dim: 2  dim: -1 } }
+  //   reshape_param { shape { dim:  0  dim:-1  dim:  4 } }
+  //
+  optional BlobShape shape = 1;
+
+  // axis and num_axes control the portion of the bottom blob's shape that are
+  // replaced by (included in) the reshape. By default (axis == 0 and
+  // num_axes == -1), the entire bottom blob shape is included in the reshape,
+  // and hence the shape field must specify the entire output shape.
+  //
+  // axis may be non-zero to retain some portion of the beginning of the input
+  // shape (and may be negative to index from the end; e.g., -1 to begin the
+  // reshape after the last axis, including nothing in the reshape,
+  // -2 to include only the last axis, etc.).
+  //
+  // For example, suppose "input" is a 2D blob with shape 2 x 8.
+  // Then the following ReshapeLayer specifications are all equivalent,
+  // producing a blob "output" with shape 2 x 2 x 4:
+  //
+  //   reshape_param { shape { dim: 2  dim: 2  dim: 4 } }
+  //   reshape_param { shape { dim: 2  dim: 4 } axis:  1 }
+  //   reshape_param { shape { dim: 2  dim: 4 } axis: -3 }
+  //
+  // num_axes specifies the extent of the reshape.
+  // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on
+  // input axes in the range [axis, axis+num_axes].
+  // num_axes may also be -1, the default, to include all remaining axes
+  // (starting from axis).
+  //
+  // For example, suppose "input" is a 2D blob with shape 2 x 8.
+  // Then the following ReshapeLayer specifications are equivalent,
+  // producing a blob "output" with shape 1 x 2 x 8.
+  //
+  //   reshape_param { shape { dim:  1  dim: 2  dim:  8 } }
+  //   reshape_param { shape { dim:  1  dim: 2  }  num_axes: 1 }
+  //   reshape_param { shape { dim:  1  }  num_axes: 0 }
+  //
+  // On the other hand, these would produce output blob shape 2 x 1 x 8:
+  //
+  //   reshape_param { shape { dim: 2  dim: 1  dim: 8  }  }
+  //   reshape_param { shape { dim: 1 }  axis: 1  num_axes: 0 }
+  //
+  optional int32 axis = 2 [default = 0];
+  optional int32 num_axes = 3 [default = -1];
+}
+
+message ScaleParameter {
+  // The first axis of bottom[0] (the first input Blob) along which to apply
+  // bottom[1] (the second input Blob).  May be negative to index from the end
+  // (e.g., -1 for the last axis).
+  //
+  // For example, if bottom[0] is 4D with shape 100x3x40x60, the output
+  // top[0] will have the same shape, and bottom[1] may have any of the
+  // following shapes (for the given value of axis):
+  //    (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60
+  //    (axis == 1 == -3)          3;     3x40;     3x40x60
+  //    (axis == 2 == -2)                   40;       40x60
+  //    (axis == 3 == -1)                                60
+  // Furthermore, bottom[1] may have the empty shape (regardless of the value of
+  // "axis") -- a scalar multiplier.
+  optional int32 axis = 1 [default = 1];
+
+  // (num_axes is ignored unless just one bottom is given and the scale is
+  // a learned parameter of the layer.  Otherwise, num_axes is determined by the
+  // number of axes of the second bottom.)
+  // The number of axes of the input (bottom[0]) covered by the scale
+  // parameter, or -1 to cover all axes of bottom[0] starting from `axis`.
+  // Set num_axes := 0 to multiply with a zero-axis Blob: a scalar.
+  optional int32 num_axes = 2 [default = 1];
+
+  // (filler is ignored unless just one bottom is given and the scale is
+  // a learned parameter of the layer.)
+  // The initialization for the learned scale parameter.
+  // Default is the unit (1) initialization, resulting in the ScaleLayer
+  // initially performing the identity operation.
+  optional FillerParameter filler = 3;
+
+  // Whether to also learn a bias (equivalent to a ScaleLayer+BiasLayer, but
+  // may be more efficient).  Initialized with bias_filler (defaults to 0).
+  optional bool bias_term = 4 [default = false];
+  optional FillerParameter bias_filler = 5;
+}
+
+message SigmoidParameter {
+  enum Engine {
+    DEFAULT = 0;
+    CAFFE = 1;
+    CUDNN = 2;
+  }
+  optional Engine engine = 1 [default = DEFAULT];
+}
+
+message SliceParameter {
+  // The axis along which to slice -- may be negative to index from the end
+  // (e.g., -1 for the last axis).
+  // By default, SliceLayer slices blobs along the "channels" axis (1).
+  optional int32 axis = 3 [default = 1];
+  repeated uint32 slice_point = 2;
+
+  // DEPRECATED: alias for "axis" -- does not support negative indexing.
+  optional uint32 slice_dim = 1 [default = 1];
+}
+
+// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer
+message SoftmaxParameter {
+  enum Engine {
+    DEFAULT = 0;
+    CAFFE = 1;
+    CUDNN = 2;
+  }
+  optional Engine engine = 1 [default = DEFAULT];
+
+  // The axis along which to perform the softmax -- may be negative to index
+  // from the end (e.g., -1 for the last axis).
+  // Any other axes will be evaluated as independent softmaxes.
+  optional int32 axis = 2 [default = 1];
+}
+
+message TanHParameter {
+  enum Engine {
+    DEFAULT = 0;
+    CAFFE = 1;
+    CUDNN = 2;
+  }
+  optional Engine engine = 1 [default = DEFAULT];
+}
+
+// Message that stores parameters used by TileLayer
+message TileParameter {
+  // The index of the axis to tile.
+  optional int32 axis = 1 [default = 1];
+
+  // The number of copies (tiles) of the blob to output.
+  optional int32 tiles = 2;
+}
+
+// Message that stores parameters used by ThresholdLayer
+message ThresholdParameter {
+  optional float threshold = 1 [default = 0]; // Strictly positive values
+}
+
+message WindowDataParameter {
+  // Specify the data source.
+  optional string source = 1;
+  // For data pre-processing, we can do simple scaling and subtracting the
+  // data mean, if provided. Note that the mean subtraction is always carried
+  // out before scaling.
+  optional float scale = 2 [default = 1];
+  optional string mean_file = 3;
+  // Specify the batch size.
+  optional uint32 batch_size = 4;
+  // Specify if we would like to randomly crop an image.
+  optional uint32 crop_size = 5 [default = 0];
+  // Specify if we want to randomly mirror data.
+  optional bool mirror = 6 [default = false];
+  // Foreground (object) overlap threshold
+  optional float fg_threshold = 7 [default = 0.5];
+  // Background (non-object) overlap threshold
+  optional float bg_threshold = 8 [default = 0.5];
+  // Fraction of batch that should be foreground objects
+  optional float fg_fraction = 9 [default = 0.25];
+  // Amount of contextual padding to add around a window
+  // (used only by the window_data_layer)
+  optional uint32 context_pad = 10 [default = 0];
+  // Mode for cropping out a detection window
+  // warp: cropped window is warped to a fixed size and aspect ratio
+  // square: the tightest square around the window is cropped
+  optional string crop_mode = 11 [default = "warp"];
+  // cache_images: will load all images in memory for faster access
+  optional bool cache_images = 12 [default = false];
+  // append root_folder to locate images
+  optional string root_folder = 13 [default = ""];
+}
+
+message SPPParameter {
+  enum PoolMethod {
+    MAX = 0;
+    AVE = 1;
+    STOCHASTIC = 2;
+  }
+  optional uint32 pyramid_height = 1;
+  optional PoolMethod pool = 2 [default = MAX]; // The pooling method
+  enum Engine {
+    DEFAULT = 0;
+    CAFFE = 1;
+    CUDNN = 2;
+  }
+  optional Engine engine = 6 [default = DEFAULT];
+}
+
+// DEPRECATED: use LayerParameter.
+message V1LayerParameter {
+  repeated string bottom = 2;
+  repeated string top = 3;
+  optional string name = 4;
+  repeated NetStateRule include = 32;
+  repeated NetStateRule exclude = 33;
+  enum LayerType {
+    NONE = 0;
+    ABSVAL = 35;
+    ACCURACY = 1;
+    ARGMAX = 30;
+    BNLL = 2;
+    CONCAT = 3;
+    CONTRASTIVE_LOSS = 37;
+    CONVOLUTION = 4;
+    DATA = 5;
+    DECONVOLUTION = 39;
+    DROPOUT = 6;
+    DUMMY_DATA = 32;
+    EUCLIDEAN_LOSS = 7;
+    ELTWISE = 25;
+    EXP = 38;
+    FLATTEN = 8;
+    HDF5_DATA = 9;
+    HDF5_OUTPUT = 10;
+    HINGE_LOSS = 28;
+    IM2COL = 11;
+    IMAGE_DATA = 12;
+    INFOGAIN_LOSS = 13;
+    INNER_PRODUCT = 14;
+    LRN = 15;
+    MEMORY_DATA = 29;
+    MULTINOMIAL_LOGISTIC_LOSS = 16;
+    MVN = 34;
+    POOLING = 17;
+    POWER = 26;
+    RELU = 18;
+    SIGMOID = 19;
+    SIGMOID_CROSS_ENTROPY_LOSS = 27;
+    SILENCE = 36;
+    SOFTMAX = 20;
+    SOFTMAX_LOSS = 21;
+    SPLIT = 22;
+    SLICE = 33;
+    TANH = 23;
+    WINDOW_DATA = 24;
+    THRESHOLD = 31;
+  }
+  optional LayerType type = 5;
+  repeated BlobProto blobs = 6;
+  repeated string param = 1001;
+  repeated DimCheckMode blob_share_mode = 1002;
+  enum DimCheckMode {
+    STRICT = 0;
+    PERMISSIVE = 1;
+  }
+  repeated float blobs_lr = 7;
+  repeated float weight_decay = 8;
+  repeated float loss_weight = 35;
+  optional AccuracyParameter accuracy_param = 27;
+  optional ArgMaxParameter argmax_param = 23;
+  optional ConcatParameter concat_param = 9;
+  optional ContrastiveLossParameter contrastive_loss_param = 40;
+  optional ConvolutionParameter convolution_param = 10;
+  optional DataParameter data_param = 11;
+  optional DropoutParameter dropout_param = 12;
+  optional DummyDataParameter dummy_data_param = 26;
+  optional EltwiseParameter eltwise_param = 24;
+  optional ExpParameter exp_param = 41;
+  optional HDF5DataParameter hdf5_data_param = 13;
+  optional HDF5OutputParameter hdf5_output_param = 14;
+  optional HingeLossParameter hinge_loss_param = 29;
+  optional ImageDataParameter image_data_param = 15;
+  optional InfogainLossParameter infogain_loss_param = 16;
+  optional InnerProductParameter inner_product_param = 17;
+  optional LRNParameter lrn_param = 18;
+  optional MemoryDataParameter memory_data_param = 22;
+  optional MVNParameter mvn_param = 34;
+  optional PoolingParameter pooling_param = 19;
+  optional PowerParameter power_param = 21;
+  optional ReLUParameter relu_param = 30;
+  optional SigmoidParameter sigmoid_param = 38;
+  optional SoftmaxParameter softmax_param = 39;
+  optional SliceParameter slice_param = 31;
+  optional TanHParameter tanh_param = 37;
+  optional ThresholdParameter threshold_param = 25;
+  optional WindowDataParameter window_data_param = 20;
+  optional TransformationParameter transform_param = 36;
+  optional LossParameter loss_param = 42;
+  optional V0LayerParameter layer = 1;
+}
+
+// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters
+// in Caffe.  We keep this message type around for legacy support.
+message V0LayerParameter {
+  optional string name = 1; // the layer name
+  optional string type = 2; // the string to specify the layer type
+
+  // Parameters to specify layers with inner products.
+  optional uint32 num_output = 3; // The number of outputs for the layer
+  optional bool biasterm = 4 [default = true]; // whether to have bias terms
+  optional FillerParameter weight_filler = 5; // The filler for the weight
+  optional FillerParameter bias_filler = 6; // The filler for the bias
+
+  optional uint32 pad = 7 [default = 0]; // The padding size
+  optional uint32 kernelsize = 8; // The kernel size
+  optional uint32 group = 9 [default = 1]; // The group size for group conv
+  optional uint32 stride = 10 [default = 1]; // The stride
+  enum PoolMethod {
+    MAX = 0;
+    AVE = 1;
+    STOCHASTIC = 2;
+  }
+  optional PoolMethod pool = 11 [default = MAX]; // The pooling method
+  optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio
+
+  optional uint32 local_size = 13 [default = 5]; // for local response norm
+  optional float alpha = 14 [default = 1.]; // for local response norm
+  optional float beta = 15 [default = 0.75]; // for local response norm
+  optional float k = 22 [default = 1.];
+
+  // For data layers, specify the data source
+  optional string source = 16;
+  // For data pre-processing, we can do simple scaling and subtracting the
+  // data mean, if provided. Note that the mean subtraction is always carried
+  // out before scaling.
+  optional float scale = 17 [default = 1];
+  optional string meanfile = 18;
+  // For data layers, specify the batch size.
+  optional uint32 batchsize = 19;
+  // For data layers, specify if we would like to randomly crop an image.
+  optional uint32 cropsize = 20 [default = 0];
+  // For data layers, specify if we want to randomly mirror data.
+  optional bool mirror = 21 [default = false];
+
+  // The blobs containing the numeric parameters of the layer
+  repeated BlobProto blobs = 50;
+  // The ratio that is multiplied on the global learning rate. If you want to
+  // set the learning ratio for one blob, you need to set it for all blobs.
+  repeated float blobs_lr = 51;
+  // The weight decay that is multiplied on the global weight decay.
+  repeated float weight_decay = 52;
+
+  // The rand_skip variable is for the data layer to skip a few data points
+  // to avoid all asynchronous sgd clients to start at the same point. The skip
+  // point would be set as rand_skip * rand(0,1). Note that rand_skip should not
+  // be larger than the number of keys in the database.
+  optional uint32 rand_skip = 53 [default = 0];
+
+  // Fields related to detection (det_*)
+  // foreground (object) overlap threshold
+  optional float det_fg_threshold = 54 [default = 0.5];
+  // background (non-object) overlap threshold
+  optional float det_bg_threshold = 55 [default = 0.5];
+  // Fraction of batch that should be foreground objects
+  optional float det_fg_fraction = 56 [default = 0.25];
+
+  // optional bool OBSOLETE_can_clobber = 57 [default = true];
+
+  // Amount of contextual padding to add around a window
+  // (used only by the window_data_layer)
+  optional uint32 det_context_pad = 58 [default = 0];
+
+  // Mode for cropping out a detection window
+  // warp: cropped window is warped to a fixed size and aspect ratio
+  // square: the tightest square around the window is cropped
+  optional string det_crop_mode = 59 [default = "warp"];
+
+  // For ReshapeLayer, one needs to specify the new dimensions.
+  optional int32 new_num = 60 [default = 0];
+  optional int32 new_channels = 61 [default = 0];
+  optional int32 new_height = 62 [default = 0];
+  optional int32 new_width = 63 [default = 0];
+
+  // Whether or not ImageLayer should shuffle the list of files at every epoch.
+  // It will also resize images if new_height or new_width are not zero.
+  optional bool shuffle_images = 64 [default = false];
+
+  // For ConcatLayer, one needs to specify the dimension for concatenation, and
+  // the other dimensions must be the same for all the bottom blobs.
+  // By default it will concatenate blobs along the channels dimension.
+  optional uint32 concat_dim = 65 [default = 1];
+
+  optional HDF5OutputParameter hdf5_output_param = 1001;
+}
+
+message PReLUParameter {
+  // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers:
+  // Surpassing Human-Level Performance on ImageNet Classification, 2015.
+
+  // Initial value of a_i. Default is a_i=0.25 for all i.
+  optional FillerParameter filler = 1;
+  // Whether or not slope parameters are shared across channels.
+  optional bool channel_shared = 2 [default = false];
+}
\ No newline at end of file

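A quick, hedged illustration of the reshape semantics documented above: the Python sketch below builds a ReshapeParameter programmatically. It assumes caffe.proto has been compiled with protoc --python_out, which would yield a caffe_pb2 module; the module name and the 2 x 8 example blob are illustrative only and are not part of this commit.

    # Minimal sketch, assuming caffe.proto was compiled to caffe_pb2 via protoc --python_out
    import caffe_pb2

    # Reshape a 2 x 8 bottom blob to 2 x 2 x 4:
    #   dim 0  -> copy the corresponding bottom dimension (2)
    #   dim -1 -> infer from the remaining count: 16 / (2 * 2) = 4
    rp = caffe_pb2.ReshapeParameter()
    rp.shape.dim.extend([0, 2, -1])
    rp.axis = 0       # include the whole bottom shape in the reshape (default)
    rp.num_axes = -1  # all remaining axes (default)
    print(rp)         # prints the message in prototxt text format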

[2/3] incubator-systemml git commit: [SYSTEMML-692] Added initial version of DML generator for Caffe

Posted by ni...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/proto/tensorflow/event.proto
----------------------------------------------------------------------
diff --git a/src/main/proto/tensorflow/event.proto b/src/main/proto/tensorflow/event.proto
new file mode 100644
index 0000000..06d1992
--- /dev/null
+++ b/src/main/proto/tensorflow/event.proto
@@ -0,0 +1,102 @@
+//-------------------------------------------------------------
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+//-------------------------------------------------------------
+syntax = "proto3";
+
+package tensorflow;
+option cc_enable_arenas = true;
+option java_outer_classname = "EventProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.util";
+
+import "summary.proto";
+
+// Protocol buffer representing an event that happened during
+// the execution of a Brain model.
+message Event {
+  // Timestamp of the event.
+  double wall_time = 1;
+
+  // Global step of the event.
+  int64 step = 2;
+
+  oneof what {
+    // An event file was started, with the specified version.
+    // This is used to identify the contents of the record IO files
+    // easily.  Current version is "brain.Event:2".  All versions
+    // start with "brain.Event:".
+    string file_version = 3;
+    // An encoded version of a GraphDef.
+    bytes graph_def = 4;
+    // A summary was generated.
+    Summary summary = 5;
+    // The user output a log message. Not all messages are logged, only ones
+    // generated via the Python tensorboard_logging module.
+    LogMessage log_message = 6;
+    // The state of the session which can be used for restarting after crashes.
+    SessionLog session_log = 7;
+    // The metadata returned by running a session.run() call.
+    TaggedRunMetadata tagged_run_metadata = 8;
+    // An encoded version of a MetaGraphDef.
+    bytes meta_graph_def = 9;
+  }
+}
+
+// Protocol buffer used for logging messages to the events file.
+message LogMessage {
+  enum Level {
+    UNKNOWN = 0;
+    // Note: The logging level 10 cannot be named DEBUG. Some software
+    // projects compile their C/C++ code with -DDEBUG in debug builds. So the
+    // C++ code generated from this file should not have an identifier named
+    // DEBUG.
+    DEBUGGING = 10;
+    INFO = 20;
+    WARN = 30;
+    ERROR = 40;
+    FATAL = 50;
+  }
+  Level level = 1;
+  string message = 2;
+}
+
+// Protocol buffer used for logging session state.
+message SessionLog {
+  enum SessionStatus {
+    STATUS_UNSPECIFIED = 0;
+    START = 1;
+    STOP = 2;
+    CHECKPOINT = 3;
+  }
+
+  SessionStatus status = 1;
+  // This checkpoint_path contains both the path and filename.
+  string checkpoint_path = 2;
+  string msg = 3;
+}
+
+// For logging the metadata output for a single session.run() call.
+message TaggedRunMetadata {
+  // Tag name associated with this metadata.
+  string tag = 1;
+  // Byte-encoded version of the `RunMetadata` proto in order to allow lazy
+  // deserialization.
+  bytes run_metadata = 2;
+}

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/proto/tensorflow/summary.proto
----------------------------------------------------------------------
diff --git a/src/main/proto/tensorflow/summary.proto b/src/main/proto/tensorflow/summary.proto
new file mode 100644
index 0000000..fc8053c
--- /dev/null
+++ b/src/main/proto/tensorflow/summary.proto
@@ -0,0 +1,123 @@
+//-------------------------------------------------------------
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+//-------------------------------------------------------------
+syntax = "proto3";
+
+package tensorflow;
+option cc_enable_arenas = true;
+option java_outer_classname = "SummaryProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+
+// import "tensorflow/core/framework/tensor.proto";
+
+// Metadata associated with a series of Summary data
+message SummaryDescription {
+  // Hint on how plugins should process the data in this series.
+  // Supported values include "scalar", "histogram", "image", "audio"
+  string type_hint = 1;
+}
+
+// Serialization format for histogram module in
+// core/lib/histogram/histogram.h
+message HistogramProto {
+  double min = 1;
+  double max = 2;
+  double num = 3;
+  double sum = 4;
+  double sum_squares = 5;
+
+  // Parallel arrays encoding the bucket boundaries and the bucket values.
+  // bucket(i) is the count for the bucket i.  The range for
+  // a bucket is:
+  //   i == 0:  -DBL_MAX .. bucket_limit(0)
+  //   i != 0:  bucket_limit(i-1) .. bucket_limit(i)
+  repeated double bucket_limit = 6 [packed = true];
+  repeated double bucket = 7 [packed = true];
+};
+
+// A Summary is a set of named values to be displayed by the
+// visualizer.
+//
+// Summaries are produced regularly during training, as controlled by
+// the "summary_interval_secs" attribute of the training operation.
+// Summaries are also produced at the end of an evaluation.
+message Summary {
+  message Image {
+    // Dimensions of the image.
+    int32 height = 1;
+    int32 width = 2;
+    // Valid colorspace values are
+    //   1 - grayscale
+    //   2 - grayscale + alpha
+    //   3 - RGB
+    //   4 - RGBA
+    //   5 - DIGITAL_YUV
+    //   6 - BGRA
+    int32 colorspace = 3;
+    // Image data in encoded format.  All image formats supported by
+    // image_codec::CoderUtil can be stored here.
+    bytes encoded_image_string = 4;
+  }
+
+  message Audio {
+    // Sample rate of the audio in Hz.
+    float sample_rate = 1;
+    // Number of channels of audio.
+    int64 num_channels = 2;
+    // Length of the audio in frames (samples per channel).
+    int64 length_frames = 3;
+    // Encoded audio data and its associated RFC 2045 content type (e.g.
+    // "audio/wav").
+    bytes encoded_audio_string = 4;
+    string content_type = 5;
+  }
+
+  message Value {
+    // Name of the node that output this summary; in general, the name of a
+    // TensorSummary node. If the node in question has multiple outputs, then
+    // a ":\d+" suffix will be appended, like "some_op:13".
+    // Might not be set for legacy summaries (i.e. those not using the tensor
+    // value field)
+    string node_name = 7;
+
+    // Tag name for the data.  Will only be used by legacy summaries
+    // (i.e. those not using the tensor value field)
+    // For legacy summaries, will be used as the title of the graph
+    // in the visualizer.
+    //
+    // Tag is usually "op_name:value_name", where "op_name" itself can have
+    // structure to indicate grouping.
+    string tag = 1;
+
+    // Value associated with the tag.
+    oneof value {
+      float simple_value = 2;
+      bytes obsolete_old_style_histogram = 3;
+      Image image = 4;
+      HistogramProto histo = 5;
+      Audio audio = 6;
+      // TensorProto tensor = 8;
+    }
+  }
+
+  // Set of values for the summary.
+  repeated Value value = 1;
+}

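To make the two proto files above concrete, here is a hedged Python sketch that builds a scalar training-loss Summary and wraps it in an Event, the record type that the TensorBoard-style logging added in this commit would serialize. It assumes event.proto and summary.proto have been compiled with protoc --python_out (giving event_pb2 and summary_pb2 modules); the tag name and values are illustrative.

    # Minimal sketch, assuming the protos were compiled to event_pb2 / summary_pb2
    import time
    import summary_pb2
    import event_pb2

    summary = summary_pb2.Summary()
    value = summary.value.add()         # one Summary.Value entry
    value.tag = 'training_loss'         # legacy tag shown by the visualizer
    value.simple_value = 0.42           # scalar payload

    event = event_pb2.Event()
    event.wall_time = time.time()
    event.step = 100
    event.summary.CopyFrom(summary)     # selects the 'summary' branch of the oneof

    payload = event.SerializeToString() # bytes to append to an events file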
http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/python/setup.py
----------------------------------------------------------------------
diff --git a/src/main/python/setup.py b/src/main/python/setup.py
index 635dad7..fcda255 100644
--- a/src/main/python/setup.py
+++ b/src/main/python/setup.py
@@ -38,10 +38,12 @@ ARTIFACT_VERSION_SHORT = ARTIFACT_VERSION.split("-")[0]
 
 numpy_version = '1.8.2'
 scipy_version = '0.15.1'
+pillow_version = '2.0.0'
 REQUIRED_PACKAGES = [
     'numpy >= %s' % numpy_version,
     'scipy >= %s' % scipy_version,
-    'pandas'
+    'pandas',
+    'Pillow >= %s' % pillow_version
 ]
 
 

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/python/systemml/converters.py
----------------------------------------------------------------------
diff --git a/src/main/python/systemml/converters.py b/src/main/python/systemml/converters.py
index 9651f14..8bf05d7 100644
--- a/src/main/python/systemml/converters.py
+++ b/src/main/python/systemml/converters.py
@@ -19,7 +19,7 @@
 #
 #-------------------------------------------------------------
 
-__all__ = [ 'getNumCols', 'convertToMatrixBlock', 'convertToNumPyArr', 'convertToPandasDF', 'SUPPORTED_TYPES' , 'convertToLabeledDF']
+__all__ = [ 'getNumCols', 'convertToMatrixBlock', 'convertToNumPyArr', 'convertToPandasDF', 'SUPPORTED_TYPES' , 'convertToLabeledDF', 'convertImageToNumPyArr']
 
 import numpy as np
 import pandas as pd
@@ -118,6 +118,35 @@ def convertToNumPyArr(sc, mb):
     else:
         raise TypeError('sc needs to be of type SparkContext') # TODO: We can generalize this by creating py4j gateway ourselves
 
+# Example usage: convertImageToNumPyArr(im, img_shape=(3, 224, 224), add_rotated_images=True, add_mirrored_images=True)
+# The above call returns a numpy array of shape (6, 150528), i.e. one flattened C*H*W row per image copy (NCHW ordering)
+def convertImageToNumPyArr(im, img_shape=None, add_rotated_images=False, add_mirrored_images=False):
+    from PIL import Image
+    if img_shape is not None:
+        num_channels = img_shape[0]
+        size = (img_shape[1], img_shape[2])
+    else:
+        num_channels = 1 if im.mode == 'L' else 3
+        size = None
+    if num_channels != 1 and num_channels != 3:
+        raise ValueError('Expected the number of channels to be either 1 or 3')
+    if size is not None:
+        im = im.resize(size, Image.LANCZOS)
+    expected_mode = 'L' if num_channels == 1 else 'RGB'
+    if expected_mode != im.mode:
+        im = im.convert(expected_mode)
+    def _im2NumPy(im):
+        if expected_mode == 'L':
+            return np.asarray(im.getdata()).reshape((1, -1))
+        else:
+            # (H,W,C) --> (C,H,W) --> (1, C*H*W)
+            return np.asarray(im).transpose(2, 0, 1).reshape((1, -1))
+    ret = _im2NumPy(im)
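+    # Optionally stack rotated (90/180/270 degree) and mirrored (left-right,
+    # top-bottom) copies of the image as extra rows for simple data augmentation.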
+    if add_rotated_images:
+        ret = np.vstack((ret, _im2NumPy(im.rotate(90)), _im2NumPy(im.rotate(180)), _im2NumPy(im.rotate(270)) ))
+    if add_mirrored_images:
+        ret = np.vstack((ret, _im2NumPy(im.transpose(Image.FLIP_LEFT_RIGHT)), _im2NumPy(im.transpose(Image.FLIP_TOP_BOTTOM))))
+    return ret
 
 def convertToPandasDF(X):
     if not isinstance(X, pd.DataFrame):

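For context, a hedged usage sketch of the new converter (the image path is illustrative; it assumes Pillow is installed, which the setup.py change above now requires):

    from PIL import Image
    from systemml.converters import convertImageToNumPyArr

    im = Image.open('cat.jpg')  # hypothetical input image
    X = convertImageToNumPyArr(im, img_shape=(3, 224, 224),
                               add_rotated_images=True,
                               add_mirrored_images=True)
    # One row per original/rotated/mirrored copy, each a flattened C*H*W vector
    print(X.shape)              # (6, 150528)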
http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/python/systemml/mllearn/estimators.py
----------------------------------------------------------------------
diff --git a/src/main/python/systemml/mllearn/estimators.py b/src/main/python/systemml/mllearn/estimators.py
index d6ad069..94aa1f2 100644
--- a/src/main/python/systemml/mllearn/estimators.py
+++ b/src/main/python/systemml/mllearn/estimators.py
@@ -19,7 +19,7 @@
 #
 #-------------------------------------------------------------
 
-__all__ = ['LinearRegression', 'LogisticRegression', 'SVM', 'NaiveBayes']
+__all__ = ['LinearRegression', 'LogisticRegression', 'SVM', 'NaiveBayes', 'Caffe2DML']
 
 import numpy as np
 from pyspark.ml import Estimator
@@ -45,6 +45,7 @@ def assemble(sparkSession, pdf, inputCols, outputCol):
 class BaseSystemMLEstimator(Estimator):
     features_col = 'features'
     label_col = 'label'
+    do_visualize = False
     
     def set_features_col(self, colName):
         """
@@ -66,6 +67,21 @@ class BaseSystemMLEstimator(Estimator):
         """
         self.label_col = colName
 
+    def setGPU(self, enableGPU):
+        self.estimator.setGPU(enableGPU)
+        return self
+    
+    def setExplain(self, explain):
+        self.estimator.setExplain(explain)
+        return self
+            
+    def setStatistics(self, stat):
+        self.estimator.setStatistics(stat)
+        return self
+    
+    def setConfigProperty(self, propertyName, propertyValue):
+        self.estimator.setConfigProperty(propertyName, propertyValue)
+        return self
     
     def _fit_df(self):
         try:
@@ -158,6 +174,11 @@ class BaseSystemMLEstimator(Estimator):
         ----------
         X: NumPy ndarray, Pandas DataFrame, scipy sparse matrix or PySpark DataFrame
         """
+        try:
+            if self.estimator is not None and self.model is not None:
+                self.estimator.copyProperties(self.model)
+        except AttributeError:
+            pass
         if isinstance(X, SUPPORTED_TYPES):
             if self.transferUsingDF:
                 pdfX = convertToPandasDF(X)
@@ -206,6 +227,13 @@ class BaseSystemMLClassifier(BaseSystemMLEstimator):
         else:
             return [ self.labelMap[int(i)] for i in y ]
         
+    def predict(self, X):
+        predictions = np.asarray(super(BaseSystemMLClassifier, self).predict(X))
+        try:
+            return np.asarray(predictions, dtype='double')
+        except ValueError:
+            return np.asarray(predictions, dtype='str')
+            
     def score(self, X, y):
         """
         Scores the predicted value with ground truth 'y'
@@ -215,8 +243,11 @@ class BaseSystemMLClassifier(BaseSystemMLEstimator):
         X: NumPy ndarray, Pandas DataFrame, scipy sparse matrix
         y: NumPy ndarray, Pandas DataFrame, scipy sparse matrix
         """
-        return accuracy_score(y, self.predict(X))
-
+        predictions = np.asarray(self.predict(X))
+        if np.issubdtype(predictions.dtype.type, np.number):
+            return accuracy_score(y, predictions)
+        else:
+            return accuracy_score(np.asarray(y, dtype='str'), np.asarray(predictions, dtype='str'))
 
 class BaseSystemMLRegressor(BaseSystemMLEstimator):
 
@@ -499,4 +530,133 @@ class NaiveBayes(BaseSystemMLClassifier):
         self.estimator = self.sc._jvm.org.apache.sysml.api.ml.NaiveBayes(self.uid, self.sc._jsc.sc())
         self.estimator.setLaplace(laplace)
         self.transferUsingDF = transferUsingDF
-        self.setOutputRawPredictionsToFalse = False
\ No newline at end of file
+        self.setOutputRawPredictionsToFalse = False
+
+class Caffe2DML(BaseSystemMLClassifier):
+    """
+    Performs training/prediction for a given caffe network.
+    
+    Examples
+    --------
+    
+    >>> from systemml.mllearn import Caffe2DML
+    >>> from pyspark.sql import SQLContext
+    >>> sqlCtx = SQLContext(sc)
+    >>> from mlxtend.data import mnist_data
+    >>> import numpy as np
+    >>> from sklearn.utils import shuffle
+    >>> X, y = mnist_data()
+    >>> X, y = shuffle(X, y)
+    >>> imgShape = (1, 28, 28)
+    >>> import urllib
+    >>> urllib.urlretrieve('https://raw.githubusercontent.com/niketanpansare/model_zoo/master/caffe/vision/lenet/mnist/lenet.proto', 'lenet.proto')
+    >>> urllib.urlretrieve('https://raw.githubusercontent.com/niketanpansare/model_zoo/master/caffe/vision/lenet/mnist/lenet_solver.proto', 'lenet_solver.proto')
+    >>> caffe2DML = Caffe2DML(sqlCtx, 'lenet_solver.proto', input_shape=imgShape)
+    >>> caffe2DML.fit(X, y)
+    """
+    def __init__(self, sqlCtx, solver, input_shape, weights=None, ignore_weights=None, transferUsingDF=False, tensorboard_log_dir=None):
+        """
+        Performs training/prediction for a given caffe network. 
+
+        Parameters
+        ----------
+        sqlCtx: PySpark SQLContext
+        solver: caffe solver file path
+        input_shape: 3-element list (number of channels, input height, input width)
+        weights: directory where the learned weights are stored (default: None)
+        ignore_weights: names of layers whose weights should not be read from the weights directory (list of strings, default: None)
+        transferUsingDF: whether to pass the input dataset via PySpark DataFrame (default: False)
+        tensorboard_log_dir: directory to store the event logs (default: None, we use a temporary directory)
+        """
+        self.sqlCtx = sqlCtx
+        self.sc = sqlCtx._sc
+        self.uid = "Caffe2DML"
+        self.model = None
+        if len(input_shape) != 3:
+            raise ValueError('Expected input_shape to be a list of 3 elements')
+        solver = self.sc._jvm.org.apache.sysml.api.dl.Utils.readCaffeSolver(solver)
+        self.estimator = self.sc._jvm.org.apache.sysml.api.dl.Caffe2DML(self.sc._jsc.sc(), solver, str(input_shape[0]), str(input_shape[1]), str(input_shape[2]))
+        self.weights = weights
+        if weights is not None:
+            self.estimator.setInput("$weights", str(weights))
+            self._loadLabelTxt()
+            if ignore_weights is not None:
+                self.estimator.setWeightsToIgnore(ignore_weights)
+        self.transferUsingDF = transferUsingDF
+        self.setOutputRawPredictionsToFalse = False
+        self.visualize_called = False
+        if tensorboard_log_dir is not None:
+            self.estimator.setTensorBoardLogDir(tensorboard_log_dir)
+    
+    def _loadLabelTxt(self, format="binary", sep="/"):
+        if(self.weights is not None):
+            self.model = self.sc._jvm.org.apache.sysml.api.dl.Caffe2DMLModel(self.estimator)
+            df = self.sqlCtx.read.csv(self.weights + sep + 'labels.txt', header=False).toPandas()
+            keys = np.asarray(df._c0, dtype='int')
+            values = np.asarray(df._c1, dtype='str')
+            self.labelMap = {}
+            self.le = None
+            for i in range(len(keys)):
+                self.labelMap[int(keys[i])] = values[i]
+            # self.encode(classes) # Giving incorrect results
+    
+    def set(self, num_classes=None, debug=None):
+        """
+        Set input to Caffe2DML
+        
+        Parameters
+        ----------
+        debug: to add debugging DML code such as classification report, print DML script, etc (default: False)
+        """
+        if debug is not None: self.estimator.setInput("$debug", str(debug).upper())
+        return self
+    
+    def visualize(self, layerName=None, varType='weight', aggFn='mean'):
+        """
+        Use this to visualize the training procedure (requires a validation split, i.e. test_iter and test_interval set in the solver).
+        When one provides no argument to this method, we visualize training and validation loss.
+        
+        Parameters
+        ----------
+        layerName: Name of the layer in the Caffe prototxt file
+        varType: should be either 'weight', 'bias', 'dweight', 'dbias', 'output' or 'doutput'
+        aggFn: should be either 'sum', 'mean', 'var' or 'sd'
+        """
+        valid_vis_var_types = ['weight', 'bias', 'dweight', 'dbias', 'output', 'doutput']
+        valid_vis_aggFn = [ 'sum', 'mean', 'var', 'sd' ]
+        if layerName is not None and varType is not None and aggFn is not None:
+            # Visualize other values
+            if varType not in valid_vis_var_types:
+                raise ValueError('The second argument should be either weight, bias, dweight, dbias, output or doutput')
+            if aggFn not in valid_vis_aggFn:
+                raise ValueError('The third argument should be one of sum, mean, var or sd')
+            if self.visualize_called:
+                self.estimator.visualizeLoss()
+            self.estimator.visualizeLayer(layerName, varType, aggFn)
+        else:
+            self.estimator.visualizeLoss()
+        self.visualize_called = True
+        return self
+    
+    def save(self, outputDir, format='binary', sep='/'):
+        """
+        Save a trained model.
+        
+        Parameters
+        ----------
+        outputDir: Directory to save the model to
+        format: optional format (default: 'binary')
+        sep: separator to use (default: '/')
+        """
+        if self.model is not None:
+            self.model.save(outputDir, format, sep)
+            if self.le is not None:
+                labelMapping = dict(enumerate(list(self.le.classes_), 1))
+            else:
+                labelMapping = self.labelMap
+            lStr = [ [ int(k), str(labelMapping[k]) ] for k in labelMapping ]
+            df = self.sqlCtx.createDataFrame(lStr)
+            df.write.csv(outputDir + sep + 'labels.txt', mode='overwrite', header=False)
+        else:
+            raise Exception('Cannot save as you need to train the model first using fit')
+        return self

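Building on the LeNet example in the Caffe2DML docstring above, a hedged sketch of the remaining workflow; the layer name 'conv1' and the output directory are illustrative, and visualization assumes the solver defines a validation split (test_iter/test_interval):

    caffe2DML = Caffe2DML(sqlCtx, 'lenet_solver.proto', input_shape=imgShape)
    caffe2DML.setStatistics(True)                                  # optional runtime statistics
    caffe2DML.visualize()                                          # training/validation loss
    caffe2DML.visualize('conv1', varType='weight', aggFn='mean')   # hypothetical layer name
    caffe2DML.fit(X, y)
    y_pred = caffe2DML.predict(X[:10])
    caffe2DML.save('lenet_model')   # writes *_weight.mtx, *_bias.mtx and labels.txt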
http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala
----------------------------------------------------------------------
diff --git a/src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala b/src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala
new file mode 100644
index 0000000..7ab9160
--- /dev/null
+++ b/src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala
@@ -0,0 +1,510 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.sysml.api.dl
+
+import caffe.Caffe.LayerParameter;
+import caffe.Caffe.NetParameter;
+import caffe.Caffe.SolverParameter;
+
+import org.apache.sysml.parser.LanguageException;
+import org.apache.sysml.runtime.DMLRuntimeException;
+import org.apache.sysml.api.ml.ScriptsUtils
+import org.apache.sysml.runtime.matrix.MatrixCharacteristics
+import org.apache.sysml.runtime.matrix.data.MatrixBlock
+import scala.collection.JavaConversions._
+import java.util.ArrayList
+import caffe.Caffe.Phase
+import caffe.Caffe
+import java.util.HashSet
+import org.apache.sysml.api.DMLScript
+import java.io.File
+import org.apache.spark.SparkContext
+import org.apache.spark.ml.{ Model, Estimator }
+import org.apache.spark.sql.DataFrame
+import org.apache.spark.sql.types.StructType
+import org.apache.spark.ml.param.{ Params, Param, ParamMap, DoubleParam }
+import org.apache.sysml.runtime.matrix.MatrixCharacteristics
+import org.apache.sysml.runtime.matrix.data.MatrixBlock
+import org.apache.sysml.runtime.DMLRuntimeException
+import org.apache.sysml.runtime.instructions.spark.utils.{ RDDConverterUtilsExt => RDDConverterUtils }
+import org.apache.sysml.api.mlcontext._
+import org.apache.sysml.api.mlcontext.ScriptFactory._
+import org.apache.sysml.api.ml._
+import java.util.Random
+import org.apache.commons.logging.Log
+import org.apache.commons.logging.LogFactory
+import org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer
+
+
+object Caffe2DML  {
+  val LOG = LogFactory.getLog(classOf[Caffe2DML].getName())
+  def fileSep():String = { if(File.separator.equals("\\")) "\\\\" else File.separator }
+  def setNNLibraryPath(path:String):Unit = { prefix = path + fileSep + "nn"}  
+  // ------------------------------------------------------------------------
+  var prefix = Utils.getPrefix()
+  def layerDir = prefix + fileSep + "layers" + fileSep
+  def optimDir = prefix + fileSep + "optim" + fileSep
+  
+  // Naming conventions:
+  val X = "X"; val y = "y"; val batchSize = "BATCH_SIZE"; val numImages = "num_images"; val numValidationImages = "num_validation"
+  val XVal = "X_val"; val yVal = "y_val"
+}
+
+class Caffe2DML(val sc: SparkContext, val solverParam:Caffe.SolverParameter, 
+    val solver:CaffeSolver, val net:CaffeNetwork, 
+    val lrPolicy:LearningRatePolicy, val numChannels:String, val height:String, val width:String) extends Estimator[Caffe2DMLModel] 
+  with BaseSystemMLClassifier with DMLGenerator {
+  // --------------------------------------------------------------
+  // Invoked by Python, MLPipeline
+  def this(sc: SparkContext, solver1:Caffe.SolverParameter, networkPath:String, numChannels:String, height:String, width:String) {
+    this(sc, solver1, Utils.parseSolver(solver1), 
+        new CaffeNetwork(networkPath, caffe.Caffe.Phase.TRAIN, numChannels, height, width),
+        new LearningRatePolicy(solver1), numChannels, height, width)
+  }
+  def this(sc: SparkContext, solver1:Caffe.SolverParameter, numChannels:String, height:String, width:String) {
+    this(sc, solver1, Utils.parseSolver(solver1), new CaffeNetwork(solver1.getNet, caffe.Caffe.Phase.TRAIN, numChannels, height, width), 
+        new LearningRatePolicy(solver1), numChannels, height, width)
+  } 
+  val uid:String = "caffe_classifier_" + (new Random).nextLong
+  override def copy(extra: org.apache.spark.ml.param.ParamMap): Estimator[Caffe2DMLModel] = {
+    val that = new Caffe2DML(sc, solverParam, solver, net, lrPolicy, numChannels, height, width)
+    copyValues(that, extra)
+  }
+  // Note: will update the y_mb as this will be called by Python mllearn
+  def fit(X_mb: MatrixBlock, y_mb: MatrixBlock): Caffe2DMLModel = {
+    val ret = baseFit(X_mb, y_mb, sc)
+    new Caffe2DMLModel(ret, Utils.numClasses(net), sc, solver, net, lrPolicy, this)
+  }
+  def fit(df: ScriptsUtils.SparkDataType): Caffe2DMLModel = {
+    val ret = baseFit(df, sc)
+    new Caffe2DMLModel(ret, Utils.numClasses(net), sc, solver, net, lrPolicy, this)
+  }
+	// --------------------------------------------------------------
+  
+  // Used for simplifying transfer learning
+  private val layersToIgnore:HashSet[String] = new HashSet[String]() 
+  def setWeightsToIgnore(layerName:String):Unit = layersToIgnore.add(layerName)
+  def setWeightsToIgnore(layerNames:ArrayList[String]):Unit = layersToIgnore.addAll(layerNames)
+  	  
+  // Input parameters to prediction and scoring script
+  val inputs:java.util.HashMap[String, String] = new java.util.HashMap[String, String]()
+  def setInput(key: String, value:String):Unit = inputs.put(key, value)
+  customAssert(solverParam.getTestIterCount <= 1, "Multiple test_iter variables are not supported")
+  customAssert(solverParam.getMaxIter > 0, "Please set max_iter to a positive value")
+  customAssert(net.getLayers.filter(net.getCaffeLayer(_).isInstanceOf[IsLossLayer]).length == 1, "Expected exactly one loss layer")
+    
+  // TODO: throw error or warning if user tries to set solver_mode == GPU instead of using setGPU method
+  
+  // Method called by Python mllearn to visualize variable of certain layer
+  def visualizeLayer(layerName:String, varType:String, aggFn:String): Unit = visualizeLayer(net, layerName, varType, aggFn)
+  
+  // -------------------------------------------------------------------------------------------
+  // Helper functions to generate DML
+  // Initializes Caffe2DML.X, Caffe2DML.y, Caffe2DML.XVal, Caffe2DML.yVal and Caffe2DML.numImages
+  private def trainTestSplit(numValidationBatches:Int):Unit = {
+    if(numValidationBatches > 0) {
+      if(solverParam.getDisplay <= 0) 
+        throw new DMLRuntimeException("Since test_iter and test_interval are greater than zero, you should set display to be greater than zero")
+      tabDMLScript.append(Caffe2DML.numValidationImages).append(" = " + numValidationBatches + " * " + Caffe2DML.batchSize + "\n")
+      tabDMLScript.append("# Sanity check to ensure that validation set is not too large\n")
+      val maxValidationSize = "ceil(0.3 * " + Caffe2DML.numImages + ")"
+      ifBlock(Caffe2DML.numValidationImages  + " > " + maxValidationSize) {
+        assign(tabDMLScript, "max_test_iter", "floor(" + maxValidationSize + " / " + Caffe2DML.batchSize + ")")
+        tabDMLScript.append("stop(" +
+            dmlConcat(asDMLString("Too large validation size. Please reduce test_iter to "), "max_test_iter") 
+            + ")\n")
+      }
+      val one = "1"
+      val rl = int_add(Caffe2DML.numValidationImages, one)
+      rightIndexing(tabDMLScript.append(Caffe2DML.X).append(" = "), "X_full", rl, Caffe2DML.numImages, null, null)
+      tabDMLScript.append("; ")
+      rightIndexing(tabDMLScript.append(Caffe2DML.y).append(" = "), "y_full", rl, Caffe2DML.numImages, null, null)
+      tabDMLScript.append("; ")
+      rightIndexing(tabDMLScript.append(Caffe2DML.XVal).append(" = "), "X_full", one, Caffe2DML.numValidationImages, null, null)
+      tabDMLScript.append("; ")
+      rightIndexing(tabDMLScript.append(Caffe2DML.yVal).append(" = "), "y_full", one, Caffe2DML.numValidationImages, null, null)
+      tabDMLScript.append("; ")
+      tabDMLScript.append(Caffe2DML.numImages).append(" = nrow(y)\n")
+    }
+    else {
+      assign(tabDMLScript, Caffe2DML.X, "X_full")
+	    assign(tabDMLScript, Caffe2DML.y, "y_full")
+	    tabDMLScript.append(Caffe2DML.numImages).append(" = nrow(" + Caffe2DML.y + ")\n")
+    }
+  }
+  
+  private def printClassificationReport():Unit = {
+    ifBlock("debug"){
+      assign(tabDMLScript, "num_rows_error_measures", min("10", ncol("yb")))
+      assign(tabDMLScript, "error_measures", matrix("0", "num_rows_error_measures", "5"))
+      forBlock("class_i", "1", "num_rows_error_measures") {
+        assign(tabDMLScript, "tp", "sum( (true_yb == predicted_yb) * (true_yb == class_i) )")
+        assign(tabDMLScript, "tp_plus_fp", "sum( (predicted_yb == class_i) )")
+        assign(tabDMLScript, "tp_plus_fn", "sum( (true_yb == class_i) )")
+        assign(tabDMLScript, "precision", "tp / tp_plus_fp")
+        assign(tabDMLScript, "recall", "tp / tp_plus_fn")
+        assign(tabDMLScript, "f1Score", "2*precision*recall / (precision+recall)")
+        assign(tabDMLScript, "error_measures[class_i,1]", "class_i")
+        assign(tabDMLScript, "error_measures[class_i,2]", "precision")
+        assign(tabDMLScript, "error_measures[class_i,3]", "recall")
+        assign(tabDMLScript, "error_measures[class_i,4]", "f1Score")
+        assign(tabDMLScript, "error_measures[class_i,5]", "tp_plus_fn")
+      }
+      val dmlTab = "\\t"
+      val header = "class    " + dmlTab + "precision" + dmlTab + "recall  " + dmlTab + "f1-score" + dmlTab + "num_true_labels\\n"
+      val errorMeasures = "toString(error_measures, decimal=7, sep=" + asDMLString(dmlTab) + ")"
+      tabDMLScript.append(print(dmlConcat(asDMLString(header), errorMeasures)))
+    }
+  }
+  
+  // Append the DML to display training and validation loss
+  private def displayLoss(lossLayer:IsLossLayer, shouldValidate:Boolean):Unit = {
+    if(solverParam.getDisplay > 0) {
+      // Append the DML to compute training loss
+      tabDMLScript.append("# Compute training loss & accuracy\n")
+      ifBlock("iter  %% " + solverParam.getDisplay + " == 0") {
+        assign(tabDMLScript, "loss", "0"); assign(tabDMLScript, "accuracy", "0")
+        lossLayer.computeLoss(dmlScript, numTabs)
+        assign(tabDMLScript, "training_loss", "loss"); assign(tabDMLScript, "training_accuracy", "accuracy")
+        tabDMLScript.append(print( dmlConcat( asDMLString("Iter:"), "iter", 
+            asDMLString(", training loss:"), "training_loss", asDMLString(", training accuracy:"), "training_accuracy" )))
+        appendTrainingVisualizationBody(dmlScript, numTabs)
+        printClassificationReport
+      }
+      if(shouldValidate) {
+        // Append the DML to compute validation loss
+        val numValidationBatches = if(solverParam.getTestIterCount > 0) solverParam.getTestIter(0) else 0
+        tabDMLScript.append("# Compute validation loss & accuracy\n")
+        ifBlock("iter  %% " + solverParam.getTestInterval + " == 0") {
+          assign(tabDMLScript, "loss", "0"); assign(tabDMLScript, "accuracy", "0")
+          solverParam.getTestAlgo.toLowerCase match {
+            case "minibatch" => {
+              assign(tabDMLScript, "validation_loss", "0")
+              assign(tabDMLScript, "validation_accuracy", "0")
+              forBlock("iVal", "1", "num_iters_per_epoch") {
+    	          getValidationBatch(tabDMLScript)
+    	          tabDMLScript.append("iter = start_iter + i\n")
+    	          forward;  lossLayer.computeLoss(dmlScript, numTabs)
+                tabDMLScript.append("validation_loss = validation_loss + loss\n")
+                tabDMLScript.append("validation_accuracy = validation_accuracy + accuracy\n")
+    	        }
+              tabDMLScript.append("validation_accuracy = validation_accuracy / num_iters_per_epoch\n")
+            }
+            case "batch" => {
+              assign(tabDMLScript, "Xb", Caffe2DML.XVal); assign(tabDMLScript, "yb", Caffe2DML.yVal)
+              net.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, false))
+              lossLayer.computeLoss(dmlScript, numTabs)
+              assign(tabDMLScript, "validation_loss", "loss"); assign(tabDMLScript, "validation_accuracy", "accuracy")
+              
+            }
+            case _ => throw new DMLRuntimeException("Unsupported test algo:" + solverParam.getTestAlgo)
+          }
+          tabDMLScript.append(print( dmlConcat( asDMLString("Iter:"), "iter", 
+              asDMLString(", validation loss:"), "validation_loss", asDMLString(", validation accuracy:"), "validation_accuracy" )))
+          appendValidationVisualizationBody(dmlScript, numTabs)
+        }
+      }
+    }
+  }
+  
+  private def performSnapshot():Unit = {
+    if(solverParam.getSnapshot > 0) {
+      ifBlock("iter %% snapshot == 0") {
+        tabDMLScript.append("snapshot_dir= \"" + solverParam.getSnapshotPrefix + "\" + \"/iter_\" + iter + \"/\"\n")
+        net.getLayers.map(net.getCaffeLayer(_)).filter(_.weight != null).map(l => tabDMLScript.append(write(l.weight, "snapshot_dir + \"" + l.param.getName + "_weight.mtx\"", "binary")))
+  		  net.getLayers.map(net.getCaffeLayer(_)).filter(_.bias != null).map(l => tabDMLScript.append(write(l.bias, "snapshot_dir + \"" + l.param.getName + "_bias.mtx\"", "binary")))
+      }
+  	}
+  }
+  
+  private def forward():Unit = {
+    tabDMLScript.append("# Perform forward pass\n")
+	  net.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, false))
+  }
+  private def backward():Unit = backward("")
+  private def backward(suffix:String):Unit = {
+    tabDMLScript.append("# Perform backward pass\n")
+    net.getLayers.reverse.map(layer => net.getCaffeLayer(layer).backward(tabDMLScript, suffix))
+  }
+  private def update():Unit = {
+    tabDMLScript.append("# Update the parameters\n")
+    net.getLayers.map(layer => solver.update(tabDMLScript, net.getCaffeLayer(layer)))
+  }
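+  // The next three helpers back the "allreduce" train algorithm: every parallel
+  // batch flattens its gradients into one row of a <gradient>_agg matrix, and the
+  // rows are later column-summed and reshaped back to the parameter shapes before
+  // the update step.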
+  private def initAggGradients():Unit = {
+    tabDMLScript.append("# Data structure to store gradients computed in parallel\n")
+    net.getLayers.map(layer => net.getCaffeLayer(layer)).map(l => {
+      if(l.shouldUpdateWeight) assign(tabDMLScript, l.dWeight + "_agg", matrix("0", "parallel_batches", multiply(nrow(l.weight), ncol(l.weight))))
+      if(l.shouldUpdateBias) assign(tabDMLScript, l.dBias + "_agg", matrix("0", "parallel_batches", multiply(nrow(l.bias), ncol(l.bias)))) 
+    })
+  }
+  private def flattenAndStoreAggGradients_j():Unit = {
+    tabDMLScript.append("# Flatten and store gradients for this parallel execution\n")
+    net.getLayers.map(layer => net.getCaffeLayer(layer)).map(l => {
+      if(l.shouldUpdateWeight) assign(tabDMLScript, l.dWeight + "_agg[j,]", 
+          matrix(l.dWeight, "1", multiply(nrow(l.weight), ncol(l.weight)))) 
+      if(l.shouldUpdateBias) assign(tabDMLScript, l.dBias + "_agg[j,]", 
+          matrix(l.dBias, "1", multiply(nrow(l.bias), ncol(l.bias))))
+    })
+  }
+  private def aggregateAggGradients():Unit = {
+    tabDMLScript.append("# Aggregate the gradients\n")
+    net.getLayers.map(layer => net.getCaffeLayer(layer)).map(l => {
+      if(l.shouldUpdateWeight) assign(tabDMLScript, l.dWeight, 
+          matrix(colSums(l.dWeight + "_agg"), nrow(l.weight), ncol(l.weight))) 
+      if(l.shouldUpdateBias) assign(tabDMLScript, l.dBias, 
+          matrix(colSums(l.dBias + "_agg"), nrow(l.bias), ncol(l.bias)))
+    })
+  }
+  // -------------------------------------------------------------------------------------------
+  
+  private def multiply(v1:String, v2:String):String = v1 + "*" + v2
+  private def colSums(m:String):String = "colSums(" + m + ")"
+  
+	// Script generator
+	def getTrainingScript(isSingleNode:Boolean):(Script, String, String)  = {
+	  val startTrainingTime = System.nanoTime()
+	  val DEBUG_TRAINING = if(inputs.containsKey("$debug")) inputs.get("$debug").toLowerCase.toBoolean else false
+    reset()
+	  
+	  // Add source for layers as well as solver as well as visualization header
+	  source(net, solver, Array[String]("l2_reg"))
+	  appendVisualizationHeaders(dmlScript, numTabs)
+	  
+	  // Read and convert to one-hot encoding
+	  assign(tabDMLScript, "X_full", "read(\" \", format=\"csv\")")
+	  assign(tabDMLScript, "y_full", "read(\" \", format=\"csv\")")
+	  tabDMLScript.append(Caffe2DML.numImages).append(" = nrow(y_full)\n")
+	  tabDMLScript.append("weights = ifdef($weights, \" \")\n")
+	  tabDMLScript.append("debug = ifdef($debug, FALSE)\n")
+	  tabDMLScript.append("# Convert to one-hot encoding (Assumption: 1-based labels) \n")
+	  tabDMLScript.append("y_full = table(seq(1," + Caffe2DML.numImages + ",1), y_full, " + Caffe2DML.numImages + ", " + Utils.numClasses(net) + ")\n")
+	  
+	  // Initialize the layers and solvers
+	  tabDMLScript.append("# Initialize the layers and solvers\n")
+	  net.getLayers.map(layer => net.getCaffeLayer(layer).init(tabDMLScript))
+	  if(inputs.containsKey("$weights")) {
+		  // Loading existing weights. Note: keeping the initialization code in case the layer wants to initialize non-weights and non-bias
+		  tabDMLScript.append("# Load the weights. Note: keeping the initialization code in case the layer wants to initialize non-weights and non-bias\n")
+		  net.getLayers.filter(l => !layersToIgnore.contains(l)).map(net.getCaffeLayer(_)).filter(_.weight != null).map(l => tabDMLScript.append(read(l.weight, l.param.getName + "_weight.mtx")))
+		  net.getLayers.filter(l => !layersToIgnore.contains(l)).map(net.getCaffeLayer(_)).filter(_.bias != null).map(l => tabDMLScript.append(read(l.bias, l.param.getName + "_bias.mtx")))
+	  }
+	  net.getLayers.map(layer => solver.init(tabDMLScript, net.getCaffeLayer(layer)))
+	  
+	  // Split into training and validation set
+	  // Initializes Caffe2DML.X, Caffe2DML.y, Caffe2DML.XVal, Caffe2DML.yVal and Caffe2DML.numImages
+	  val shouldValidate = solverParam.getTestInterval > 0 && solverParam.getTestIterCount > 0 && solverParam.getTestIter(0) > 0
+	  trainTestSplit(if(shouldValidate) solverParam.getTestIter(0) else 0)
+	  
+	  // Set iteration-related variables such as max_epochs, num_iters_per_epoch, lr, etc.
+	  val lossLayers = net.getLayers.filter(layer => net.getCaffeLayer(layer).isInstanceOf[IsLossLayer]).map(layer => net.getCaffeLayer(layer).asInstanceOf[IsLossLayer])
+	  if(lossLayers.length != 1) throw new DMLRuntimeException("Expected exactly one loss layer")
+	  solverParam.getTrainAlgo.toLowerCase match {
+	    case "batch" => 
+	      assign(tabDMLScript, "max_epochs", solverParam.getMaxIter.toString)
+	    case _ => {
+	      ceilDivide(tabDMLScript, "num_iters_per_epoch", Caffe2DML.numImages, Caffe2DML.batchSize)
+	      ceilDivide(tabDMLScript, "max_epochs", solverParam.getMaxIter.toString, "num_iters_per_epoch")
+	    }
+	  }
+	  assign(tabDMLScript, "start_iter", "0")
+	  assign(tabDMLScript, "lr", solverParam.getBaseLr.toString)
+	  
+	  // ----------------------------------------------------------------------------
+	  // Main logic
+	  forBlock("e", "1", "max_epochs") {
+	    solverParam.getTrainAlgo.toLowerCase match {
+	      case "minibatch" => 
+	        forBlock("i", "1", "num_iters_per_epoch") {
+	          getTrainingBatch(tabDMLScript)
+	          tabDMLScript.append("iter = start_iter + i\n")
+	          forward; backward; update
+	          displayLoss(lossLayers(0), shouldValidate)
+            performSnapshot
+	        }
+	      case "batch" => {
+          tabDMLScript.append("iter = start_iter + i\n")
+          forward; backward; update
+          displayLoss(lossLayers(0), shouldValidate)
+          performSnapshot
+	      }
+	      case "allreduce" => {
+	        forBlock("i", "1", "num_iters_per_epoch") {
+	          getTrainingBatch(tabDMLScript)
+	          assign(tabDMLScript, "X_group_batch", "Xb")
+	          assign(tabDMLScript, "y_group_batch", "yb")
+	          tabDMLScript.append("iter = start_iter + i\n")
+	          initAggGradients
+	          parForBlock("j", "1", "nrow(y_group_batch)") {
+	            assign(tabDMLScript, "Xb", "X_group_batch[j,]")
+	            assign(tabDMLScript, "yb", "y_group_batch[j,]")
+	            forward; backward("_agg")
+              flattenAndStoreAggGradients_j
+	          }
+	          aggregateAggGradients
+            tabDMLScript.append("iter = start_iter + parallel_batches\n")    
+	          update
+            displayLoss(lossLayers(0), shouldValidate)
+            performSnapshot
+	        }
+	      }
+	      case _ => throw new DMLRuntimeException("Unsupported train algo:" + solverParam.getTrainAlgo)
+	    }
+	    // After every epoch, update the learning rate
+	    tabDMLScript.append("# Learning rate\n")
+	    lrPolicy.updateLearningRate(tabDMLScript)
+	    tabDMLScript.append("start_iter = start_iter + num_iters_per_epoch\n")
+	  }
+	  // ----------------------------------------------------------------------------
+	  
+	  // Check if this is necessary
+	  if(doVisualize) tabDMLScript.append("print(" + asDMLString("Visualization counter:") + " + viz_counter)")
+	  
+	  val trainingScript = tabDMLScript.toString()
+	  // Print script generation time and the DML script on stdout
+	  System.out.println("Time taken to generate training script from Caffe proto: " + ((System.nanoTime() - startTrainingTime)*1e-9) + " seconds." )
+	  if(DEBUG_TRAINING) Utils.prettyPrintDMLScript(trainingScript)
+	  
+	  // Set input/output variables and execute the script
+	  val script = dml(trainingScript).in(inputs)
+	  net.getLayers.map(net.getCaffeLayer(_)).filter(_.weight != null).map(l => script.out(l.weight))
+	  net.getLayers.map(net.getCaffeLayer(_)).filter(_.bias != null).map(l => script.out(l.bias))
+	  (script, "X_full", "y_full")
+	}
+}
+
+class Caffe2DMLModel(val mloutput: MLResults,  
+    val numClasses:String, val sc: SparkContext, val solver:CaffeSolver,
+    val net:CaffeNetwork, val lrPolicy:LearningRatePolicy,
+    val estimator:Caffe2DML) 
+  extends Model[Caffe2DMLModel] with HasMaxOuterIter with BaseSystemMLClassifierModel with DMLGenerator {
+  // --------------------------------------------------------------
+  // Invoked by Python, MLPipeline
+  val uid:String = "caffe_model_" + (new Random).nextLong 
+  def this(estimator:Caffe2DML) =  {
+    this(null, Utils.numClasses(estimator.net), estimator.sc, estimator.solver,
+        estimator.net,
+        // new CaffeNetwork(estimator.solverParam.getNet, caffe.Caffe.Phase.TEST, estimator.numChannels, estimator.height, estimator.width), 
+        estimator.lrPolicy, estimator) 
+  }
+      
+  override def copy(extra: org.apache.spark.ml.param.ParamMap): Caffe2DMLModel = {
+    val that = new Caffe2DMLModel(mloutput, numClasses, sc, solver, net, lrPolicy, estimator)
+    copyValues(that, extra)
+  }
+  // --------------------------------------------------------------
+  
+  def save(outputDir:String, format:String="binary", sep:String="/"):Unit = {
+	  if(mloutput == null) throw new DMLRuntimeException("Cannot save the model: train it first by calling fit")
+	  val dmlScript = new StringBuilder
+	  dmlScript.append("print(\"Saving the model to " + outputDir + "...\")\n")
+	  net.getLayers.map(net.getCaffeLayer(_)).filter(_.weight != null).map(l => dmlScript.append(write(l.weight, outputDir + sep + l.param.getName + "_weight.mtx", format)))
+	  net.getLayers.map(net.getCaffeLayer(_)).filter(_.bias != null).map(l => dmlScript.append(write(l.bias, outputDir + sep + l.param.getName + "_bias.mtx", format)))
+	  
+	  val script = dml(dmlScript.toString)
+	  net.getLayers.map(net.getCaffeLayer(_)).filter(_.weight != null).map(l => script.in(l.weight, mloutput.getBinaryBlockMatrix(l.weight)))
+	  net.getLayers.map(net.getCaffeLayer(_)).filter(_.bias != null).map(l => script.in(l.bias, mloutput.getBinaryBlockMatrix(l.bias)))
+	  val ml = new MLContext(sc)
+	  ml.execute(script)
+	}
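+  // Usage sketch (variable names and paths are illustrative):
+  //   val model = estimator.fit(X_train, y_train)   // trains and returns a Caffe2DMLModel
+  //   model.save("lenet_model")                      // writes <layerName>_weight.mtx / <layerName>_bias.mtx per parameterized layer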
+    
+  def getPredictionScript(mloutput: MLResults, isSingleNode:Boolean): (Script, String)  = {
+    reset()
+    val startPredictionTime = System.nanoTime()
+	  val DEBUG_PREDICTION = if(estimator.inputs.containsKey("$debug")) estimator.inputs.get("$debug").toLowerCase.toBoolean else false
+	  
+	  // Append source statements for each layer
+	  source(net, solver, null)
+    tabDMLScript.append("weights = ifdef($weights, \" \")\n")
+	  // Initialize the layers and solvers
+	  tabDMLScript.append("# Initialize the layers and solvers\n")
+	  net.getLayers.map(layer => net.getCaffeLayer(layer).init(tabDMLScript))
+	  if(mloutput == null && estimator.inputs.containsKey("$weights")) {
+		  // fit was not called
+		  net.getLayers.map(net.getCaffeLayer(_)).filter(_.weight != null).map(l => tabDMLScript.append(read(l.weight, l.param.getName + "_weight.mtx")))
+		  net.getLayers.map(net.getCaffeLayer(_)).filter(_.bias != null).map(l => tabDMLScript.append(read(l.bias, l.param.getName + "_bias.mtx")))
+	  }
+	  else if(mloutput == null) {
+		  throw new DMLRuntimeException("Cannot call predict/score without either calling fit or providing weights")
+	  }
+	  net.getLayers.map(layer => solver.init(tabDMLScript, net.getCaffeLayer(layer)))
+	  
+//	  if(estimator.inputs.containsKey("$debug") && estimator.inputs.get("$debug").equals("TRUE")) {
+//		  System.out.println("The output shape of layers:")
+//		  net.getLayers.map(layer =>  System.out.println(net.getCaffeLayer(layer).param.getName + " " + net.getCaffeLayer(layer).outputShape))
+//	  }
+	  
+	  // Do not update the mean and variance in batch norm during prediction
+	  net.getLayers.filter(net.getCaffeLayer(_).isInstanceOf[BatchNorm]).map(net.getCaffeLayer(_).asInstanceOf[BatchNorm].update_mean_var = false)
+	  tabDMLScript.append("X_full = read(\" \", format=\"csv\")\n")
+	  assign(tabDMLScript, "X", "X_full")
+	  tabDMLScript.append(Caffe2DML.numImages + " = nrow(X_full)\n")
+	  
+	  val lossLayers = net.getLayers.filter(layer => net.getCaffeLayer(layer).isInstanceOf[IsLossLayer]).map(layer => net.getCaffeLayer(layer).asInstanceOf[IsLossLayer])
+	  customAssert(lossLayers.length == 1, "Expected exactly one loss layer, but found " + lossLayers.length + ":" + net.getLayers.filter(layer => net.getCaffeLayer(layer).isInstanceOf[IsLossLayer]))
+	  assign(tabDMLScript, "Prob", matrix("0", Caffe2DML.numImages, numClasses))
+	  estimator.solverParam.getTestAlgo.toLowerCase match {
+      case "minibatch" => {
+        ceilDivide(tabDMLScript(), "num_iters", Caffe2DML.numImages, Caffe2DML.batchSize)
+        forBlock("i", "1", "num_iters") {
+          getTestBatch(tabDMLScript)
+          net.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, true))
+          assign(tabDMLScript, "Prob[beg:end,]", lossLayers(0).out)
+        }
+      }
+      case "batch" => {
+        assign(tabDMLScript, "Xb", "X_full")
+        net.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, true))
+        assign(tabDMLScript, "Prob", lossLayers(0).out)
+      }
+      case "allreduce" => {
+        ceilDivide(tabDMLScript(), "num_iters", Caffe2DML.numImages, Caffe2DML.batchSize)
+        parForBlock("i", "1", "num_iters") {
+          getTestBatch(tabDMLScript)
+          net.getLayers.map(layer => net.getCaffeLayer(layer).forward(tabDMLScript, true))
+          assign(tabDMLScript, "Prob[beg:end,]", lossLayers(0).out)
+        }
+      }
+      case _ => throw new DMLRuntimeException("Unsupported test algo:" + estimator.solverParam.getTestAlgo)
+    }
+		
+		val predictionScript = dmlScript.toString()
+		System.out.println("Time taken to generate prediction script from Caffe proto: " + ((System.nanoTime() - startPredictionTime)*1e-9) + " seconds." )
+		if(DEBUG_PREDICTION) Utils.prettyPrintDMLScript(predictionScript)
+		
+		// Reset
+		net.getLayers.filter(net.getCaffeLayer(_).isInstanceOf[BatchNorm]).map(net.getCaffeLayer(_).asInstanceOf[BatchNorm].update_mean_var = true)
+		
+	  val script = dml(predictionScript).out("Prob").in(estimator.inputs)
+	  if(mloutput != null) {
+	    // fit was called
+  	  net.getLayers.map(net.getCaffeLayer(_)).filter(_.weight != null).map(l => script.in(l.weight, mloutput.getBinaryBlockMatrix(l.weight)))
+  	  net.getLayers.map(net.getCaffeLayer(_)).filter(_.bias != null).map(l => script.in(l.bias, mloutput.getBinaryBlockMatrix(l.bias)))
+	  }
+	  
+	  (script, "X_full")
+  }
+  
+  // Prediction
+  def transform(X: MatrixBlock): MatrixBlock = {
+	  baseTransform(X, mloutput, sc, "Prob")
+  }
+  def transform(df: ScriptsUtils.SparkDataType): DataFrame = {
+	  baseTransform(df, mloutput, sc, "Prob")
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/scala/org/apache/sysml/api/dl/CaffeLayer.scala
----------------------------------------------------------------------
diff --git a/src/main/scala/org/apache/sysml/api/dl/CaffeLayer.scala b/src/main/scala/org/apache/sysml/api/dl/CaffeLayer.scala
new file mode 100644
index 0000000..4faa203
--- /dev/null
+++ b/src/main/scala/org/apache/sysml/api/dl/CaffeLayer.scala
@@ -0,0 +1,357 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.sysml.api.dl
+
+import caffe.Caffe.LayerParameter
+import scala.collection.JavaConversions._
+import org.apache.sysml.parser.LanguageException
+import java.util.HashSet
+import java.io.File
+import org.apache.sysml.api.DMLScript
+import org.apache.sysml.runtime.util.ConvolutionUtils
+import caffe.Caffe.EltwiseParameter.EltwiseOp
+import org.apache.sysml.runtime.DMLRuntimeException;
+import java.util.ArrayList
+
+trait CaffeLayer extends BaseDMLGenerator {
+  // -------------------------------------------------
+  // Any layer that wants to reuse the SystemML-NN library has to override the following methods, which generate the DML for the given layer:
+  def sourceFileName:String;
+  def init(dmlScript:StringBuilder):Unit;
+  def forward(dmlScript:StringBuilder, isPrediction:Boolean):Unit;
+  def backward(dmlScript:StringBuilder, outSuffix:String):Unit;
+  var computedOutputShape:(String, String, String) = null
+  def outputShape:(String, String, String) = {
+    if(computedOutputShape == null) computedOutputShape = bottomLayerOutputShape
+    computedOutputShape
+  }
+  // -------------------------------------------------
+  var computedBottomLayerOutputShape:(String, String, String) = null
+  def bottomLayerOutputShape:(String, String, String) = {
+    if(computedBottomLayerOutputShape == null) {
+      val ret = net.getBottomLayers(param.getName).map(l => net.getCaffeLayer(l)).toList
+      if(ret.size == 0) throw new LanguageException("Expected at least 1 bottom layer for " + param.getName)
+      computedBottomLayerOutputShape = ret(0).outputShape
+    }
+    computedBottomLayerOutputShape
+  }
+  def param:LayerParameter
+  def id:Int
+  def net:CaffeNetwork
+  // --------------------------------------------------------------------------------------
+  // No need to override these methods in subclasses
+  // Exception: Only Data layer overrides "out" method to use 'Xb' for consistency
+  // Naming of the below methods is consistent with the nn library:
+  // X (feature map from the previous layer) ----> Forward pass  ----> out (feature map to the next layer)
+  // dX (errors to the previous layer)       <---- Backward pass <---- dout (errors from the next layer)
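+  // For example, with a hypothetical layer id of 3: out = "out3" and dX = "dOut3"; a layer mixing in
+  // HasWeight/HasBias additionally gets weight = "W3", bias = "b3", dWeight = "dW3", dBias = "db3".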
+  def out = "out" + id  
+  var computedX:String = null
+  def X:String = {
+    if(computedX == null) {
+      val ret = net.getBottomLayers(param.getName).map(l => net.getCaffeLayer(l)).toList
+      if(ret.size == 0) throw new LanguageException("Expected at least 1 bottom layer for " + param.getName)
+      else if(ret.size == 1)    computedX = ret(0).out
+      else                      computedX = sum(new StringBuilder, ret.map(_.out).toList).toString()
+    }
+    computedX
+  }
+  var computedDout:String = null
+  def dout: String = {
+    if(computedDout == null) {
+      val ret = net.getTopLayers(param.getName).map(l => net.getCaffeLayer(l)).toList
+      if(ret.size == 0) throw new LanguageException("Expected at least 1 top layer for " + param.getName)
+      else if(ret.size == 1)     computedDout = ret(0).dX
+      else                       computedDout = sum(new StringBuilder, ret.map(_.dX).toList).toString()
+    }
+    computedDout
+  }
+  val dX = "dOut" + id
+  // --------------------------------------------------------------------------------------
+  // No need to override these methods in subclasses; instead, classes that have weights and biases
+  // should implement the HasWeight and HasBias traits.
+  def dWeight():String = throw new DMLRuntimeException("dWeight is not implemented in super class")
+  def dBias():String = throw new DMLRuntimeException("dBias is not implemented in super class")
+  def weight():String = null;
+  def bias():String = null;
+  def shouldUpdateWeight():Boolean = weight != null
+  def shouldUpdateBias():Boolean = bias != null
+  // --------------------------------------------------------------------------------------
+  // Helper methods to simplify the code of subclasses
+  def invokeInit(dmlScript:StringBuilder, returnVariables:List[String], arguments:String*):Unit = {
+    invoke(dmlScript, sourceFileName + "::", returnVariables, "init", arguments.toList)
+  }
+  def invokeForward(dmlScript:StringBuilder, returnVariables:List[String], arguments:String*):Unit = {
+    invoke(dmlScript, sourceFileName + "::", returnVariables, "forward", arguments.toList)
+  }
+  def invokeBackward(dmlScript:StringBuilder, outSuffix:String, resultVariables:List[String],  arguments:String*):Unit = {
+    invoke(dmlScript, sourceFileName + "::", resultVariables.map(_ + outSuffix), "backward", arguments.toList)
+  }
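+  // For example, for a layer whose sourceFileName is "relu" (id hypothetical), invokeForward(dmlScript, List(out), X)
+  // emits: out3 = relu::forward(out2), and invokeBackward(dmlScript, "_agg", List(dX), dout, X)
+  // emits: dOut3_agg = relu::backward(dOut4,out2)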
+  // --------------------------------------------------------------------------------------
+}
+
+
+trait IsLossLayer extends CaffeLayer {
+  def computeLoss(dmlScript:StringBuilder, numTabs:Int):Unit
+}
+
+trait HasWeight extends CaffeLayer {
+  override def weight = "W" + id
+  override def dWeight = "dW" + id
+}
+
+trait HasBias extends CaffeLayer {
+  override def bias = "b" + id
+  override def dBias = "db" + id
+}
+
+class Data(val param:LayerParameter, val id:Int, val net:CaffeNetwork, val numChannels:String, val height:String, val width:String) extends CaffeLayer {
+  // -------------------------------------------------
+  override def sourceFileName = null
+  override def init(dmlScript:StringBuilder) = {
+    if(param.hasTransformParam && param.getTransformParam.hasScale) {
+      dmlScript.append("X_full = X_full * " + param.getTransformParam.getScale + "\n")
+    }
+    dmlScript.append("BATCH_SIZE = " + param.getDataParam.getBatchSize + "\n")
+  }
+  var dataOutputShape = ("$num_channels", "$height", "$width")
+  override def forward(dmlScript:StringBuilder, isPrediction:Boolean) = { }
+  override def out = "Xb"
+  override def backward(dmlScript:StringBuilder, outSuffix:String) = { }
+  override def outputShape = (numChannels, height, width)
+  // -------------------------------------------------
+}
+
+
+// ------------------------------------------------------------------
+// For BatchNorm, weight stores ema_mean and bias stores ema_var; gamma/beta come from the following Scale layer
+class BatchNorm(val param:LayerParameter, val id:Int, val net:CaffeNetwork) extends CaffeLayer with HasWeight with HasBias {
+  // val scale =  
+  override def sourceFileName = "batch_norm2d"
+  override def init(dmlScript:StringBuilder) = invokeInit(dmlScript, List[String](gamma, beta, ema_mean, ema_var), numChannels)
+  var update_mean_var = true
+  def forward(dmlScript: StringBuilder, isPrediction: Boolean): Unit = {
+    val mode = if(isPrediction) "\"test\"" else "\"train\""
+    invokeForward(dmlScript, List[String](out, withSuffix(ema_mean), withSuffix(ema_var), withSuffix(cache_mean), withSuffix(cache_var), withSuffix(cache_norm)), 
+        X, gamma, beta, numChannels, Hin, Win, mode, ema_mean, ema_var,  ma_fraction, eps)  
+  }
+  
+  def backward(dmlScript: StringBuilder, outSuffix:String): Unit = {
+    invokeBackward(dmlScript, outSuffix, List[String](dX, dgamma, dbeta), dout, out, ema_mean, ema_var, cache_mean, cache_var, cache_norm, X, gamma, beta, numChannels, 
+          Hin, Win, "\"train\"", ema_mean, ema_var,  ma_fraction, eps)
+  }
+  
+  private def withSuffix(str:String):String = if(update_mean_var) str else str + "_ignore"
+  override def weight = "ema_mean" + id
+  override def bias = "ema_var" + id
+  def cache_mean(): String = "cache_mean" + id
+  def cache_var():String = "cache_var" + id
+  def cache_norm():String = "cache_norm" + id
+  var scaleLayer:Scale = null
+  def gamma():String = { checkNextLayer(); scaleLayer.weight }
+  def ma_fraction():String = if(param.getBatchNormParam.hasMovingAverageFraction()) param.getBatchNormParam.getMovingAverageFraction.toString else "0.999"
+  def eps():String = if(param.getBatchNormParam.hasEps()) param.getBatchNormParam.getEps.toString else "1e-5"
+  def beta():String = { checkNextLayer(); scaleLayer.bias }
+  def dgamma():String = { checkNextLayer();  scaleLayer.dWeight }
+  def dbeta():String = { checkNextLayer();  scaleLayer.dBias }
+  override def shouldUpdateWeight():Boolean = false
+  override def shouldUpdateBias():Boolean = false
+  def ema_mean(): String = weight
+  def ema_var(): String = bias
+  def checkNextLayer(): Unit = {
+    if(scaleLayer == null) {
+      val topLayers = net.getTopLayers(param.getName).map(l => net.getCaffeLayer(l)).toList
+      if(topLayers.length != 1 || !topLayers(0).isInstanceOf[Scale]) throw new LanguageException("Only one top layer of type Scale is allowed for BatchNorm")
+      scaleLayer = topLayers(0).asInstanceOf[Scale]
+    }
+  }
+  def numChannels = bottomLayerOutputShape._1
+  def Hin = bottomLayerOutputShape._2
+  def Win = bottomLayerOutputShape._3
+}
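+// A BatchNorm layer is expected to be immediately followed by a Scale layer (with bias_term: true)
+// that provides gamma/beta, e.g. in the prototxt (illustrative):
+//   layer { name: "bn1"    type: "BatchNorm" bottom: "conv1" top: "bn1" }
+//   layer { name: "scale1" type: "Scale"     bottom: "bn1"   top: "scale1" scale_param { bias_term: true } }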
+// weight is gamma and bias is beta
+class Scale(val param:LayerParameter, val id:Int, val net:CaffeNetwork) extends CaffeLayer with HasWeight with HasBias {
+  if(!param.getScaleParam.getBiasTerm) throw new LanguageException("Add \"scale_param { bias_term: true }\" to the layer " + param.getName)
+  override def sourceFileName = null
+  override def init(dmlScript: StringBuilder): Unit = {}
+  def forward(dmlScript: StringBuilder, isPrediction: Boolean): Unit = assign(dmlScript, out, X)
+  override def backward(dmlScript: StringBuilder, outSuffix:String): Unit = assign(dmlScript, dX + outSuffix, dout)
+}
+// ------------------------------------------------------------------
+
+class Elementwise(val param:LayerParameter, val id:Int, val net:CaffeNetwork) extends CaffeLayer {
+  override def sourceFileName = null
+  override def init(dmlScript: StringBuilder): Unit = {}
+  if(param.getEltwiseParam.hasOperation && param.getEltwiseParam.getOperation != EltwiseOp.SUM)
+    throw new LanguageException("Currently only the elementwise sum operation is supported")
+  def forward(dmlScript: StringBuilder, isPrediction: Boolean): Unit = {
+    addAndAssign(dmlScript, out, param.getBottomList.map(b => net.getCaffeLayer(b).out).toList)
+  }
+  override def backward(dmlScript: StringBuilder, outSuffix:String): Unit = assign(dmlScript, dX + outSuffix, dout)
+  override def outputShape = {
+    if(_out == null) _out = net.getCaffeLayer(net.getBottomLayers(param.getName).head).outputShape
+    _out
+  }
+  var _out:(String, String, String) = null
+  
+}
+
+class SoftmaxWithLoss(val param:LayerParameter, val id:Int, val net:CaffeNetwork) extends CaffeLayer with IsLossLayer {
+  // -------------------------------------------------
+  override def sourceFileName = "softmax"
+  override def init(dmlScript:StringBuilder) = {}
+  override def forward(dmlScript:StringBuilder, isPrediction:Boolean) = 
+    invokeForward(dmlScript, List[String](out), scores)
+  override def backward(dmlScript:StringBuilder, outSuffix:String) =  {
+    invoke(dmlScript, "cross_entropy_loss::", List[String]("dProbs" + outSuffix), "backward", out, "yb")
+    invoke(dmlScript.append("\t"), "softmax::", List[String](dX + outSuffix), "backward", "dProbs" + outSuffix, scores)
+  }
+  override def computeLoss(dmlScript:StringBuilder, numTabs:Int) = {
+    val tabBuilder = new StringBuilder
+    for(i <- 0 until numTabs) tabBuilder.append("\t")
+    val tabs = tabBuilder.toString
+    dmlScript.append("tmp_loss = cross_entropy_loss::forward(" + commaSep(out, "yb") + ")\n")
+    dmlScript.append(tabs).append("loss = loss + tmp_loss\n")
+    dmlScript.append(tabs).append("true_yb = rowIndexMax(yb)\n")
+    dmlScript.append(tabs).append("predicted_yb = rowIndexMax(" + out + ")\n")
+    dmlScript.append(tabs).append("accuracy = mean(predicted_yb == true_yb)*100\n")
+  }
+  def scores():String = {
+	  val ret = net.getBottomLayers(param.getName).map(l => net.getCaffeLayer(l)).toList
+	  if(ret.size == 1) return ret.get(0).out
+	  else if(ret.size == 2) {
+		  val ret1 = if(!ret.get(0).out.equals("Xb")) ret.get(0).out else ""; 
+		  val ret2 = if(!ret.get(1).out.equals("Xb")) ret.get(1).out else "";
+		  if(!ret1.equals("") && !ret2.equals("")) throw new LanguageException("At least one of the outputs of the previous layers should be Xb")
+		  else if(!ret1.equals("")) return ret1
+		  else return ret2
+	  }
+	  else 
+		  throw new LanguageException("More than 2 bottom layers are not supported")
+  }
+  // -------------------------------------------------
+}
+
+class ReLU(val param:LayerParameter, val id:Int, val net:CaffeNetwork) extends CaffeLayer {
+  // -------------------------------------------------
+  override def sourceFileName = "relu"
+  override def init(dmlScript:StringBuilder) = { }
+  override def forward(dmlScript:StringBuilder, isPrediction:Boolean) = invokeForward(dmlScript, List[String](out), X)
+  override def backward(dmlScript:StringBuilder, outSuffix:String) = invokeBackward(dmlScript, outSuffix, List[String](dX), dout, X)
+  // -------------------------------------------------
+}
+
+class Dropout(val param:LayerParameter, val id:Int, val net:CaffeNetwork) extends CaffeLayer {
+  // -------------------------------------------------
+  override def sourceFileName = "dropout"
+  override def init(dmlScript:StringBuilder) = { }
+  override def forward(dmlScript:StringBuilder, isPrediction:Boolean) =
+    if(!isPrediction)
+      invokeForward(dmlScript, List[String](out, mask), X, p, seed)
+    else
+      assign(dmlScript, out, X) // Dropout acts as the identity during prediction, so no forward call is needed
+  override def backward(dmlScript:StringBuilder, outSuffix:String) = invokeBackward(dmlScript, outSuffix, List[String](dX), dout, X, p, mask)
+  // -------------------------------------------------
+  def mask = "mask" + id
+  def p = param.getDropoutParam.getDropoutRatio.toString
+  def seed = "-1"
+}
+
+class InnerProduct(val param:LayerParameter, val id:Int, val net:CaffeNetwork) extends CaffeLayer with HasWeight with HasBias {
+  // -------------------------------------------------
+  override def sourceFileName = "affine"
+  override def init(dmlScript:StringBuilder) = invokeInit(dmlScript, List[String](weight, bias), numFeatures, numNeurons)
+  override def forward(dmlScript:StringBuilder, isPrediction:Boolean) = 
+      invokeForward(dmlScript, List[String](out), X, weight, bias)
+  override def backward(dmlScript:StringBuilder, outSuffix:String) = 
+      invokeBackward(dmlScript, outSuffix, List[String](dX, dWeight, dBias), dout, X, weight, bias)
+  // -------------------------------------------------
+  def numNeurons = param.getInnerProductParam.getNumOutput.toString
+  def numFeatures = int_mult(bottomLayerOutputShape._1, bottomLayerOutputShape._2, bottomLayerOutputShape._3)
+  override def outputShape = ( param.getInnerProductParam.getNumOutput.toString, "1", "1" )
+}
+
+class MaxPooling(val param:LayerParameter, val id:Int, val net:CaffeNetwork) extends CaffeLayer {
+  // -------------------------------------------------
+  override def sourceFileName = "max_pool2d_builtin"
+  override def init(dmlScript:StringBuilder) = {}
+  override def forward(dmlScript:StringBuilder, isPrediction:Boolean) = 
+    invokeForward(dmlScript, List[String](out, "ignoreHout_"+id, "ignoreWout_"+id), 
+        X, numChannels, Hin, Win, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w)
+  override def backward(dmlScript:StringBuilder, outSuffix:String) = 
+    invokeBackward(dmlScript, outSuffix, List[String](dX), dout, Hout, Wout, X, numChannels, Hin, Win, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w)
+  override def outputShape = ( numChannels, Hout, Wout )
+  // -------------------------------------------------
+  def Hin = bottomLayerOutputShape._2
+  def Win = bottomLayerOutputShape._3
+  def Hout = ConvolutionUtils.getConv2dOutputMap(bottomLayerOutputShape._2, kernel_h, stride_h, pad_h)
+  def Wout =  ConvolutionUtils.getConv2dOutputMap(bottomLayerOutputShape._3, kernel_w, stride_w, pad_w)
+  def poolingParam = param.getPoolingParam
+  def numChannels = bottomLayerOutputShape._1
+  def kernel_h = if(poolingParam.hasKernelH) poolingParam.getKernelH.toString 
+                   else poolingParam.getKernelSize.toString 
+  def kernel_w = if(poolingParam.hasKernelW) poolingParam.getKernelW.toString 
+                   else poolingParam.getKernelSize.toString
+  def stride_h = if(poolingParam.hasStrideH) poolingParam.getStrideH.toString 
+                   else poolingParam.getStride.toString
+  def stride_w = if(poolingParam.hasStrideW) poolingParam.getStrideW.toString 
+                   else poolingParam.getStride.toString
+  def pad_h =   if(poolingParam.hasPadH) poolingParam.getPadH.toString 
+                   else poolingParam.getPad.toString
+  def pad_w =   if(poolingParam.hasPadW) poolingParam.getPadW.toString 
+                   else poolingParam.getPad.toString
+}
+
+class Convolution(val param:LayerParameter, val id:Int, val net:CaffeNetwork) extends CaffeLayer with HasWeight with HasBias {
+  // -------------------------------------------------
+  override def sourceFileName = "conv2d_builtin";
+  override def init(dmlScript:StringBuilder) = invokeInit(dmlScript, List[String](weight, bias), numKernels, numChannels, kernel_h, kernel_w)
+  override def forward(dmlScript:StringBuilder, isPrediction:Boolean) = 
+    invokeForward(dmlScript, List[String](out, "ignoreHout_"+id, "ignoreWout_"+id), 
+        X, weight, bias, numChannels, Hin, Win, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w)
+  override def backward(dmlScript:StringBuilder, outSuffix:String) = 
+    invokeBackward(dmlScript, outSuffix, List[String](dX, dWeight, dBias), dout, Hout, Wout, X, weight, bias, numChannels, Hin, Win, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w)
+  override def outputShape = ( numKernels, Hout, Wout )
+  // -------------------------------------------------
+  def numChannels = bottomLayerOutputShape._1
+  def Hin = bottomLayerOutputShape._2
+  def Win = bottomLayerOutputShape._3
+  def Hout = ConvolutionUtils.getConv2dOutputMap(bottomLayerOutputShape._2, kernel_h, stride_h, pad_h) 
+  def Wout =  ConvolutionUtils.getConv2dOutputMap(bottomLayerOutputShape._3, kernel_w, stride_w, pad_w)
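+  // Hout/Wout follow the usual convolution output arithmetic computed by
+  // ConvolutionUtils.getConv2dOutputMap, roughly: Hout = (Hin + 2*pad_h - kernel_h) / stride_h + 1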
+  def convParam = param.getConvolutionParam
+  def numKernels = convParam.getNumOutput.toString
+  def kernel_h = if(convParam.hasKernelH) convParam.getKernelH.toString 
+                   else if(convParam.getKernelSizeCount > 0)  convParam.getKernelSize(0).toString 
+                   else throw new LanguageException("Incorrect kernel parameters")
+  def kernel_w = if(convParam.hasKernelW) convParam.getKernelW.toString 
+                   else if(convParam.getKernelSizeCount > 0)  convParam.getKernelSize(0).toString 
+                   else throw new LanguageException("Incorrect kernel parameters")
+  def stride_h = if(convParam.hasStrideH) convParam.getStrideH.toString 
+                   else if(convParam.getStrideCount > 0)  convParam.getStride(0).toString 
+                   else throw new LanguageException("Incorrect stride parameters:" + convParam.getStrideH + " " + convParam.getStrideList + " " + param.getName)
+  def stride_w = if(convParam.hasStrideW) convParam.getStrideW.toString 
+                   else if(convParam.getStrideCount > 0)  convParam.getStride(0).toString 
+                   else throw new LanguageException("Incorrect stride parameters")
+  def pad_h =   if(convParam.hasPadH) convParam.getPadH.toString 
+                   else if(convParam.getPadCount > 0)  convParam.getPad(0).toString 
+                   else throw new LanguageException("Incorrect pad parameters")
+  def pad_w =   if(convParam.hasPadW) convParam.getPadW.toString 
+                   else if(convParam.getPadCount > 0)  convParam.getPad(0).toString 
+                   else throw new LanguageException("Incorrect pad parameters")
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/cc7993fc/src/main/scala/org/apache/sysml/api/dl/CaffeNetwork.scala
----------------------------------------------------------------------
diff --git a/src/main/scala/org/apache/sysml/api/dl/CaffeNetwork.scala b/src/main/scala/org/apache/sysml/api/dl/CaffeNetwork.scala
new file mode 100644
index 0000000..e585e30
--- /dev/null
+++ b/src/main/scala/org/apache/sysml/api/dl/CaffeNetwork.scala
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.sysml.api.dl
+
+import org.apache.sysml.runtime.DMLRuntimeException
+import scala.collection.JavaConversions._
+import caffe.Caffe.NetParameter
+import caffe.Caffe.LayerParameter
+import caffe.Caffe.Phase
+import java.util.ArrayList
+import java.util.HashSet
+import scala.collection.mutable.Stack
+import org.apache.sysml.parser.LanguageException;
+import java.util.HashMap
+import caffe.Caffe.PoolingParameter
+import org.apache.commons.logging.LogFactory
+
+trait Network {
+  def getLayers(): List[String]
+  def getCaffeLayer(layerName:String):CaffeLayer
+  def getBottomLayers(layerName:String): Set[String]
+  def getTopLayers(layerName:String): Set[String]
+  def getLayerID(layerName:String): Int
+}
+
+object CaffeNetwork {
+  val LOG = LogFactory.getLog(classOf[CaffeNetwork].getName)
+}
+
+class CaffeNetwork(netFilePath:String, val currentPhase:Phase, 
+     val numChannels:String, val height:String, val width:String
+    ) extends Network {
+  private def isIncludedInCurrentPhase(l:LayerParameter): Boolean = {
+    if(l.getIncludeCount == 0) true else l.getIncludeList.filter(r => r.hasPhase() && r.getPhase != currentPhase).length == 0
+  }
+  private var id = 1
+  
+  // --------------------------------------------------------------------------------
+  private var _caffeLayerParams:List[LayerParameter] = Utils.readCaffeNet(netFilePath).getLayerList.filter(l => isIncludedInCurrentPhase(l)).toList
+  // --------------------------------------------------------------------------------
+  
+  private var _layerNames: List[String] = _caffeLayerParams.map(l => l.getName).toList
+  CaffeNetwork.LOG.debug("Layers in current phase:" + _layerNames)
+  
+  // Condition 1: assert that each name is unique
+  private val _duplicateLayerNames = _layerNames.diff(_layerNames.distinct)
+  if(_duplicateLayerNames.size != 0) throw new LanguageException("Duplicate layer names are not supported: " + _duplicateLayerNames)
+  
+  // Condition 2: only 1 top name, except Data layer
+  private val _condition2Exceptions = Set("data")
+  _caffeLayerParams.filter(l => !_condition2Exceptions.contains(l.getType.toLowerCase)).map(l => if(l.getTopCount != 1) throw new LanguageException("Multiple top layers are not supported for " + l.getName))
+
+  // Condition 3: Replace top names that refer to a Data layer with the Data layer's name
+  // Example: layer{ name: mnist, top: data, top: label, ... }
+  private val _topToNameMappingForDataLayer = new HashMap[String, String]()
+  private def containsOnly(list:java.util.List[String], v:String): Boolean = list.toSet.diff(Set(v)).size() == 0
+  private def isData(l:LayerParameter):Boolean = l.getType.equalsIgnoreCase("data")
+  private def replaceTopWithNameOfDataLayer(l:LayerParameter):LayerParameter =  {
+    if(containsOnly(l.getTopList,l.getName))
+      return l
+    else {
+      val builder = l.toBuilder(); 
+      for(i <- 0 until l.getTopCount) {
+        if(! l.getTop(i).equals(l.getName)) { _topToNameMappingForDataLayer.put(l.getTop(i), l.getName) }
+        builder.setTop(i, l.getName) 
+      }
+      return builder.build() 
+    }
+  }
+  // 3a: Replace the tops of a Data layer with its name
+  // Example: layer{ name: mnist, top: mnist, top: mnist, ... }
+  _caffeLayerParams = _caffeLayerParams.map(l => if(isData(l)) replaceTopWithNameOfDataLayer(l) else l)
+  private def replaceBottomOfNonDataLayers(l:LayerParameter):LayerParameter = {
+    val builder = l.toBuilder();
+    // Note: Top will never be Data layer
+    for(i <- 0 until l.getBottomCount) {
+      if(_topToNameMappingForDataLayer.containsKey(l.getBottom(i))) 
+        builder.setBottom(i, _topToNameMappingForDataLayer.get(l.getBottom(i)))
+    }
+    return builder.build()
+  }
+  // 3b: If the bottom of a non-Data layer refers to a Data layer's top, replace it with the Data layer's name
+  // Example: layer { name: "conv1_1", type: "Convolution", bottom: "data", ... }
+  _caffeLayerParams = if(_topToNameMappingForDataLayer.size == 0) _caffeLayerParams else _caffeLayerParams.map(l => if(isData(l)) l else replaceBottomOfNonDataLayers(l))
+  
+  // Condition 4: Deal with fused (in-place) layers
+  // Example: layer { name: conv1, top: conv1, ... } layer { name: foo, bottom: conv1, top: conv1 }
+  private def isFusedLayer(l:LayerParameter): Boolean = l.getTopCount == 1 && l.getBottomCount == 1 && l.getTop(0).equalsIgnoreCase(l.getBottom(0))
+  private def containsReferencesToFusedLayer(l:LayerParameter):Boolean = l.getBottomList.foldLeft(false)((prev, bLayer) => prev || _fusedTopLayer.containsKey(bLayer))
+  private val _fusedTopLayer = new HashMap[String, String]()
+  _caffeLayerParams = _caffeLayerParams.map(l => {
+    if(isFusedLayer(l)) {
+      val builder = l.toBuilder();
+      if(_fusedTopLayer.containsKey(l.getBottom(0))) {
+        builder.setBottom(0, _fusedTopLayer.get(l.getBottom(0)))
+      }
+      builder.setTop(0, l.getName)
+      _fusedTopLayer.put(l.getBottom(0), l.getName)
+      builder.build()
+    }
+    else if(containsReferencesToFusedLayer(l)) {
+      val builder = l.toBuilder();
+      for(i <- 0 until l.getBottomCount) {
+        if(_fusedTopLayer.containsKey(l.getBottomList.get(i))) {
+          builder.setBottom(i, _fusedTopLayer.get(l.getBottomList.get(i)))
+        }
+      }
+      builder.build()
+    }
+    else l
+  })
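+  // After this rewrite an in-place layer gets its own top, e.g.
+  //   layer { name: conv1, top: conv1 }  layer { name: relu1, bottom: conv1, top: relu1 }
+  // and later layers whose bottom referred to conv1's (old) top are redirected to relu1.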
+
+  // --------------------------------------------------------------------------------
+  
+  // Helper functions to extract bottom and top layers
+  private def convertTupleListToMap(m:List[(String, String)]):Map[String, Set[String]] = m.groupBy(_._1).map(x => (x._1, x._2.map(y => y._2).toSet)).toMap
+  private def flipKeyValues(t:List[(String, Set[String])]): List[(String, String)] = t.flatMap(x => x._2.map(b => b -> x._1))
+  private def expandBottomList(layerName:String, bottomList:java.util.List[String]): List[(String, String)] = bottomList.filter(b => !b.equals(layerName)).map(b => layerName -> b).toList 
+  
+  // The bottom layers are the layers listed in getBottomList (from the Caffe .proto file)
+  private val _bottomLayers:Map[String, Set[String]] = convertTupleListToMap(
+      _caffeLayerParams.flatMap(l => expandBottomList(l.getName, l.getBottomList)))
+  CaffeNetwork.LOG.info("Bottom layers:" + _bottomLayers)
+  
+  // Find the top layers by reversing the bottom list
+  private val _topLayers:Map[String, Set[String]] = convertTupleListToMap(flipKeyValues(_bottomLayers.toList))
+  CaffeNetwork.LOG.info("Top layers:" + _topLayers)
+  
+  private val _layers: Map[String, CaffeLayer] = _caffeLayerParams.map(l => l.getName -> convertLayerParameterToCaffeLayer(l)).toMap
+  CaffeNetwork.LOG.info("Layers:" + _layers)
+  private val _layerIDs: Map[String, Int] = _layers.map(x => x._1 -> x._2.id).toMap
+  
+  
+  private def throwException(layerName:String) = throw new LanguageException("Layer with name " + layerName + " not found")                              
+  def getLayers(): List[String] =  _layerNames
+  def getCaffeLayer(layerName:String):CaffeLayer = if(checkKey(_layers, layerName)) _layers.get(layerName).get else throwException(layerName)
+  def getBottomLayers(layerName:String): Set[String] =  if(checkKey(_bottomLayers, layerName)) _bottomLayers.get(layerName).get else throwException(layerName)
+  def getTopLayers(layerName:String): Set[String] = if(checkKey(_topLayers, layerName)) _topLayers.get(layerName).get else throwException(layerName)
+  def getLayerID(layerName:String): Int = if(checkKey(_layerIDs, layerName))  _layerIDs.get(layerName).get else throwException(layerName)
+  
+  // Helper functions
+  private def checkKey(m:Map[String, Any], key:String): Boolean = {
+    if(m == null) throw new LanguageException("Map is null")
+    else if(key == null) throw new LanguageException("key is null")
+    else m.containsKey(key)
+  }
+  private def convertLayerParameterToCaffeLayer(param:LayerParameter):CaffeLayer = {
+    id = id + 1
+    param.getType.toLowerCase() match {
+      case "convolution" => new Convolution(param, id, this)
+      case "pooling" => if(param.getPoolingParam.getPool == PoolingParameter.PoolMethod.MAX)  new MaxPooling(param, id, this)
+                        else throw new LanguageException("Only maxpooling is supported:" + param.getPoolingParam.getPool.name)
+      case "innerproduct" => new InnerProduct(param, id, this)
+      case "relu" => new ReLU(param, id, this)
+      case "softmaxwithloss" => new SoftmaxWithLoss(param, id, this)
+      case "dropout" => new Dropout(param, id, this)
+      case "data" => new Data(param, id, this, numChannels, height, width)
+      case "batchnorm" => new BatchNorm(param, id, this)
+      case "scale" => new Scale(param, id, this)
+      case "eltwise" => new Elementwise(param, id, this)
+      case _ => throw new LanguageException("Layer of type " + param.getType + " is not supported")
+    }
+  }
+}
\ No newline at end of file