You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@systemml.apache.org by ni...@apache.org on 2017/04/30 18:16:58 UTC

[3/3] incubator-systemml git commit: [SYSTEMML-769] Adding support for native BLAS for Linux

[SYSTEMML-769] Adding support for native BLAS for Linux

- Both MKL and OpenBLAS show 2-3x performance benefits on conv2d
  operators on Lenet.
- There are several OpenMP related issues on Mac, hence we have explicitly
  disabled native support for Mac and Windows. Since we already have
  cmake setup in place, we can always support BLAS on Mac and Windows in
  future when OpenMP related issues are resolved.

Closes #344.


Project: http://git-wip-us.apache.org/repos/asf/incubator-systemml/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-systemml/commit/39a37ae4
Tree: http://git-wip-us.apache.org/repos/asf/incubator-systemml/tree/39a37ae4
Diff: http://git-wip-us.apache.org/repos/asf/incubator-systemml/diff/39a37ae4

Branch: refs/heads/master
Commit: 39a37ae40c84d934236a7748252b196fb0a8b08d
Parents: 4b6d468
Author: Niketan Pansare <np...@us.ibm.com>
Authored: Sun Apr 30 10:16:23 2017 -0800
Committer: Niketan Pansare <np...@us.ibm.com>
Committed: Sun Apr 30 11:16:23 2017 -0700

----------------------------------------------------------------------
 conf/SystemML-config.xml.template               |   3 +
 docs/native-backend.md                          | 204 ++++++++++
 docs/troubleshooting-guide.md                   |   6 +-
 pom.xml                                         |   4 +
 src/assembly/source.xml                         |   1 +
 src/main/cpp/CMakeLists.txt                     |  76 ++++
 src/main/cpp/check-dependency-linux-x86_64.sh   |  45 +++
 src/main/cpp/cmake/FindMKL.cmake                | 132 +++++++
 src/main/cpp/cmake/FindOpenBLAS.cmake           |  79 ++++
 src/main/cpp/config.h.cmake                     |  30 ++
 .../cpp/lib/libpreload_systemml-Linux-x86_64.so | Bin 0 -> 7976 bytes
 .../cpp/lib/libsystemml_mkl-Linux-x86_64.so     | Bin 0 -> 27408 bytes
 .../lib/libsystemml_openblas-Linux-x86_64.so    | Bin 0 -> 27416 bytes
 src/main/cpp/libmatrixdnn.cpp                   | 333 ++++++++++++++++
 src/main/cpp/libmatrixdnn.h                     |  38 ++
 src/main/cpp/libmatrixmult.cpp                  |  63 +++
 src/main/cpp/libmatrixmult.h                    |  62 +++
 src/main/cpp/preload/preload_systemml.cpp       |  35 ++
 src/main/cpp/preload/preload_systemml.h         |  40 ++
 src/main/cpp/systemml.cpp                       | 225 +++++++++++
 src/main/cpp/systemml.h                         | 106 +++++
 .../java/org/apache/sysml/api/DMLScript.java    |   3 +-
 .../java/org/apache/sysml/conf/DMLConfig.java   |   4 +-
 .../org/apache/sysml/hops/ConvolutionOp.java    |   9 +-
 .../apache/sysml/lops/ConvolutionTransform.java |   5 +-
 .../instructions/CPInstructionParser.java       |   1 +
 .../instructions/GPUInstructionParser.java      |   1 -
 .../cp/AggregateBinaryCPInstruction.java        |  14 +-
 .../cp/ConvolutionCPInstruction.java            |  49 ++-
 .../gpu/ConvolutionGPUInstruction.java          |   9 +-
 .../spark/ConvolutionSPInstruction.java         |  21 +-
 .../matrix/data/ConvolutionParameters.java      |   2 +-
 .../runtime/matrix/data/LibMatrixCUDA.java      |  35 --
 .../sysml/runtime/matrix/data/LibMatrixDNN.java | 387 +++++++++++++------
 .../runtime/matrix/data/LibMatrixMult.java      |  13 +-
 .../runtime/matrix/data/LibMatrixNative.java    | 200 ++++++++++
 .../sysml/runtime/matrix/data/MatrixBlock.java  |  24 +-
 .../sysml/runtime/util/ConvolutionUtils.java    | 116 +++---
 .../apache/sysml/utils/EnvironmentHelper.java   |  27 ++
 .../org/apache/sysml/utils/NativeHelper.java    | 259 +++++++++++++
 .../java/org/apache/sysml/utils/Statistics.java |  19 +
 .../test/integration/AutomatedTestBase.java     |   5 +-
 .../functions/tensor/Conv2DTest.java            |  25 +-
 .../functions/tensor/Conv2DBackwardDataTest.R   |   6 +-
 .../functions/tensor/Conv2DBackwardDataTest.dml |   8 +-
 .../functions/tensor/Conv2DBackwardTest.R       |   6 +-
 .../functions/tensor/Conv2DBackwardTest.dml     |   8 +-
 src/test/scripts/functions/tensor/Conv2DTest.R  |   6 +-
 .../scripts/functions/tensor/Conv2DTest.dml     |   8 +-
 src/test/scripts/functions/tensor/PoolTest.R    |   3 +-
 src/test/scripts/functions/tensor/PoolTest.dml  |   4 +-
 51 files changed, 2469 insertions(+), 290 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/conf/SystemML-config.xml.template
----------------------------------------------------------------------
diff --git a/conf/SystemML-config.xml.template b/conf/SystemML-config.xml.template
index fe4437f..0ccf7da 100644
--- a/conf/SystemML-config.xml.template
+++ b/conf/SystemML-config.xml.template
@@ -65,6 +65,9 @@
    
    <!-- if codegen.enabled, compile literals as constants: 1..heuristic, 2..always -->
    <codegen.literals>1</codegen.literals>
+   
+   <!-- enables native blas for matrix multiplication and convolution, experimental feature -->
+   <native.blas>true</native.blas>
 
    <!-- prints extra statistics information for GPU -->
    <systemml.stats.extraGPU>false</systemml.stats.extraGPU>

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/docs/native-backend.md
----------------------------------------------------------------------
diff --git a/docs/native-backend.md b/docs/native-backend.md
new file mode 100644
index 0000000..86a1340
--- /dev/null
+++ b/docs/native-backend.md
@@ -0,0 +1,204 @@
+<!--
+{% comment %}
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to you under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+{% endcomment %}
+-->
+
+# User Guide
+
+By default, SystemML implements all its matrix operations in Java.
+This simplifies deployment especially in a distributed environment.
+
+In some cases (such as deep learning), the user might want to use native BLAS
+rather than SystemML's internal Java library for performing single-node
+operations such as matrix multiplication, convolution, etc.
+By default, SystemML will first attempt to use Intel MKL (if installed)
+and then OpenBLAS (if installed).
+If neither Intel MKL nor OpenBLAS is available, SystemML
+falls back to its internal Java library.
+
+To force SystemML to use its internal Java library rather than native BLAS,
+please set the configuration property `native.blas` to `false`.
+
+The current version of SystemML only supports BLAS on Linux machines.
+
+
+## Step 1: Install BLAS
+
+### Option 1: Install Intel MKL (recommended)
+
+Download and install the [community version of Intel MKL](https://software.intel.com/sites/campaigns/nest/).
+Intel requires you to first register your email address and then sends the download link to your email address
+with license key.
+
+* Linux users will have to extract the downloaded `.tgz` file, execute `install.sh` and follow the guided setup.
+
+### Option 2: Install OpenBLAS  
+
+```bash
+# The default OpenBLAS (via yum/apt-get) uses its internal threading rather than OpenMP, 
+# which can lead to performance degradation when using SystemML. So, instead we recommend that you
+# compile OpenBLAS from the source. 
+# RedHat / CentOS: sudo yum install openblas
+# Ubuntu: sudo apt-get install openblas
+git clone https://github.com/xianyi/OpenBLAS.git
+cd OpenBLAS/
+make clean
+make USE_OPENMP=1
+sudo make install
+# After installation, you may also want to add `/opt/OpenBLAS/lib` to your LD_LIBRARY_PATH or `java.library.path`.
+```
+
+You can check whether the OpenBLAS on your system is compiled with OpenMP using the following commands:
+
+```bash
+$ ldconfig -p | grep libopenblas.so
+libopenblas.so (libc6,x86-64) => /opt/OpenBLAS/lib/libopenblas.so
+$ ldd /opt/OpenBLAS/lib/libopenblas.so | grep libgomp
+libgomp.so.1 => /lib64/libgomp.so.1
+```
+
+If you don't see any output after the second command, then OpenBLAS installed on your system is using its internal threading.
+In this case, we highly recommend that you reinstall OpenBLAS using the above commands.
+
+
+## Step 2: Install other dependencies
+
+```bash
+# Centos/RedHat
+sudo yum install gcc-c++
+# Ubuntu
+sudo apt-get install g++ 
+```
+
+We also depend on GNU OpenMP (gomp) which will be installed by GCC.
+To find the location of `gomp` on your system, please use the command `ldconfig -p | grep libgomp`.
+If gomp is available as `/lib64/libgomp.so.1` instead of `/lib64/libgomp.so`,
+please add a softlink to it:
+
+```bash
+sudo ln -s /lib64/libgomp.so.1 /lib64/libgomp.so
+```
+	
+## Step 3: Provide the location of the native libraries
+
+1. Add the location of the native libraries (i.e. BLAS and other dependencies) 
+to the environment variable `LD_LIBRARY_PATH` (on Linux). 
+If you want to use SystemML with Spark, please add the following line to `spark-env.sh`
+
+	```bash
+	export LD_LIBRARY_PATH=/path/to/blas-n-other-dependencies
+	# Or export SPARK_LIBRARY_PATH=/path/to/blas-n-other-dependencies
+	```
+
+2. Alternatively, you can pass the location of the native libraries using command-line options:
+
+- Java: `-Djava.library.path=/path/to/blas-n-other-dependencies`
+- [Spark](http://spark.apache.org/docs/latest/configuration.html): `--driver-library-path`
+
+## Common issues on Linux
+
+1. Unable to load `gomp`
+
+First make sure if gomp is available on your system.
+
+	```bash
+	ldconfig -p | grep  libgomp
+	```
+
+If the above command returns no results, then you may have to install `gcc`.
+On the other hand, if the above command only returns libgomp with major suffix (such as `so.1`),
+then please execute the below command:
+
+	```bash
+	sudo ln -s /lib64/libgomp.so.1 /usr/lib64/libgomp.so
+	```
+
+2. Unable to load `mkl_rt`
+
+By default, Intel MKL libraries will be installed in the location `/opt/intel/mkl/lib/intel64/`.
+Make sure that this path is accessible to Java as per instructions provided in the above section.
+
+3. Unable to load `openblas`
+
+By default, OpenBLAS libraries will be installed in the location `/opt/OpenBLAS/lib/`.
+Make sure that this path is accessible to Java as per instructions provided in the above section.
+
+# Developer Guide
+
+This section describes how to compile shared libraries in the folder `src/main/cpp/lib`. 
+This is required when the developer makes changes to cpp directory or while validating the source package during the release process.
+
+To force SystemML to use OpenBLAS instead of Intel MKL if both are installed,
+please set the environment variable `SYSTEMML_BLAS` to `openblas`.
+This environment variable is used internally for testing and is not required for users.
+
+## Intro to CMake
+If you are familiar with cmake, skip this section.
+
+In a regular project with a Makefile, the compiled object files are placed in the same directory as the source.
+Sometimes we don't want to pollute the source tree. We might also want to have different binaries for different configurations. For instance, if we want to link a binary with separate libraries.
+CMake supports out of source tree builds. As an illustration, you can create a directory called "BUILD" and invoke cmake like so : `cmake <path/to/source>`. The makefile and other config files are placed in this "BUILD" directory. You can now say `make` and the compiled objects and binary files are created in this directory. You can then create another "BUILD2" directory and repeat the process.
+You can pass options to cmake as well. In this instance, it might be to specify whether to build with Intel MKL or OpenBLAS. This can be done from the command line with a "-D" appended to it, but more interestingly, it can also be done from an ncurses GUI which is invoked as `ccmake <path/to/source>`. (You may need to install this separately).
+Also, the C, C++ compilers and their flags are picked up by cmake when set in standard environment variables. These are respectively `CC`, `CXX`, `CFLAGS` & `CXXFLAGS`. As an example, they may be specified as:
+
+	CXX=gcc-6 cmake ..
+
+For this project, I typically make a directory in the `cpp` folder (this folder) and name it the config I use. For instance, `INTEL` for Intel MKL and `OPENBLAS` for OpenBLAS.
+
+1. Install `g++`, OpenBLAS and MKL using the above instructions
+
+2. Set `JAVA_HOME` to JDK.
+
+	export JAVA_HOME=<path to JDK 1.8>
+
+3. Install cmake
+
+	```bash
+	# Centos/RedHat
+	sudo yum install cmake3
+	# Ubuntu
+	sudo apt-get install cmake
+	```
+
+4. Compile the libs using the below script. 
+
+	```bash
+	mkdir INTEL && cd INTEL
+	cmake -DUSE_INTEL_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ -DCMAKE_CXX_FLAGS="-DUSE_GNU_THREADING -m64" ..
+	make install
+	cd ..
+	mkdir OPENBLAS && cd OPENBLAS
+	cmake -DUSE_OPEN_BLAS=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ -DCMAKE_CXX_FLAGS="-m64" ..
+	make install
+	cd ..
+	# The below script helps maintain this document as well as avoid accidental inclusion of non-standard dependencies.
+	./check-dependency-linux-x86_64.sh
+	```
+
+
+The generated library files are placed in src/main/cpp/lib. This location can be changed from the CMakeLists.txt file.
+
+The above script also validates whether additional dependencies have been added while compiling and warns the developer.  
+The current set of dependencies, other than MKL and OpenBLAS, is as follows:
+
+- GNU Standard C++ Library: `libstdc++.so.6`
+- GCC version 4.8 shared support library: `libgcc_s.so.1`
+- The GNU libc libraries: `libm.so.6, libdl.so.2, libc.so.6, libpthread.so.0`
+- GCC OpenMP v3.0 shared support library: `libgomp.so.1`
+- Additional OpenBLAS dependencies: Fortran runtime (`libgfortran.so.3`) and GCC `__float128` shared support library (`libquadmath.so.0`)
+
+If CMake cannot detect your OpenBLAS installation, set the `OpenBLAS_HOME` environment variable to the OpenBLAS Home.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/docs/troubleshooting-guide.md
----------------------------------------------------------------------
diff --git a/docs/troubleshooting-guide.md b/docs/troubleshooting-guide.md
index 4731f51..e80cad4 100644
--- a/docs/troubleshooting-guide.md
+++ b/docs/troubleshooting-guide.md
@@ -135,4 +135,8 @@ by setting the configuration `systemml.stats.extraGPU` and `systemml.stats.extra
 
 Out-Of-Memory on executors is often caused due to side-effects of lazy evaluation and in-memory input data of Spark for large-scale problems. 
 Though we are constantly improving our optimizer to address this scenario, a quick hack to resolve this is reducing the number of cores allocated to the executor.
-We would highly appreciate if you file a bug report on our [issue tracker](https://issues.apache.org/jira/browse/SYSTEMML) if and when you encounter OOM.
\ No newline at end of file
+We would highly appreciate if you file a bug report on our [issue tracker](https://issues.apache.org/jira/browse/SYSTEMML) if and when you encounter OOM.
+
+## Native BLAS errors
+
+Please see [the user guide of native backend](http://apache.github.io/incubator-systemml/native-backend).
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index f4f6016..9a33853 100644
--- a/pom.xml
+++ b/pom.xml
@@ -111,6 +111,10 @@
 			</excludes>
 			<targetPath>kernels</targetPath>
 		</resource>
+		<resource>
+			<directory>src/main/cpp/lib</directory>
+			<targetPath>lib</targetPath>
+		</resource>
 	</resources>
 
 		<plugins>

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/assembly/source.xml
----------------------------------------------------------------------
diff --git a/src/assembly/source.xml b/src/assembly/source.xml
index 8831dd9..4acb5b3 100644
--- a/src/assembly/source.xml
+++ b/src/assembly/source.xml
@@ -51,6 +51,7 @@
 				<exclude>**/temp/**/*</exclude>
 				<!--SYSTEML-479 -->
 				<exclude>./src/test/config/hadoop_bin_windows/**</exclude>
+				<exclude>./src/main/cpp/lib/**</exclude>
 			</excludes>
 		</fileSet>
 

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/cpp/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/src/main/cpp/CMakeLists.txt b/src/main/cpp/CMakeLists.txt
new file mode 100644
index 0000000..c492959
--- /dev/null
+++ b/src/main/cpp/CMakeLists.txt
@@ -0,0 +1,76 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+cmake_minimum_required(VERSION 2.8)
+
+project (systemml)
+
+# All custom find modules
+set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/")
+
+# OpenMP is required
+find_package(OpenMP REQUIRED)
+
+# Options to Use OpenBLAS or Intel MKL
+option(USE_OPEN_BLAS "Whether to use OpenBLAS (Defaults to compiling with Intel MKL, if both set, MKL has priority)" OFF)
+option(USE_INTEL_MKL "Whether to use Intel MKL (Defaults to compiling with Intel MKL)" ON)
+
+# Build a shared library
+add_library(systemml SHARED libmatrixdnn.cpp  libmatrixmult.cpp  systemml.cpp)
+add_library(preload SHARED preload/preload_systemml.cpp)
+
+set(MATH_LIBRARIES "")
+
+# sets the installation path to src/main/cpp/lib
+set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR})
+install(TARGETS systemml preload LIBRARY DESTINATION lib)
+
+set(CMAKE_BUILD_TYPE Release)
+
+set_target_properties(preload PROPERTIES OUTPUT_NAME "preload_systemml-${CMAKE_SYSTEM_NAME}-${CMAKE_SYSTEM_PROCESSOR}")
+
+if (USE_OPEN_BLAS)
+  find_package(OpenBLAS REQUIRED)
+  # sets the name of the output to include the os and the architecture
+  set_target_properties(systemml PROPERTIES OUTPUT_NAME "systemml_openblas-${CMAKE_SYSTEM_NAME}-${CMAKE_SYSTEM_PROCESSOR}")
+  include_directories(${OpenBLAS_INCLUDE_DIR})
+  set(MATH_LIBRARIES "${OpenBLAS_LIB}")
+elseif(USE_INTEL_MKL)
+  find_package(MKL REQUIRED)
+  # sets the name of the output to include the os and the architecture
+  set_target_properties(systemml PROPERTIES OUTPUT_NAME "systemml_mkl-${CMAKE_SYSTEM_NAME}-${CMAKE_SYSTEM_PROCESSOR}")
+  include_directories(${MKL_INCLUDE_DIR})
+  set(MATH_LIBRARIES "${MKL_LIBRARIES}")
+endif()
+
+# Option written to a config.h file
+configure_file(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h)
+
+# Include directories. (added for Linux & Darwin, fix later for windows)
+# include paths can be spurious
+include_directories($ENV{JAVA_HOME}/include/)
+include_directories($ENV{JAVA_HOME}/include/darwin)
+include_directories($ENV{JAVA_HOME}/include/linux)
+include_directories($ENV{JAVA_HOME}/include/win32)
+
+# Include the binary dir which contains "config.h"
+include_directories(${CMAKE_BINARY_DIR})
+
+
+# Setting CXX compiler flags
+set_target_properties(systemml PROPERTIES LINK_FLAGS "${OpenMP_CXX_FLAGS} ${MATH_LIBRARIES}")

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/cpp/check-dependency-linux-x86_64.sh
----------------------------------------------------------------------
diff --git a/src/main/cpp/check-dependency-linux-x86_64.sh b/src/main/cpp/check-dependency-linux-x86_64.sh
new file mode 100755
index 0000000..40f0bb0
--- /dev/null
+++ b/src/main/cpp/check-dependency-linux-x86_64.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#   http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+# This shell script checks the dependencies of the compiled shared libraries for 64-bit Linux on x86 machines
+
+# yum whatprovides libgcc_s.so.1
+# GNU Standard C++ Library: libstdc++.so.6
+# GCC version 4.8 shared support library: libgcc_s.so.1
+# The GNU libc libraries: libm.so.6, libdl.so.2, libc.so.6, libpthread.so.0
+# GCC OpenMP v3.0 shared support library: libgomp.so.1
+gcc_toolkit="libgcc_s.so\|libm.so\|libstdc++\|libc.so\|libdl.so\|libgomp.so\|libpthread.so"
+linux_loader="linux-vdso.so\|ld-linux-x86-64.so"
+intel_mkl="libmkl_rt.so"
+
+# Fortran runtime: libgfortran.so.3
+# GCC __float128 shared support library: libquadmath.so.0
+openblas="libopenblas.so\|libgfortran.so\|libquadmath.so"
+
+echo "-----------------------------------------------------------------------"
+echo "Check for unexpected dependencies added after code change or new setup:"
+echo "Non-standard dependencies for libpreload_systemml-linux-x86_64.so"
+ldd lib/libpreload_systemml-Linux-x86_64.so | grep -v $gcc_toolkit"\|"$linux_loader
+echo "Non-standard dependencies for libsystemml_mkl-linux-x86_64.so"
+ldd lib/libsystemml_mkl-Linux-x86_64.so | grep -v $gcc_toolkit"\|"$linux_loader"\|"$intel_mkl
+echo "Non-standard dependencies for libsystemml_openblas-linux-x86_64.so"
+ldd lib/libsystemml_openblas-Linux-x86_64.so | grep -v $gcc_toolkit"\|"$linux_loader"\|"$openblas
+echo "-----------------------------------------------------------------------"

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/cpp/cmake/FindMKL.cmake
----------------------------------------------------------------------
diff --git a/src/main/cpp/cmake/FindMKL.cmake b/src/main/cpp/cmake/FindMKL.cmake
new file mode 100644
index 0000000..f2a50eb
--- /dev/null
+++ b/src/main/cpp/cmake/FindMKL.cmake
@@ -0,0 +1,132 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+# Find the MKL libraries
+#
+# Options:
+#
+#   MKL_USE_SINGLE_DYNAMIC_LIBRARY  : use single dynamic library interface
+#   MKL_USE_STATIC_LIBS             : use static libraries
+#   MKL_MULTI_THREADED              : use multi-threading
+#
+# This module defines the following variables:
+#
+#   MKL_FOUND            : True if MKL is found
+#   MKL_INCLUDE_DIR      : include directory
+#   MKL_LIBRARIES        : the libraries to link against.
+
+
+# ---[ Options
+option(MKL_USE_SINGLE_DYNAMIC_LIBRARY "Use single dynamic library interface" ON)
+option(MKL_USE_STATIC_LIBS "Use static libraries" OFF)
+option(MKL_MULTI_THREADED  "Use multi-threading"   ON)
+
+# ---[ Root folders
+set(INTEL_ROOT "/opt/intel" CACHE PATH "Folder contains intel libs")
+find_path(MKL_ROOT include/mkl.h PATHS $ENV{MKLROOT} ${INTEL_ROOT}/mkl
+                                   DOC "Folder contains MKL")
+
+# ---[ Find include dir
+find_path(MKL_INCLUDE_DIR mkl.h PATHS ${MKL_ROOT} PATH_SUFFIXES include)
+set(__looked_for MKL_INCLUDE_DIR)
+
+# ---[ Find libraries
+if(CMAKE_SIZEOF_VOID_P EQUAL 4)
+  set(__path_suffixes lib lib/ia32)
+else()
+  set(__path_suffixes lib lib/intel64)
+endif()
+
+set(__mkl_libs "")
+if(MKL_USE_SINGLE_DYNAMIC_LIBRARY)
+  list(APPEND __mkl_libs rt)
+else()
+  if(CMAKE_SIZEOF_VOID_P EQUAL 4)
+    if(WIN32)
+      list(APPEND __mkl_libs intel_c)
+    else()
+      list(APPEND __mkl_libs intel gf)
+    endif()
+  else()
+    list(APPEND __mkl_libs intel_lp64 gf_lp64)
+  endif()
+
+  if(MKL_MULTI_THREADED)
+    list(APPEND __mkl_libs intel_thread)
+  else()
+     list(APPEND __mkl_libs sequential)
+  endif()
+
+  list(APPEND __mkl_libs core cdft_core)
+endif()
+
+
+foreach (__lib ${__mkl_libs})
+  set(__mkl_lib "mkl_${__lib}")
+  string(TOUPPER ${__mkl_lib} __mkl_lib_upper)
+
+  if(MKL_USE_STATIC_LIBS)
+    set(__mkl_lib "lib${__mkl_lib}.a")
+  endif()
+
+  find_library(${__mkl_lib_upper}_LIBRARY
+        NAMES ${__mkl_lib}
+        PATHS ${MKL_ROOT} "${MKL_INCLUDE_DIR}/.."
+        PATH_SUFFIXES ${__path_suffixes}
+        DOC "The path to Intel(R) MKL ${__mkl_lib} library")
+  mark_as_advanced(${__mkl_lib_upper}_LIBRARY)
+
+  list(APPEND __looked_for ${__mkl_lib_upper}_LIBRARY)
+  list(APPEND MKL_LIBRARIES ${${__mkl_lib_upper}_LIBRARY})
+endforeach()
+
+
+if(NOT MKL_USE_SINGLE_DYNAMIC_LIBRARY)
+  if (MKL_USE_STATIC_LIBS)
+    set(__iomp5_libs iomp5 libiomp5mt.lib)
+  else()
+    set(__iomp5_libs iomp5 libiomp5md.lib)
+  endif()
+
+  if(WIN32)
+    find_path(INTEL_INCLUDE_DIR omp.h PATHS ${INTEL_ROOT} PATH_SUFFIXES include)
+    list(APPEND __looked_for INTEL_INCLUDE_DIR)
+  endif()
+
+  find_library(MKL_RTL_LIBRARY ${__iomp5_libs}
+     PATHS ${INTEL_RTL_ROOT} ${INTEL_ROOT}/compiler ${MKL_ROOT}/.. ${MKL_ROOT}/../compiler
+     PATH_SUFFIXES ${__path_suffixes}
+     DOC "Path to Path to OpenMP runtime library")
+
+  list(APPEND __looked_for MKL_RTL_LIBRARY)
+  list(APPEND MKL_LIBRARIES ${MKL_RTL_LIBRARY})
+endif()
+
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(MKL DEFAULT_MSG ${__looked_for})
+
+if(MKL_FOUND)
+  message(STATUS "Found MKL (include: ${MKL_INCLUDE_DIR}, lib: ${MKL_LIBRARIES}")
+endif()
+
+unset(__looked_for)
+unset(__mkl_libs)
+unset(__path_suffixes)
+unset(__lib_suffix)
+unset(__iomp5_libs)

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/cpp/cmake/FindOpenBLAS.cmake
----------------------------------------------------------------------
diff --git a/src/main/cpp/cmake/FindOpenBLAS.cmake b/src/main/cpp/cmake/FindOpenBLAS.cmake
new file mode 100644
index 0000000..bd6f2aa
--- /dev/null
+++ b/src/main/cpp/cmake/FindOpenBLAS.cmake
@@ -0,0 +1,79 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+SET(Open_BLAS_INCLUDE_SEARCH_PATHS
+  /usr/include
+  /usr/include/openblas
+  /usr/include/openblas-base
+  /usr/local/include
+  /usr/local/include/openblas
+  /usr/local/include/openblas-base
+  /opt/OpenBLAS/include
+  $ENV{OpenBLAS_HOME}
+  $ENV{OpenBLAS_HOME}/include
+)
+
+SET(Open_BLAS_LIB_SEARCH_PATHS
+        /lib/
+        /lib/openblas-base
+        /lib64/
+        /usr/lib
+        /usr/lib/openblas-base
+        /usr/lib64
+        /usr/local/lib
+        /usr/local/lib64
+        /opt/OpenBLAS/lib
+        $ENV{OpenBLAS}cd
+        $ENV{OpenBLAS}/lib
+        $ENV{OpenBLAS_HOME}
+        $ENV{OpenBLAS_HOME}/lib
+ )
+
+FIND_PATH(OpenBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${Open_BLAS_INCLUDE_SEARCH_PATHS})
+FIND_LIBRARY(OpenBLAS_LIB NAMES openblas PATHS ${Open_BLAS_LIB_SEARCH_PATHS})
+
+SET(OpenBLAS_FOUND ON)
+
+#    Check include files
+IF(NOT OpenBLAS_INCLUDE_DIR)
+    SET(OpenBLAS_FOUND OFF)
+    MESSAGE(STATUS "Could not find OpenBLAS include. Turning OpenBLAS_FOUND off")
+ENDIF()
+
+#    Check libraries
+IF(NOT OpenBLAS_LIB)
+    SET(OpenBLAS_FOUND OFF)
+    MESSAGE(STATUS "Could not find OpenBLAS lib. Turning OpenBLAS_FOUND off")
+ENDIF()
+
+IF (OpenBLAS_FOUND)
+  IF (NOT OpenBLAS_FIND_QUIETLY)
+    MESSAGE(STATUS "Found OpenBLAS libraries: ${OpenBLAS_LIB}")
+    MESSAGE(STATUS "Found OpenBLAS include: ${OpenBLAS_INCLUDE_DIR}")
+  ENDIF (NOT OpenBLAS_FIND_QUIETLY)
+ELSE (OpenBLAS_FOUND)
+  IF (OpenBLAS_FIND_REQUIRED)
+    MESSAGE(FATAL_ERROR "Could not find OpenBLAS")
+  ENDIF (OpenBLAS_FIND_REQUIRED)
+ENDIF (OpenBLAS_FOUND)
+
+MARK_AS_ADVANCED(
+    OpenBLAS_INCLUDE_DIR
+    OpenBLAS_LIB
+    OpenBLAS
+)

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/cpp/config.h.cmake
----------------------------------------------------------------------
diff --git a/src/main/cpp/config.h.cmake b/src/main/cpp/config.h.cmake
new file mode 100644
index 0000000..e3f9407
--- /dev/null
+++ b/src/main/cpp/config.h.cmake
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef CONFIG_H
+#define CONFIG_H
+
+/* Whether to use Open Blas */
+#cmakedefine USE_OPEN_BLAS
+
+/* Whether to use Intel MKL */
+#cmakedefine USE_INTEL_MKL
+
+
+#endif // CONFIG_H

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/cpp/lib/libpreload_systemml-Linux-x86_64.so
----------------------------------------------------------------------
diff --git a/src/main/cpp/lib/libpreload_systemml-Linux-x86_64.so b/src/main/cpp/lib/libpreload_systemml-Linux-x86_64.so
new file mode 100755
index 0000000..07e89be
Binary files /dev/null and b/src/main/cpp/lib/libpreload_systemml-Linux-x86_64.so differ

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/cpp/lib/libsystemml_mkl-Linux-x86_64.so
----------------------------------------------------------------------
diff --git a/src/main/cpp/lib/libsystemml_mkl-Linux-x86_64.so b/src/main/cpp/lib/libsystemml_mkl-Linux-x86_64.so
new file mode 100755
index 0000000..0a6427a
Binary files /dev/null and b/src/main/cpp/lib/libsystemml_mkl-Linux-x86_64.so differ

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/cpp/lib/libsystemml_openblas-Linux-x86_64.so
----------------------------------------------------------------------
diff --git a/src/main/cpp/lib/libsystemml_openblas-Linux-x86_64.so b/src/main/cpp/lib/libsystemml_openblas-Linux-x86_64.so
new file mode 100755
index 0000000..ffdcd5a
Binary files /dev/null and b/src/main/cpp/lib/libsystemml_openblas-Linux-x86_64.so differ

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/cpp/libmatrixdnn.cpp
----------------------------------------------------------------------
diff --git a/src/main/cpp/libmatrixdnn.cpp b/src/main/cpp/libmatrixdnn.cpp
new file mode 100644
index 0000000..a521804
--- /dev/null
+++ b/src/main/cpp/libmatrixdnn.cpp
@@ -0,0 +1,333 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include "config.h"
+#include "libmatrixmult.h"
+#include "libmatrixdnn.h"
+#include <cstdlib>
+#include <iostream>
+#include <cstdio>
+#include <cmath>
+#include <cstring>
+#include "omp.h"
+
+void rotate180(double* inputArray, double* outputArray, int N, int C, int H, int W,
+            int K, int R, int S, int stride_h, int stride_w, int pad_h,
+            int pad_w, int P, int Q) {
+    // Re-lays out one image's dout slice from (K x P x Q) into (P x Q x K):
+    // outputArray[p*K*Q + q*K + k] = inputArray[k*P*Q + p*Q + q].
+    // Despite the name, this is a pure layout transpose used to prepare dout
+    // for the GEMM calls in the conv2d backward passes. N, C, H, W and the
+    // stride/pad parameters are unused; they are kept so all lowering helpers
+    // share a uniform signature.
+    int PQ = P*Q;
+    int KQ = K*Q;
+	for (int k = 0; k < K; k++) {
+		for (int p = 0; p < P; p++) {
+			for (int q = 0; q < Q; q++) {
+				outputArray[p*KQ + q*K + k] = inputArray[k*PQ + p*Q + q];
+			}
+		}
+	}
+}
+
+void col2im(double* inputArray, double* outputArray, int N, int C, int H, int W,
+            int K, int R, int S, int stride_h, int stride_w, int pad_h,
+            int pad_w, int P, int Q) {
+	// Inverse of im2col for one image: scatters the lowered matrix
+	// inputArray (P*Q x C*R*S, row-major over (p,q)) back into the image
+	// outputArray (C x H x W). Overlapping patch positions are ACCUMULATED
+	// (+=), so the caller must provide a zero-initialized outputArray.
+	// N and K are unused (uniform signature with the other helpers).
+	for (int p = 0; p < P; p++) {
+		// h = p*stride_h + r - pad_h
+		//   = r + hOffset
+		// Based on restrictions: h >= 0 and r >= 0 and h < H and r < R, we get
+		// max(0, - hOffset) <= r < min(R, H - hOffset)
+		int hOffset = p*stride_h - pad_h;
+		int rStart = MAX(0, - hOffset);
+		int rEnd = MIN(R, H - hOffset);
+		for (int q = 0; q < Q; q++) {
+			// Using the same logic as above on following:
+			// w = q*stride_w + s - pad_w
+			int wOffset = q*stride_w - pad_w;
+			int sStart = MAX(0, - wOffset);
+			int sEnd = MIN(S, W - wOffset);
+			int tempOffset = (p*Q + q)*C*R*S;
+			for (int c = 0; c < C; c++) {
+				int outOffset = c*H*W;
+				int inputOffset = tempOffset + c*R*S;
+				for (int r = rStart; r < rEnd; r++) {
+					for (int s = sStart; s < sEnd; s++) {
+						int inputIndex = inputOffset + r*S + s;
+						int outIndex = outOffset + (hOffset + r)*W + wOffset + s;
+						outputArray[outIndex] += inputArray[inputIndex];
+					}
+				}
+			}
+		}
+	}
+}
+
+void im2col(double* inputArray, double* outputArray, int N, int C, int H, int W,
+            int K, int R, int S, int stride_h, int stride_w, int pad_h,
+            int pad_w, int P, int Q) {
+  // Lowers one image (C x H x W) into the im2col matrix (C*R*S x P*Q) so that
+  // a convolution becomes a single GEMM against the (K x C*R*S) filter.
+  // Every output element is written (value or explicit 0), so outputArray
+  // needs no pre-zeroing. N and K are unused (uniform helper signature).
+  int CRS = C * R * S;
+  std::size_t size = Q * sizeof(double);
+  if (stride_h == 1 && stride_w == 1 && pad_h == 0 && pad_w == 0) {
+    // Fast path: with unit stride and no padding each output row is a
+    // contiguous run of the input row, so bulk-copy Q doubles and then
+    // re-write only the final element, which may fall outside the image.
+    for (int c = 0; c < CRS; ++c) {
+      int wOffset = c % S;
+      int hOffset = (c / S) % R;
+      int cInput = c / R / S;
+      for (int h = 0; h < P; ++h) {
+        int hPadded = h + hOffset;
+        int outOffset = (c * P + h) * Q;
+        int inputOffset = (cInput * H + hPadded) * W;
+        std::memcpy(outputArray + outOffset, inputArray + inputOffset + wOffset,
+                    size);
+        int w = Q - 1;
+        int wPadded = w + wOffset;
+        if (hPadded < H && wPadded < W)
+          outputArray[outOffset + w] = inputArray[inputOffset + wPadded];
+        else
+          outputArray[outOffset + w] = 0;
+      }
+    }
+  } else {
+    // General path: compute the padded input coordinate per element and
+    // emit zeros for coordinates that land in the padding region.
+    for (int c = 0; c < CRS; ++c) {
+      int wOffset = c % S;
+      int hOffset = (c / S) % R;
+      int cInput = c / R / S;
+      for (int h = 0; h < P; ++h) {
+        int outOffset = (c * P + h) * Q;
+        int hPadded = h * stride_h - pad_h + hOffset;
+        int inputOffset = (cInput * H + hPadded) * W;
+        if (hPadded < 0 || hPadded >= H) {
+          std::memset(outputArray + outOffset, 0, size);
+        } else {
+          for (int w = 0; w < Q; ++w) {
+            int wPadded = w * stride_w - pad_w + wOffset;
+            if (wPadded >= 0 && wPadded < W)
+              outputArray[outOffset + w] = inputArray[inputOffset + wPadded];
+            else
+              outputArray[outOffset + w] = 0;
+          }
+        }
+      }
+    }
+  }
+} 
+
+
+void conv2dBackwardFilterDense(double* inputPtr, double* doutPtr, double* retPtr, int N, int C, int H, int W, int K, int R, int S,
+    int stride_h, int stride_w, int pad_h, int pad_w, int P, int Q, int numThreads) {
+  // Filter gradient of conv2d: per image n, lower input via im2col
+  // (CRS x PQ), re-layout dout via rotate180 (PQ x K), GEMM into a
+  // per-thread CRS x K partial sum, then accumulate the transposed partials
+  // into retPtr (K x CRS) after the parallel region.
+  // First step: Avoids oversubscription and other openmp/internal blas threading issues
+  setNumThreadsForBLAS(1);
+  
+  int CHW = C * H * W;
+  int CRS = C*R*S;
+  int PQ = P*Q;
+  int KPQ = K*PQ;
+  int numRotatedElem = KPQ;
+  int numIm2ColElem = CRS * PQ;
+  int numTempElem = CRS * K;
+  
+  // NOTE(review): m1, n1, k1 are unused below.
+  int m1 = CRS;
+  int n1 = K;
+  int k1 = PQ;
+  
+  // Allocate temporary data structures used in parallel for
+  // One slice per OpenMP thread; only temp must be zeroed since the GEMM
+  // overwrites (beta=0) the other two buffers completely each iteration.
+  int numOpenMPThreads = MIN(numThreads, N);
+  double* temp = new double[numTempElem*numOpenMPThreads];
+  std::memset(temp, 0, numTempElem*numOpenMPThreads*sizeof(double));
+  double* rotatedDoutPtrArrays = new double[numRotatedElem*numOpenMPThreads];
+  double* loweredMatArrays = new double[numIm2ColElem*numOpenMPThreads];
+
+#pragma omp parallel for num_threads(numOpenMPThreads)
+  for (int n = 0; n < N; n++) {
+  	double* loweredMat = loweredMatArrays + numIm2ColElem*omp_get_thread_num();
+
+    // Step 1: Perform im2col
+    im2col(inputPtr + n * CHW, loweredMat, 1, C, H, W, K,
+           R, S, stride_h, stride_w, pad_h, pad_w,
+           P, Q);
+           
+    // Step 2: Rotate dout
+    double* rotatedDoutPtr = rotatedDoutPtrArrays + numRotatedElem*omp_get_thread_num();
+    rotate180(doutPtr + n * KPQ, rotatedDoutPtr, 1, C, H, W, K,
+           R, S, stride_h, stride_w, pad_h, pad_w,
+           P, Q);
+    
+    // Multiply to get CRS X K
+    double* temp1 = temp + numTempElem*omp_get_thread_num();
+    // Step 3: loweredMat (CRS X PQ) %*% rotated_dout (PQ X K) 
+    matmult(loweredMat, rotatedDoutPtr, temp1, C * R * S, P * Q, K, 1);
+              
+  } // end omp parallel for
+  
+  // Inplace transpose addition
+  // retPtr (K x CRS) += t(temp slice (CRS x K)) for every thread's partial.
+  // NOTE(review): this accumulates with += — presumably the caller passes a
+  // zero-initialized retPtr; confirm on the Java side.
+  int numRow = CRS;
+  for(int t = 0; t < numOpenMPThreads; t++) {
+  	int iter = 0;
+  	double* temp1 = temp + numTempElem*t;
+	for(int i = 0; i < CRS; i++) {
+		for(int j = 0; j < K; j++, iter++) {
+			int index = j*numRow+i;
+			retPtr[index] += temp1[iter];
+		}
+	}
+  }
+  
+  delete [] temp;
+  delete [] loweredMatArrays;
+  delete [] rotatedDoutPtrArrays;
+}
+
+void conv2dBackwardDataDense(double* filterPtr, double* doutPtr, double* retPtr, int N, int C, int H, int W, int K, int R, int S,
+    int stride_h, int stride_w, int pad_h, int pad_w, int P, int Q, int numThreads) {
+  // Data (input) gradient of conv2d: per image n, re-layout dout (PQ x K),
+  // GEMM against the filter (K x CRS), then col2im the (PQ x CRS) product
+  // back into retPtr + n*CHW. col2im accumulates with +=, so retPtr is
+  // presumably zero-initialized by the caller — confirm on the Java side.
+   // First step: Avoids oversubscription and other openmp/internal blas threading issues
+  setNumThreadsForBLAS(1);
+  
+  int CRS = C * R * S;
+  int CHW = C * H * W;
+  int PQ = P * Q;
+  int KPQ = K * PQ;
+  int numRotatedElem = PQ * K;
+  int numCol2ImElem = PQ * CRS;
+
+  // Allocate temporary data structures used in parallel for
+  // One buffer slice per OpenMP thread, indexed by omp_get_thread_num().
+  int numOpenMPThreads = MIN(numThreads, N);
+  double* rotatedDoutPtrArrays = new double[numRotatedElem*numOpenMPThreads];
+  double* col2imInputArrays = new double[numCol2ImElem*numOpenMPThreads];
+
+#pragma omp parallel for num_threads(numOpenMPThreads)
+  for (int n = 0; n < N; n++) {
+    // Step 1: Rotate dout
+    double* rotatedDoutPtr = rotatedDoutPtrArrays + numRotatedElem*omp_get_thread_num();
+    rotate180(doutPtr + n * KPQ, rotatedDoutPtr, 1, C, H, W, K,
+           R, S, stride_h, stride_w, pad_h, pad_w,
+           P, Q);
+
+    // Step 2: t(rotatedDout (PQ X K) %*% filter (K X CRS))
+    double* col2imInput = col2imInputArrays + numCol2ImElem*omp_get_thread_num();
+    matmult(rotatedDoutPtr, filterPtr, col2imInput,
+            PQ, K, CRS, 1);
+
+    // Step 3: Perform col2im
+    col2im(col2imInput, retPtr + n * CHW, 1, C, H, W, K,
+           R, S, stride_h, stride_w, pad_h, pad_w,
+           P, Q);
+
+  } // end omp parallel for
+  
+  delete [] rotatedDoutPtrArrays;
+  delete [] col2imInputArrays;
+    
+}
+
+void conv2dSparse(int apos, int alen, int* aix, double* avals, double* filterPtr, double* retPtr, int N, int C, int H, int W, 
+			int K, int R, int S, int stride_h, int stride_w, int pad_h, int pad_w, int P, int Q, int numThreads) {
+	// conv2d forward for ONE sparse image row: (apos, alen, aix, avals) is a
+	// sparse-row slice (indices into the dense C*H*W image). The row is
+	// densified into a scratch buffer, lowered with im2col, and multiplied
+	// by the filter. N is unused here (single-image call).
+	setNumThreadsForBLAS(1);
+	double* loweredMat = new double[C * R * S * P * Q];
+	
+	// Step 1: Perform im2col
+	// Densify the sparse row: zero the buffer, then scatter the non-zeros.
+	double* temp = new double[C * H * W];
+	std::size_t size = C * H * W * sizeof(double);
+	std::memset(temp, 0, size);
+	for(int j=apos; j<apos+alen; j++)
+		temp[ aix[j] ] = avals[j];
+	im2col(temp, loweredMat, 1, C, H, W, K,
+       R, S, stride_h, stride_w, pad_h, pad_w,
+       P, Q);	
+	delete [] temp;
+	
+	// Step 2: filter (K X CRS) %*% loweredMat (CRS X PQ)
+    matmult(filterPtr, loweredMat, retPtr, K, C * R * S, P * Q, 1);
+    
+	delete [] loweredMat;
+}
+
+void conv2dBackwardFilterSparseDense(int apos, int alen, int* aix, double* avals, double* rotatedDoutPtr, double* retPtr, int N, int C, int H, int W, 
+			int K, int R, int S, int stride_h, int stride_w, int pad_h, int pad_w, int P, int Q, int numThreads) {
+	// Filter-gradient contribution of ONE sparse image row: densify the row,
+	// im2col it, multiply by the caller-provided pre-rotated dout (PQ x K),
+	// and accumulate the result into retPtr with +=. rotatedDoutPtr is
+	// expected already in (PQ x K) layout (see rotate180). N is unused.
+	setNumThreadsForBLAS(1);
+	int CHW = C * H * W;
+	int CRS = C*R*S;
+	int PQ = P*Q;
+	int KPQ = K*PQ;
+	// NOTE(review): m1, n1, k1 are unused below.
+	int m1 = CRS;
+	int n1 = K;
+	int k1 = PQ;
+	
+	double* loweredMat = new double[CRS * PQ];
+	
+	// Step 1: Perform im2col
+	// Densify the sparse row into a zeroed C*H*W scratch buffer first.
+	double* temp = new double[C * H * W];
+	std::size_t size = C * H * W * sizeof(double);
+	std::memset(temp, 0, size);
+	for(int j=apos; j<apos+alen; j++)
+		temp[ aix[j] ] = avals[j];
+	im2col(temp, loweredMat, 1, C, H, W, K,
+       R, S, stride_h, stride_w, pad_h, pad_w,
+       P, Q);
+    delete [] temp;
+	
+	// Multiply to get CRS X K
+	double* temp1 = new double[CRS * K];
+	// Step 3: loweredMat (CRS X PQ) %*% rotatedDoutPtr (PQ X K) 
+    matmult(loweredMat, rotatedDoutPtr, temp1, C * R * S, P * Q, K, 1);
+    delete [] loweredMat;
+     
+    // Inplace addition
+    // NOTE(review): unlike conv2dBackwardFilterDense, no transpose is applied
+    // here — presumably the caller handles the layout; confirm on Java side.
+    for(int iter = 0; iter<K*CRS; iter++) {
+    	retPtr[iter] += temp1[iter];
+    }
+    
+	delete [] temp1;
+}
+
+void conv2dBiasAddDense(double* inputPtr, double* biasPtr, double* filterPtr, double* retPtr, int N, int C, int H, int W, int K, int R, int S,
+    int stride_h, int stride_w, int pad_h, int pad_w, int P, int Q, bool addBias, int numThreads) {
+  // conv2d forward (optionally fused with bias add): per image n, im2col the
+  // input and GEMM against the filter into retPtr + n*KPQ, then add
+  // biasPtr[k] to every output of channel k when addBias is set.
+  // biasPtr is only dereferenced when addBias == true (callers may pass a
+  // null pointer otherwise).
+  // First step:  Avoids oversubscription and other openmp/internal blas threading issues
+  setNumThreadsForBLAS(1);
+  
+  int CHW = C * H * W;
+  int KPQ = K * P * Q;
+  int PQ = P * Q;
+  int numIm2ColElem = C * R * S * P * Q;
+  
+  // Allocate temporary data structures used in parallel for
+  // One im2col buffer slice per OpenMP thread.
+  int numOpenMPThreads = MIN(numThreads, N);
+  double* loweredMatArrays = new double[numIm2ColElem*numOpenMPThreads];
+  
+#pragma omp parallel for num_threads(numOpenMPThreads)
+  for (int n = 0; n < N; n++) {
+    double* loweredMat = loweredMatArrays + numIm2ColElem*omp_get_thread_num();
+
+    // Step 1: Perform im2col
+    im2col(inputPtr + n * CHW, loweredMat, 1, C, H, W, K,
+           R, S, stride_h, stride_w, pad_h, pad_w,
+           P, Q);
+
+    // Step 2: filter (K X CRS) %*% loweredMat (CRS X PQ)
+    matmult(filterPtr, loweredMat, retPtr + n * KPQ, K,
+            C * R * S, P * Q, 1);
+    
+    // Step 3: Add bias
+    if(addBias) {
+	    double* outputArr = retPtr + n*KPQ;
+	    int index = 0;
+		for(int k = 0; k < K; k++) {
+			for(int pq = 0; pq < PQ; pq++, index++) {
+				outputArr[index] += biasPtr[k];
+			}
+		}
+    }
+  } // end omp parallel for
+  
+  delete [] loweredMatArrays;
+}

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/cpp/libmatrixdnn.h
----------------------------------------------------------------------
diff --git a/src/main/cpp/libmatrixdnn.h b/src/main/cpp/libmatrixdnn.h
new file mode 100644
index 0000000..bf6c113
--- /dev/null
+++ b/src/main/cpp/libmatrixdnn.h
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+ 
+#ifndef _libmatrixdnn_h
+#define _libmatrixdnn_h
+
+// Native conv2d kernels (see libmatrixdnn.cpp). Shapes use the usual
+// convolution notation: input (N x C x H x W), filter (K x C x R x S),
+// output (N x K x P x Q).
+
+// Filter gradient over a dense input/dout batch; accumulates into retPtr.
+void conv2dBackwardFilterDense(double* inputPtr, double* doutPtr, double* retPtr, int N, int C, int H, int W, int K, int R, int S,
+    int stride_h, int stride_w, int pad_h, int pad_w, int P, int Q, int numThreads);
+
+// Data (input) gradient over a dense filter/dout batch; accumulates into retPtr.
+void conv2dBackwardDataDense(double* filterPtr, double* doutPtr, double* retPtr, int N, int C, int H, int W, int K, int R, int S,
+    int stride_h, int stride_w, int pad_h, int pad_w, int P, int Q, int numThreads);
+    
+// Forward conv2d over a dense batch; adds biasPtr[k] per channel when addBias.
+void conv2dBiasAddDense(double* inputPtr, double* biasPtr, double* filterPtr, double* retPtr, int N, int C, int H, int W, int K, int R, int S,
+    int stride_h, int stride_w, int pad_h, int pad_w, int P, int Q, bool addBias, int numThreads);
+    
+// Forward conv2d for a single sparse image row (apos/alen/aix/avals).
+void conv2dSparse(int apos, int alen, int* aix, double* avals, double* filter, double* ret, int N, int C, int H, int W, 
+			int K, int R, int S, int stride_h, int stride_w, int pad_h, int pad_w, int P, int Q, int numThreads);
+
+// Filter-gradient contribution of a single sparse image row; accumulates into ret.
+void conv2dBackwardFilterSparseDense(int apos, int alen, int* aix, double* avals, double* rotatedDoutPtr, double* ret, int N, int C, int H, int W, 
+			int K, int R, int S, int stride_h, int stride_w, int pad_h, int pad_w, int P, int Q, int numThreads);
+			
+#endif
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/cpp/libmatrixmult.cpp
----------------------------------------------------------------------
diff --git a/src/main/cpp/libmatrixmult.cpp b/src/main/cpp/libmatrixmult.cpp
new file mode 100644
index 0000000..6844c2a
--- /dev/null
+++ b/src/main/cpp/libmatrixmult.cpp
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include "config.h"
+#include "libmatrixmult.h"
+#include <cstdlib>
+#include "omp.h"
+#include <cmath>
+
+// Last thread count handed to the BLAS library; -1 until first call.
+int SYSML_CURRENT_NUM_THREADS = -1;
+// Sets the internal thread count of the linked BLAS library, caching the
+// value so repeated calls with an unchanged count (one per matmult) skip the
+// underlying library call. Called before the OpenMP regions in
+// libmatrixdnn.cpp; not itself synchronized.
+void setNumThreadsForBLAS(int numThreads) {
+	if(SYSML_CURRENT_NUM_THREADS != numThreads) {
+#ifdef USE_OPEN_BLAS
+		openblas_set_num_threads(numThreads);
+#elif defined USE_INTEL_MKL
+		// BUGFIX: was a bare #else, which called mkl_set_num_threads even in a
+		// build where neither BLAS flag is defined (and thus no MKL header is
+		// included). Mirrors the #ifdef/#elif structure of libmatrixmult.h.
+		mkl_set_num_threads(numThreads);
+#endif
+	    SYSML_CURRENT_NUM_THREADS = numThreads;
+	}
+}
+ 
+// Multiplies two matrices m1Ptr and m2Ptr in row-major format of shape
+// (m1rlen, m1clen) and (m1clen, m2clen)
+// The (m1rlen x m2clen) product overwrites retPtr (beta = 0). The dgemv
+// special case for vector outputs was deliberately left disabled below.
+void matmult(double* m1Ptr, double* m2Ptr, double* retPtr, int m1rlen,
+             int m1clen, int m2clen, int numThreads) {
+  int m = m1rlen;
+  int n = m2clen;
+  int k = m1clen;
+  
+  setNumThreadsForBLAS(numThreads);
+  
+  // if(m2clen == 1)
+  // 	cblas_dgemv(CblasRowMajor, CblasNoTrans, m1rlen, m1clen, 1, m1Ptr, m1clen, m2Ptr, 1, 0, retPtr, 1);
+  // else 
+  	cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, m, n, k, 1, m1Ptr, k, m2Ptr, n, 0, retPtr, n);
+}
+
+void tsmm(double* m1Ptr, double* retPtr, int m1rlen, int m1clen, bool isLeftTranspose, int numThreads) {
+  int m = isLeftTranspose ? m1clen : m1rlen;
+  int n = isLeftTranspose ? m1clen : m1rlen;
+  int k = isLeftTranspose ? m1rlen : m1clen;
+  
+  setNumThreadsForBLAS(numThreads);
+  
+  cblas_dgemm(CblasRowMajor, isLeftTranspose ? CblasTrans : CblasNoTrans, isLeftTranspose ? CblasNoTrans : CblasTrans, m, n, k, 1, m1Ptr, k, m1Ptr, n, 0, retPtr, n);
+}
+ 

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/cpp/libmatrixmult.h
----------------------------------------------------------------------
diff --git a/src/main/cpp/libmatrixmult.h b/src/main/cpp/libmatrixmult.h
new file mode 100644
index 0000000..d39242e
--- /dev/null
+++ b/src/main/cpp/libmatrixmult.h
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+ 
+#ifndef _libmatrixmult_h
+#define _libmatrixmult_h
+
+#define MAX(x, y) (((x) > (y)) ? (x) : (y))
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+
+// *****************************************************************
+// We support Intel MKL (recommended) or OpenBLAS.
+// USE_INTEL_MKL / USE_OPEN_BLAS select the backend; they come from the
+// CMake-generated config.h (config.h.cmake) or from -D flags on the manual
+// g++/cl command lines documented in systemml.cpp.
+// *****************************************************************
+
+// Since we call cblas_dgemm in openmp for loop,
+// we call "extension" APIs for setting number of threads of the given API.
+// For example: for OpenBLAS we use openblas_set_num_threads and  
+// for MKL we use mkl_set_num_threads. This avoids performance degradation due to overprovisioning.
+#ifdef USE_OPEN_BLAS
+#include <cblas.h>
+#elif defined USE_INTEL_MKL
+#include <mkl.h>
+#include <mkl_service.h>
+#endif
+
+// Sets the internal thread count of the active BLAS library; no-op when the
+// requested value matches the cached current value.
+void setNumThreadsForBLAS(int numThreads);
+
+// Multiplies two matrices m1Ptr and m2Ptr in row-major format of shape
+// (m1rlen, m1clen) and (m1clen, m2clen); the (m1rlen x m2clen) product
+// overwrites retPtr.
+void matmult(double* m1Ptr, double* m2Ptr, double* retPtr, int m1rlen,
+             int m1clen, int m2clen, int numThreads);
+             
+// Transpose-self multiply: t(m1) %*% m1 when isLeftTranspose, else
+// m1 %*% t(m1); m1Ptr is row-major of shape (m1rlen, m1clen).
+void tsmm(double* m1Ptr, double* retPtr, int m1rlen, int m1clen, bool isLeftTranspose,  int numThreads);
+             
+#endif

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/cpp/preload/preload_systemml.cpp
----------------------------------------------------------------------
diff --git a/src/main/cpp/preload/preload_systemml.cpp b/src/main/cpp/preload/preload_systemml.cpp
new file mode 100644
index 0000000..6ee20e0
--- /dev/null
+++ b/src/main/cpp/preload/preload_systemml.cpp
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include "preload_systemml.h" 
+#include <cstdlib>
+ 
+//  g++ -o libpreload_systemml-linux-x86_64.so preload_systemml.cpp  -I$JAVA_HOME/include -I$JAVA_HOME/include/linux -lm -ldl -O3 -shared -fPIC
+// Sets a process-level environment variable (name=value) from Java, using
+// _putenv_s on Windows and setenv (overwrite=1) elsewhere.
+JNIEXPORT void JNICALL Java_org_apache_sysml_utils_EnvironmentHelper_setEnv(JNIEnv * env, jclass c, jstring jname, jstring jvalue) {
+	const char* name = (env)->GetStringUTFChars(jname, NULL);
+	const char* value = (env)->GetStringUTFChars(jvalue, NULL);
+	// BUGFIX: GetStringUTFChars returns NULL (with a pending
+	// OutOfMemoryError) on failure; skip the update instead of passing a
+	// null pointer to setenv/_putenv_s, and only release what was acquired.
+	if(name != NULL && value != NULL) {
+#if defined _WIN32 || defined _WIN64
+		_putenv_s(name, value);
+#else
+		setenv(name, value, 1);
+#endif
+	}
+	if(name != NULL)
+		(env)->ReleaseStringUTFChars(jname, name);
+	if(value != NULL)
+		(env)->ReleaseStringUTFChars(jvalue, value);
+}
+ 
+ 

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/cpp/preload/preload_systemml.h
----------------------------------------------------------------------
diff --git a/src/main/cpp/preload/preload_systemml.h b/src/main/cpp/preload/preload_systemml.h
new file mode 100644
index 0000000..79d58f8
--- /dev/null
+++ b/src/main/cpp/preload/preload_systemml.h
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class org_apache_sysml_utils_EnvironmentHelper */
+
+#ifndef _Included_org_apache_sysml_utils_EnvironmentHelper
+#define _Included_org_apache_sysml_utils_EnvironmentHelper
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class:     org_apache_sysml_utils_EnvironmentHelper
+ * Method:    setEnv
+ * Signature: (Ljava/lang/String;Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_org_apache_sysml_utils_EnvironmentHelper_setEnv
+  (JNIEnv *, jclass, jstring, jstring);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/cpp/systemml.cpp
----------------------------------------------------------------------
diff --git a/src/main/cpp/systemml.cpp b/src/main/cpp/systemml.cpp
new file mode 100644
index 0000000..41ce0bc
--- /dev/null
+++ b/src/main/cpp/systemml.cpp
@@ -0,0 +1,225 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+ 
+#include "config.h"
+#include "systemml.h"
+#include "libmatrixmult.h"
+#include "libmatrixdnn.h"
+
+// Linux:
+// g++ -o lib/libsystemml_mkl-Linux-x86_64.so *.cpp  -I$JAVA_HOME/include -I$MKLROOT/include -I$JAVA_HOME/include/linux -lmkl_rt -lpthread  -lm -ldl -DUSE_INTEL_MKL -DUSE_GNU_THREADING -L$MKLROOT/lib/intel64 -m64 -fopenmp -O3 -shared -fPIC
+// g++ -o lib/libsystemml_openblas-Linux-x86_64.so *.cpp  -I$JAVA_HOME/include  -I$JAVA_HOME/include/linux -lopenblas -lpthread -lm -ldl -DUSE_OPEN_BLAS -I/opt/OpenBLAS/include/ -L/opt/OpenBLAS/lib/ -fopenmp -O3 -shared -fPIC
+
+// Mac OSX:
+// g++ -o libsystemml_mkl-linux-x86_64.dylib *.cpp  -I$JAVA_HOME/include -I$MKLROOT/include -I$JAVA_HOME/include/linux -lmkl_rt -lpthread  -lm -ldl -DUSE_INTEL_MKL -DUSE_GNU_THREADING -L$MKLROOT/lib/intel64 -m64 -fopenmp -O3 -dynamiclib -fPIC -undefined dynamic_lookup
+// g++ -o libsystemml_openblas-linux-x86_64.dylib *.cpp  -I$JAVA_HOME/include  -I$JAVA_HOME/include/linux -lopenblas -lpthread -lm -ldl -DUSE_OPEN_BLAS -L/opt/OpenBLAS/lib/ -fopenmp -O3 -dynamiclib -fPIC -undefined dynamic_lookup
+
+// Windows MKL: 
+// "C:\\Program Files (x86)\\Microsoft Visual Studio 12.0\\VC\\vcvarsall.bat" amd64
+// "%MKLROOT%"\bin\mklvars.bat intel64
+// set JAVA_HOME=C:\Program Files\Java\jdk1.8.0_25
+// cl *.cpp -I. -I"%MKLROOT%"\include -I"%JAVA_HOME%"\include -I"%JAVA_HOME%"\include\win32 -DUSE_INTEL_MKL -Fesystemml_mkl-windows-x86_64.dll -MD -LD  "%MKLROOT%"\lib\intel64_win\mkl_intel_thread_dll.lib "%MKLROOT%"\lib\intel64_win\mkl_core_dll.lib "%MKLROOT%"\lib\intel64_win\mkl_intel_lp64_dll.lib
+// Windows OpenBLAS:
+// "C:\\Program Files (x86)\\Microsoft Visual Studio 12.0\\VC\\vcvarsall.bat" amd64
+// set JAVA_HOME=C:\Program Files\Java\jdk1.8.0_25
+// cl *.cpp -I. -I"%OPENBLASROOT%"\include -I"%JAVA_HOME%"\include -I"%JAVA_HOME%"\include\win32 -DUSE_OPEN_BLAS -Fesystemml_openblas-windows-x86_64.dll -MD -LD "%OPENBLASROOT%"\lib\libopenblas.dll.a
+
+// Results from Matrix-vector/vector-matrix 1M x 1K, dense show that GetDoubleArrayElements creates a copy on OpenJDK.
+
+// Logic:
+// 1. We chose GetDoubleArrayElements over GetPrimitiveArrayCritical in a multi-threaded scenario. This avoids any potential OOM related to GC halts.
+// 2. For input array, we don't copy back the array using JNI_ABORT.
+
+// JNI Methods to get/release double* 
+// Current implementation pins arrays via GetPrimitiveArrayCritical; the
+// commented-out variants below would fall back to GetDoubleArrayElements
+// when not running at maxThreads. Per the JNI spec, code between Get and
+// ReleasePrimitiveArrayCritical runs in a critical region: keep it short
+// and make no other JNI calls inside it.
+#define GET_DOUBLE_ARRAY(env, input, numThreads) \
+	((double*)env->GetPrimitiveArrayCritical(input, NULL))
+// ( maxThreads != -1 && ((int)numThreads) == maxThreads ? ((double*)env->GetPrimitiveArrayCritical(input, NULL)) :  env->GetDoubleArrayElements(input,NULL) )
+ 
+// ------------------------------------------------------------------- 
+// From: https://developer.android.com/training/articles/perf-jni.html
+// 0
+// Actual: the array object is un-pinned.
+// Copy: data is copied back. The buffer with the copy is freed.
+// JNI_ABORT
+// Actual: the array object is un-pinned. Earlier writes are not aborted.
+// Copy: the buffer with the copy is freed; any changes to it are lost.
+// Release for read-only inputs: JNI_ABORT discards any (copied) changes.
+#define RELEASE_INPUT_DOUBLE_ARRAY(env, input, inputPtr, numThreads) \
+	env->ReleasePrimitiveArrayCritical(input, inputPtr, JNI_ABORT)
+// ( maxThreads != -1 && ((int)numThreads) == maxThreads ? env->ReleasePrimitiveArrayCritical(input, inputPtr, JNI_ABORT) : env->ReleaseDoubleArrayElements(input, inputPtr, JNI_ABORT) )
+
+// Release for outputs: mode 0 copies data back to the Java array.
+#define RELEASE_DOUBLE_ARRAY(env, input, inputPtr, numThreads) \
+	env->ReleasePrimitiveArrayCritical(input, inputPtr, 0)
+// ( maxThreads != -1 && ((int)numThreads) == maxThreads ? env->ReleasePrimitiveArrayCritical(input, inputPtr, 0) :  env->ReleaseDoubleArrayElements(input, inputPtr, 0) )
+  
+// -------------------------------------------------------------------
+
+// Maximum thread count configured from the Java side; -1 until set.
+// Currently only referenced by the commented-out GET/RELEASE macro variants.
+int maxThreads = -1;
+JNIEXPORT void JNICALL Java_org_apache_sysml_utils_NativeHelper_setMaxNumThreads
+  (JNIEnv *, jclass, jint jmaxThreads) {
+  maxThreads = (int) jmaxThreads;
+}
+
+// Dense matrix multiply ret = m1 %*% m2 via native BLAS. Returns false when
+// any array could not be pinned, so the caller can fall back to Java.
+JNIEXPORT jboolean JNICALL Java_org_apache_sysml_utils_NativeHelper_matrixMultDenseDense(
+    JNIEnv* env, jclass cls, jdoubleArray m1, jdoubleArray m2, jdoubleArray ret,
+    jint m1rlen, jint m1clen, jint m2clen, jint numThreads) {
+  double* m1Ptr = GET_DOUBLE_ARRAY(env, m1, numThreads);
+  double* m2Ptr = GET_DOUBLE_ARRAY(env, m2, numThreads);
+  double* retPtr = GET_DOUBLE_ARRAY(env, ret, numThreads);
+  if(m1Ptr == NULL || m2Ptr == NULL || retPtr == NULL) {
+  	// BUGFIX: release any array that WAS pinned before bailing out — the
+  	// plain early return leaked the critical sections on partial failure.
+  	if(m1Ptr != NULL) RELEASE_INPUT_DOUBLE_ARRAY(env, m1, m1Ptr, numThreads);
+  	if(m2Ptr != NULL) RELEASE_INPUT_DOUBLE_ARRAY(env, m2, m2Ptr, numThreads);
+  	if(retPtr != NULL) RELEASE_DOUBLE_ARRAY(env, ret, retPtr, numThreads);
+  	return (jboolean) false;
+  }
+
+  matmult(m1Ptr, m2Ptr, retPtr, (int)m1rlen, (int)m1clen, (int)m2clen, (int)numThreads);
+
+  RELEASE_INPUT_DOUBLE_ARRAY(env, m1, m1Ptr, numThreads);
+  RELEASE_INPUT_DOUBLE_ARRAY(env, m2, m2Ptr, numThreads);
+  RELEASE_DOUBLE_ARRAY(env, ret, retPtr, numThreads); 
+  return (jboolean) true;
+}
+
+// Transpose-self matrix multiply (t(m1)%*%m1 or m1%*%t(m1)) via native BLAS.
+// Returns false when an array could not be pinned.
+JNIEXPORT jboolean JNICALL Java_org_apache_sysml_utils_NativeHelper_tsmm
+  (JNIEnv * env, jclass cls, jdoubleArray m1, jdoubleArray ret, jint m1rlen, jint m1clen, jboolean isLeftTranspose, jint numThreads) {
+  double* m1Ptr = GET_DOUBLE_ARRAY(env, m1, numThreads);
+  double* retPtr = GET_DOUBLE_ARRAY(env, ret, numThreads);
+  if(m1Ptr == NULL || retPtr == NULL) {
+  	// BUGFIX: release whichever array WAS pinned before bailing out — the
+  	// plain early return leaked the critical section on partial failure.
+  	if(m1Ptr != NULL) RELEASE_INPUT_DOUBLE_ARRAY(env, m1, m1Ptr, numThreads);
+  	if(retPtr != NULL) RELEASE_DOUBLE_ARRAY(env, ret, retPtr, numThreads);
+  	return (jboolean) false;
+  }
+
+  tsmm(m1Ptr, retPtr, (int) m1rlen, (int) m1clen, (bool) isLeftTranspose, (int) numThreads);
+  
+  RELEASE_INPUT_DOUBLE_ARRAY(env, m1, m1Ptr, numThreads);
+  RELEASE_DOUBLE_ARRAY(env, ret, retPtr, numThreads);
+  return (jboolean) true;
+}
+
+// Forward conv2d for one sparse image row; delegates to conv2dSparse.
+// NOTE(review): unlike the dense JNI entry points, the pinned pointers are
+// not NULL-checked here — GetPrimitiveArrayCritical can return NULL.
+JNIEXPORT jboolean JNICALL Java_org_apache_sysml_utils_NativeHelper_conv2dSparse
+  (JNIEnv * env, jclass, jint apos, jint alen, jintArray aix, jdoubleArray avals, jdoubleArray filter,
+    jdoubleArray ret, jint N, jint C, jint H, jint W, jint K, jint R, jint S,
+    jint stride_h, jint stride_w, jint pad_h, jint pad_w, jint P, jint Q, jint numThreads) {
+  int* aixPtr = ((int*)env->GetPrimitiveArrayCritical(aix, NULL));
+  double* avalsPtr = GET_DOUBLE_ARRAY(env, avals, numThreads);
+  double* filterPtr = GET_DOUBLE_ARRAY(env, filter, numThreads);
+  double* retPtr = GET_DOUBLE_ARRAY(env, ret, numThreads);
+  
+  conv2dSparse((int)apos, (int)alen, aixPtr, avalsPtr, filterPtr, retPtr, (int)N, (int)C, (int)H, (int)W, 
+			(int)K, (int)R, (int)S, (int)stride_h, (int)stride_w, (int)pad_h, (int)pad_w, (int)P, (int)Q, (int)numThreads);
+  
+  // Inputs are released with JNI_ABORT (read-only); ret with mode 0 so the
+  // computed values are copied back to the Java array.
+  RELEASE_INPUT_DOUBLE_ARRAY(env, avals, avalsPtr, numThreads);
+  RELEASE_INPUT_DOUBLE_ARRAY(env, filter, filterPtr, numThreads);
+  env->ReleasePrimitiveArrayCritical(aix, aixPtr, JNI_ABORT);
+  RELEASE_DOUBLE_ARRAY(env, ret, retPtr, numThreads); 
+  return (jboolean) true;
+}
+
+// Filter-gradient contribution of one sparse image row; dout is expected
+// pre-rotated (see conv2dBackwardFilterSparseDense in libmatrixdnn.cpp).
+// NOTE(review): pinned pointers are not NULL-checked here — see the dense
+// entry points above for the checked pattern.
+JNIEXPORT jboolean JNICALL Java_org_apache_sysml_utils_NativeHelper_conv2dBackwardFilterSparseDense
+  (JNIEnv * env, jclass, jint apos, jint alen, jintArray aix, jdoubleArray avals, jdoubleArray dout,  
+  	jdoubleArray ret, jint N, jint C, jint H, jint W, jint K, jint R, jint S,
+    jint stride_h, jint stride_w, jint pad_h, jint pad_w, jint P, jint Q, jint numThreads) {
+  int* aixPtr = ((int*)env->GetPrimitiveArrayCritical(aix, NULL));
+  double* avalsPtr = GET_DOUBLE_ARRAY(env, avals, numThreads);
+  double* doutPtr = GET_DOUBLE_ARRAY(env, dout, numThreads);
+  double* retPtr = GET_DOUBLE_ARRAY(env, ret, numThreads);
+  
+  conv2dBackwardFilterSparseDense((int)apos, (int)alen, aixPtr, avalsPtr, doutPtr, retPtr, (int)N, (int)C, (int)H, (int)W, 
+			(int)K, (int)R, (int)S, (int)stride_h, (int)stride_w, (int)pad_h, (int)pad_w, (int)P, (int)Q, (int)numThreads);
+  
+  // Inputs released read-only (JNI_ABORT); ret copied back (mode 0).
+  RELEASE_INPUT_DOUBLE_ARRAY(env, avals, avalsPtr, numThreads);
+  RELEASE_INPUT_DOUBLE_ARRAY(env, dout, doutPtr, numThreads);
+  env->ReleasePrimitiveArrayCritical(aix, aixPtr, JNI_ABORT);
+  RELEASE_DOUBLE_ARRAY(env, ret, retPtr, numThreads); 
+  return (jboolean) true;
+}
+
+// JNI entry point: dense 2D convolution. Delegates to conv2dBiasAddDense with
+// a NULL bias pointer and addBias=false. Returns false (without throwing) if
+// any Java array could not be pinned/copied to a native pointer.
+JNIEXPORT jboolean JNICALL Java_org_apache_sysml_utils_NativeHelper_conv2dDense(
+	JNIEnv* env, jclass, jdoubleArray input, jdoubleArray filter,
+    jdoubleArray ret, jint N, jint C, jint H, jint W, jint K, jint R, jint S,
+    jint stride_h, jint stride_w, jint pad_h, jint pad_w, jint P, jint Q, jint numThreads) {
+  double* inputPtr = GET_DOUBLE_ARRAY(env, input, numThreads);
+  double* filterPtr = GET_DOUBLE_ARRAY(env, filter, numThreads);
+  double* retPtr = GET_DOUBLE_ARRAY(env, ret, numThreads);
+  if(inputPtr == NULL || filterPtr == NULL || retPtr == NULL) {
+  	// Fix: release whichever arrays WERE acquired before bailing out; the
+  	// previous early return leaked them. Nothing was computed, so ret is
+  	// released without copy-back as well.
+  	if(inputPtr != NULL) RELEASE_INPUT_DOUBLE_ARRAY(env, input, inputPtr, numThreads);
+  	if(filterPtr != NULL) RELEASE_INPUT_DOUBLE_ARRAY(env, filter, filterPtr, numThreads);
+  	if(retPtr != NULL) RELEASE_INPUT_DOUBLE_ARRAY(env, ret, retPtr, numThreads);
+  	return (jboolean) false;
+  }
+  
+  // bias pointer is NULL and addBias=false: plain convolution.
+  conv2dBiasAddDense(inputPtr, 0, filterPtr, retPtr, (int) N, (int) C, (int) H, (int) W, (int) K, (int) R, (int) S,
+    (int) stride_h, (int) stride_w, (int) pad_h, (int) pad_w, (int) P, (int) Q, false, (int) numThreads);
+    
+  RELEASE_INPUT_DOUBLE_ARRAY(env, input, inputPtr, numThreads);
+  RELEASE_INPUT_DOUBLE_ARRAY(env, filter, filterPtr, numThreads);
+  RELEASE_DOUBLE_ARRAY(env, ret, retPtr, numThreads); 
+  return (jboolean) true;
+}
+
+// JNI entry point: dense 2D convolution fused with bias addition
+// (addBias=true). Returns false (without throwing) if any Java array could
+// not be pinned/copied to a native pointer.
+JNIEXPORT jboolean JNICALL Java_org_apache_sysml_utils_NativeHelper_conv2dBiasAddDense(
+	JNIEnv* env, jclass, jdoubleArray input, jdoubleArray bias, jdoubleArray filter,
+    jdoubleArray ret, jint N, jint C, jint H, jint W, jint K, jint R, jint S,
+    jint stride_h, jint stride_w, jint pad_h, jint pad_w, jint P, jint Q, jint numThreads) {
+    
+  double* inputPtr = GET_DOUBLE_ARRAY(env, input, numThreads);
+  double* biasPtr = GET_DOUBLE_ARRAY(env, bias, numThreads);
+  double* filterPtr = GET_DOUBLE_ARRAY(env, filter, numThreads);
+  double* retPtr = GET_DOUBLE_ARRAY(env, ret, numThreads);
+  if(inputPtr == NULL || biasPtr == NULL || filterPtr == NULL || retPtr == NULL) {
+  	// Fix: release whichever arrays WERE acquired before bailing out; the
+  	// previous early return leaked them on partial failure.
+  	if(inputPtr != NULL) RELEASE_INPUT_DOUBLE_ARRAY(env, input, inputPtr, numThreads);
+  	if(biasPtr != NULL) RELEASE_INPUT_DOUBLE_ARRAY(env, bias, biasPtr, numThreads);
+  	if(filterPtr != NULL) RELEASE_INPUT_DOUBLE_ARRAY(env, filter, filterPtr, numThreads);
+  	if(retPtr != NULL) RELEASE_INPUT_DOUBLE_ARRAY(env, ret, retPtr, numThreads);
+  	return (jboolean) false;
+  }
+  
+  conv2dBiasAddDense(inputPtr, biasPtr, filterPtr, retPtr, (int) N, (int) C, (int) H, (int) W, (int) K, (int) R, (int) S,
+    (int) stride_h, (int) stride_w, (int) pad_h, (int) pad_w, (int) P, (int) Q, true, (int) numThreads);
+    
+  RELEASE_INPUT_DOUBLE_ARRAY(env, input, inputPtr, numThreads);
+  RELEASE_INPUT_DOUBLE_ARRAY(env, bias, biasPtr, numThreads);
+  RELEASE_INPUT_DOUBLE_ARRAY(env, filter, filterPtr, numThreads);
+  RELEASE_DOUBLE_ARRAY(env, ret, retPtr, numThreads); 
+  return (jboolean) true;
+}
+
+// JNI entry point: conv2d backward-data pass (gradient w.r.t. the input),
+// all-dense operands. Returns false (without throwing) if any Java array
+// could not be pinned/copied to a native pointer.
+JNIEXPORT jboolean JNICALL Java_org_apache_sysml_utils_NativeHelper_conv2dBackwardDataDense(
+	JNIEnv* env, jclass, jdoubleArray filter, jdoubleArray dout,
+    jdoubleArray ret, jint N, jint C, jint H, jint W, jint K, jint R, jint S,
+    jint stride_h, jint stride_w, jint pad_h, jint pad_w, jint P, jint Q, jint numThreads) {
+  
+  double* filterPtr = GET_DOUBLE_ARRAY(env, filter, numThreads);
+  double* doutPtr = GET_DOUBLE_ARRAY(env, dout, numThreads);
+  double* retPtr = GET_DOUBLE_ARRAY(env, ret, numThreads);
+  if(doutPtr == NULL || filterPtr == NULL || retPtr == NULL) {
+  	// Fix: release whichever arrays WERE acquired before bailing out; the
+  	// previous early return leaked them on partial failure.
+  	if(filterPtr != NULL) RELEASE_INPUT_DOUBLE_ARRAY(env, filter, filterPtr, numThreads);
+  	if(doutPtr != NULL) RELEASE_INPUT_DOUBLE_ARRAY(env, dout, doutPtr, numThreads);
+  	if(retPtr != NULL) RELEASE_INPUT_DOUBLE_ARRAY(env, ret, retPtr, numThreads);
+  	return (jboolean) false;
+  }
+  
+  conv2dBackwardDataDense(filterPtr, doutPtr, retPtr, (int) N, (int) C, (int) H, (int) W, (int) K, (int) R, (int) S,
+    (int) stride_h, (int) stride_w, (int) pad_h, (int) pad_w, (int) P, (int) Q, (int) numThreads);
+  
+  RELEASE_INPUT_DOUBLE_ARRAY(env, filter, filterPtr, numThreads);
+  RELEASE_INPUT_DOUBLE_ARRAY(env, dout, doutPtr, numThreads);
+  RELEASE_DOUBLE_ARRAY(env, ret, retPtr, numThreads);
+  return (jboolean) true;
+}
+
+// JNI entry point: conv2d backward-filter pass (gradient w.r.t. the filter),
+// all-dense operands. Returns false (without throwing) if any Java array
+// could not be pinned/copied to a native pointer.
+JNIEXPORT jboolean JNICALL Java_org_apache_sysml_utils_NativeHelper_conv2dBackwardFilterDense(
+	JNIEnv* env, jclass, jdoubleArray input, jdoubleArray dout,
+    jdoubleArray ret, jint N, jint C, jint H, jint W, jint K, jint R, jint S,
+    jint stride_h, jint stride_w, jint pad_h, jint pad_w, jint P, jint Q, jint numThreads) {
+  double* inputPtr = GET_DOUBLE_ARRAY(env, input, numThreads);
+  double* doutPtr = GET_DOUBLE_ARRAY(env, dout, numThreads);
+  double* retPtr = GET_DOUBLE_ARRAY(env, ret, numThreads);
+  if(doutPtr == NULL || inputPtr == NULL || retPtr == NULL) {
+  	// Fix: release whichever arrays WERE acquired before bailing out; the
+  	// previous early return leaked them on partial failure.
+  	if(inputPtr != NULL) RELEASE_INPUT_DOUBLE_ARRAY(env, input, inputPtr, numThreads);
+  	if(doutPtr != NULL) RELEASE_INPUT_DOUBLE_ARRAY(env, dout, doutPtr, numThreads);
+  	if(retPtr != NULL) RELEASE_INPUT_DOUBLE_ARRAY(env, ret, retPtr, numThreads);
+  	return (jboolean) false;
+  }
+  
+  conv2dBackwardFilterDense(inputPtr, doutPtr, retPtr, (int) N, (int) C, (int) H, (int) W, (int) K, (int) R, (int) S,
+    (int) stride_h, (int) stride_w, (int) pad_h, (int) pad_w, (int) P, (int) Q, (int) numThreads);
+  
+  RELEASE_INPUT_DOUBLE_ARRAY(env, input, inputPtr, numThreads);
+  RELEASE_INPUT_DOUBLE_ARRAY(env, dout, doutPtr, numThreads);
+  RELEASE_DOUBLE_ARRAY(env, ret, retPtr, numThreads);
+  return (jboolean) true;
+}

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/cpp/systemml.h
----------------------------------------------------------------------
diff --git a/src/main/cpp/systemml.h b/src/main/cpp/systemml.h
new file mode 100644
index 0000000..ac36495
--- /dev/null
+++ b/src/main/cpp/systemml.h
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+ 
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class org_apache_sysml_utils_NativeHelper */
+
+#ifndef _Included_org_apache_sysml_utils_NativeHelper
+#define _Included_org_apache_sysml_utils_NativeHelper
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class:     org_apache_sysml_utils_NativeHelper
+ * Method:    matrixMultDenseDense
+ * Signature: ([D[D[DIIII)Z
+ */
+JNIEXPORT jboolean JNICALL Java_org_apache_sysml_utils_NativeHelper_matrixMultDenseDense
+  (JNIEnv *, jclass, jdoubleArray, jdoubleArray, jdoubleArray, jint, jint, jint, jint);
+
+/*
+ * Class:     org_apache_sysml_utils_NativeHelper
+ * Method:    tsmm
+ * Signature: ([D[DIIZI)Z
+ */
+JNIEXPORT jboolean JNICALL Java_org_apache_sysml_utils_NativeHelper_tsmm
+  (JNIEnv *, jclass, jdoubleArray, jdoubleArray, jint, jint, jboolean, jint);
+
+/*
+ * Class:     org_apache_sysml_utils_NativeHelper
+ * Method:    conv2dDense
+ * Signature: ([D[D[DIIIIIIIIIIIIII)Z
+ */
+JNIEXPORT jboolean JNICALL Java_org_apache_sysml_utils_NativeHelper_conv2dDense
+  (JNIEnv *, jclass, jdoubleArray, jdoubleArray, jdoubleArray, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint);
+
+/*
+ * Class:     org_apache_sysml_utils_NativeHelper
+ * Method:    conv2dBiasAddDense
+ * Signature: ([D[D[D[DIIIIIIIIIIIIII)Z
+ */
+JNIEXPORT jboolean JNICALL Java_org_apache_sysml_utils_NativeHelper_conv2dBiasAddDense
+  (JNIEnv *, jclass, jdoubleArray, jdoubleArray, jdoubleArray, jdoubleArray, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint);
+
+/*
+ * Class:     org_apache_sysml_utils_NativeHelper
+ * Method:    conv2dBackwardDataDense
+ * Signature: ([D[D[DIIIIIIIIIIIIII)Z
+ */
+JNIEXPORT jboolean JNICALL Java_org_apache_sysml_utils_NativeHelper_conv2dBackwardDataDense
+  (JNIEnv *, jclass, jdoubleArray, jdoubleArray, jdoubleArray, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint);
+
+/*
+ * Class:     org_apache_sysml_utils_NativeHelper
+ * Method:    conv2dBackwardFilterDense
+ * Signature: ([D[D[DIIIIIIIIIIIIII)Z
+ */
+JNIEXPORT jboolean JNICALL Java_org_apache_sysml_utils_NativeHelper_conv2dBackwardFilterDense
+  (JNIEnv *, jclass, jdoubleArray, jdoubleArray, jdoubleArray, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint);
+
+/*
+ * Class:     org_apache_sysml_utils_NativeHelper
+ * Method:    conv2dSparse
+ * Signature: (II[I[D[D[DIIIIIIIIIIIIII)Z
+ */
+JNIEXPORT jboolean JNICALL Java_org_apache_sysml_utils_NativeHelper_conv2dSparse
+  (JNIEnv *, jclass, jint, jint, jintArray, jdoubleArray, jdoubleArray, jdoubleArray, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint);
+
+/*
+ * Class:     org_apache_sysml_utils_NativeHelper
+ * Method:    conv2dBackwardFilterSparseDense
+ * Signature: (II[I[D[D[DIIIIIIIIIIIIII)Z
+ */
+JNIEXPORT jboolean JNICALL Java_org_apache_sysml_utils_NativeHelper_conv2dBackwardFilterSparseDense
+  (JNIEnv *, jclass, jint, jint, jintArray, jdoubleArray, jdoubleArray, jdoubleArray, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint, jint);
+
+/*
+ * Class:     org_apache_sysml_utils_NativeHelper
+ * Method:    setMaxNumThreads
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL Java_org_apache_sysml_utils_NativeHelper_setMaxNumThreads
+  (JNIEnv *, jclass, jint);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
+
+ 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/java/org/apache/sysml/api/DMLScript.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/api/DMLScript.java b/src/main/java/org/apache/sysml/api/DMLScript.java
index ab059a0..bfac48d 100644
--- a/src/main/java/org/apache/sysml/api/DMLScript.java
+++ b/src/main/java/org/apache/sysml/api/DMLScript.java
@@ -706,7 +706,6 @@ public class DMLScript
 		}
 	}
 	
-	
 	///////////////////////////////
 	// private internal interface 
 	// (core compilation and execute)
@@ -1117,4 +1116,4 @@ public class DMLScript
 			throw new DMLException("Failed to run SystemML workspace cleanup.", ex);
 		}
 	}
-}  
\ No newline at end of file
+}  

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/java/org/apache/sysml/conf/DMLConfig.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/conf/DMLConfig.java b/src/main/java/org/apache/sysml/conf/DMLConfig.java
index e974a71..84bea42 100644
--- a/src/main/java/org/apache/sysml/conf/DMLConfig.java
+++ b/src/main/java/org/apache/sysml/conf/DMLConfig.java
@@ -71,6 +71,7 @@ public class DMLConfig
 	public static final String CP_PARALLEL_MATRIXMULT = "cp.parallel.matrixmult";
 	public static final String CP_PARALLEL_TEXTIO   = "cp.parallel.textio";
 	public static final String COMPRESSED_LINALG    = "compressed.linalg";
+	public static final String NATIVE_BLAS    			= "native.blas";
 	public static final String CODEGEN              = "codegen.enabled"; //boolean
 	public static final String CODEGEN_PLANCACHE    = "codegen.plancache"; //boolean
 	public static final String CODEGEN_LITERALS     = "codegen.literals"; //1..heuristic, 2..always
@@ -115,6 +116,7 @@ public class DMLConfig
 		_defaultVals.put(CODEGEN,                "false" );
 		_defaultVals.put(CODEGEN_PLANCACHE,      "true" );
 		_defaultVals.put(CODEGEN_LITERALS,       "1" );
+		_defaultVals.put(NATIVE_BLAS,      			 "true" );
 
 		_defaultVals.put(EXTRA_GPU_STATS,       "false" );
 		_defaultVals.put(EXTRA_DNN_STATS,       "false" );
@@ -404,7 +406,7 @@ public class DMLConfig
 				LOCAL_TMP_DIR,SCRATCH_SPACE,OPTIMIZATION_LEVEL,
 				NUM_REDUCERS, DEFAULT_BLOCK_SIZE,
 				YARN_APPMASTER, YARN_APPMASTERMEM, YARN_MAPREDUCEMEM, 
-				CP_PARALLEL_MATRIXMULT, CP_PARALLEL_TEXTIO,
+				CP_PARALLEL_MATRIXMULT, CP_PARALLEL_TEXTIO, NATIVE_BLAS,
 				COMPRESSED_LINALG, CODEGEN, CODEGEN_LITERALS, CODEGEN_PLANCACHE,
 				EXTRA_GPU_STATS, EXTRA_DNN_STATS
 		}; 

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/java/org/apache/sysml/hops/ConvolutionOp.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/hops/ConvolutionOp.java b/src/main/java/org/apache/sysml/hops/ConvolutionOp.java
index a097d19..a18aada 100644
--- a/src/main/java/org/apache/sysml/hops/ConvolutionOp.java
+++ b/src/main/java/org/apache/sysml/hops/ConvolutionOp.java
@@ -150,10 +150,17 @@ public class ConvolutionOp extends Hop  implements MultiThreadedHop
 		int k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);
 		OperationTypes lopOp = HopsConv2Lops.get(op);
 
-		if(op == ConvOp.MAX_POOLING && isInputReLU(inputs.get(0))) {
+		// RELU_MAX_POOLING and RELU_MAX_POOLING_BACKWARD are extremely useful for the CP backend,
+		// as they avoid unnecessary sparse-to-dense-to-sparse conversions.
+		// For other backends, these operators are not necessary; they only eliminate an additional relu operator.
+		if(et == ExecType.CP && op == ConvOp.MAX_POOLING && isInputReLU(inputs.get(0))) {
 			in = inputs.get(0).getInput().get(0).constructLops();
 			lopOp = OperationTypes.RELU_MAX_POOLING;
 		}
+		else if(et == ExecType.CP && op == ConvOp.MAX_POOLING_BACKWARD && isInputReLU(inputs.get(0))) {
+			in = inputs.get(0).getInput().get(0).constructLops();
+			lopOp = OperationTypes.RELU_MAX_POOLING_BACKWARD;
+		}
 		else if(op == ConvOp.BIAS_ADD && isInputConv2d(inputs.get(0))) {
 			lopOp = OperationTypes.DIRECT_CONV2D_BIAS_ADD;
 			

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/java/org/apache/sysml/lops/ConvolutionTransform.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/lops/ConvolutionTransform.java b/src/main/java/org/apache/sysml/lops/ConvolutionTransform.java
index 6d8885a..8784956 100644
--- a/src/main/java/org/apache/sysml/lops/ConvolutionTransform.java
+++ b/src/main/java/org/apache/sysml/lops/ConvolutionTransform.java
@@ -30,7 +30,7 @@ public class ConvolutionTransform extends Lop
 
 	
 	public enum OperationTypes {
-		MAX_POOLING, MAX_POOLING_BACKWARD, RELU_MAX_POOLING, RELU_BACKWARD,
+		MAX_POOLING, MAX_POOLING_BACKWARD, RELU_MAX_POOLING, RELU_BACKWARD, RELU_MAX_POOLING_BACKWARD,
 		DIRECT_CONV2D, DIRECT_CONV2D_BACKWARD_FILTER, DIRECT_CONV2D_BACKWARD_DATA,
 		BIAS_ADD, DIRECT_CONV2D_BIAS_ADD, BIAS_MULTIPLY
 	};
@@ -112,6 +112,9 @@ public class ConvolutionTransform extends Lop
 		case RELU_MAX_POOLING:
 			return "relu_maxpooling";
 			
+		case RELU_MAX_POOLING_BACKWARD:
+			return "relu_maxpooling_backward";
+			
 		case RELU_BACKWARD:
 			return "relu_backward";
 			

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/java/org/apache/sysml/runtime/instructions/CPInstructionParser.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/runtime/instructions/CPInstructionParser.java b/src/main/java/org/apache/sysml/runtime/instructions/CPInstructionParser.java
index bb5e01a..52c11e6 100644
--- a/src/main/java/org/apache/sysml/runtime/instructions/CPInstructionParser.java
+++ b/src/main/java/org/apache/sysml/runtime/instructions/CPInstructionParser.java
@@ -225,6 +225,7 @@ public class CPInstructionParser extends InstructionParser
 		// Opcodes related to convolutions
 		String2CPInstructionType.put( "relu_backward"      , CPINSTRUCTION_TYPE.Convolution);
 		String2CPInstructionType.put( "relu_maxpooling"      , CPINSTRUCTION_TYPE.Convolution);
+		String2CPInstructionType.put( "relu_maxpooling_backward"      , CPINSTRUCTION_TYPE.Convolution);
 		String2CPInstructionType.put( "maxpooling"      , CPINSTRUCTION_TYPE.Convolution);
 		String2CPInstructionType.put( "maxpooling_backward"      , CPINSTRUCTION_TYPE.Convolution);
 		String2CPInstructionType.put( "conv2d"      , CPINSTRUCTION_TYPE.Convolution);

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java b/src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java
index 4051d6a..e5b3326 100644
--- a/src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java
+++ b/src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java
@@ -41,7 +41,6 @@ public class GPUInstructionParser  extends InstructionParser
 		// Neural Network Operators
 		String2GPUInstructionType.put( "relu_backward",          GPUINSTRUCTION_TYPE.Convolution);
 		String2GPUInstructionType.put( "conv2d",                 GPUINSTRUCTION_TYPE.Convolution);
-		String2GPUInstructionType.put( "relu_maxpooling",          GPUINSTRUCTION_TYPE.Convolution);
 		String2GPUInstructionType.put( "conv2d_bias_add",                 GPUINSTRUCTION_TYPE.Convolution);
 		String2GPUInstructionType.put( "conv2d_backward_filter", GPUINSTRUCTION_TYPE.Convolution);
 		String2GPUInstructionType.put( "conv2d_backward_data",   GPUINSTRUCTION_TYPE.Convolution);

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/39a37ae4/src/main/java/org/apache/sysml/runtime/instructions/cp/AggregateBinaryCPInstruction.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/runtime/instructions/cp/AggregateBinaryCPInstruction.java b/src/main/java/org/apache/sysml/runtime/instructions/cp/AggregateBinaryCPInstruction.java
index 702e4f3..59da4ac 100644
--- a/src/main/java/org/apache/sysml/runtime/instructions/cp/AggregateBinaryCPInstruction.java
+++ b/src/main/java/org/apache/sysml/runtime/instructions/cp/AggregateBinaryCPInstruction.java
@@ -19,6 +19,8 @@
 
 package org.apache.sysml.runtime.instructions.cp;
 
+import org.apache.sysml.conf.ConfigurationManager;
+import org.apache.sysml.conf.DMLConfig;
 import org.apache.sysml.parser.Expression.DataType;
 import org.apache.sysml.parser.Expression.ValueType;
 import org.apache.sysml.runtime.DMLRuntimeException;
@@ -71,15 +73,17 @@ public class AggregateBinaryCPInstruction extends BinaryCPInstruction
 	{	
 		//get inputs
 		MatrixBlock matBlock1 = ec.getMatrixInput(input1.getName());
-        MatrixBlock matBlock2 = ec.getMatrixInput(input2.getName());
+    MatrixBlock matBlock2 = ec.getMatrixInput(input2.getName());
 		
-        //compute matrix multiplication
-        AggregateBinaryOperator ab_op = (AggregateBinaryOperator) _optr;
+    //compute matrix multiplication
+    AggregateBinaryOperator ab_op = (AggregateBinaryOperator) _optr;
 		MatrixBlock soresBlock = null;
 		if( matBlock2 instanceof CompressedMatrixBlock )
 			soresBlock = (MatrixBlock) (matBlock2.aggregateBinaryOperations(matBlock1, matBlock2, new MatrixBlock(), ab_op));
-		else 
-			soresBlock = (MatrixBlock) (matBlock1.aggregateBinaryOperations(matBlock1, matBlock2, new MatrixBlock(), ab_op));
+		else  {
+			boolean enableNative = ConfigurationManager.getDMLConfig().getBooleanValue(DMLConfig.NATIVE_BLAS);
+			soresBlock = (MatrixBlock) (matBlock1.aggregateBinaryOperations(matBlock1, matBlock2, new MatrixBlock(), ab_op, enableNative));
+		}
 			
 		//release inputs/outputs
 		ec.releaseMatrixInput(input1.getName());