Posted to commits@mahout.apache.org by ap...@apache.org on 2017/01/26 04:24:20 UTC

[2/5] mahout git commit: MAHOUT-1885: Initial commit of VCL bindings. closes apache/mahout#269 closes apache/mahout#261

http://git-wip-us.apache.org/repos/asf/mahout/blob/034790cc/viennacl-omp/src/main/scala/org/apache/mahout/viennacl/openmp/package.scala
----------------------------------------------------------------------
diff --git a/viennacl-omp/src/main/scala/org/apache/mahout/viennacl/openmp/package.scala b/viennacl-omp/src/main/scala/org/apache/mahout/viennacl/openmp/package.scala
new file mode 100644
index 0000000..89af010
--- /dev/null
+++ b/viennacl-omp/src/main/scala/org/apache/mahout/viennacl/openmp/package.scala
@@ -0,0 +1,434 @@
+package org.apache.mahout.viennacl
+
+import java.nio._
+
+import org.apache.mahout.math._
+import scalabindings._
+import RLikeOps._
+
+import scala.collection.JavaConversions._
+import org.apache.mahout.viennacl.openmp.javacpp.DenseRowMatrix
+import org.apache.mahout.viennacl.openmp.javacpp._
+import org.bytedeco.javacpp.{DoublePointer, IntPointer}
+
+
+
+package object openmp {
+
+  type IntConvertor = Int => Int
+
+  def toVclDenseRM(src: Matrix, vclCtx: Context = new Context(Context.MAIN_MEMORY)): DenseRowMatrix = {
+    vclCtx.memoryType match {
+      case Context.MAIN_MEMORY ⇒
+        val vclMx = new DenseRowMatrix(
+          data = repackRowMajor(src, src.nrow, src.ncol),
+          nrow = src.nrow,
+          ncol = src.ncol,
+          ctx = vclCtx
+        )
+        vclMx
+      case _ ⇒
+        val vclMx = new DenseRowMatrix(src.nrow, src.ncol, vclCtx)
+        fastCopy(src, vclMx)
+        vclMx
+    }
+  }
+
+
+  /**
+    * Convert a dense row-major VCL matrix to a Mahout matrix.
+    *
+    * @param src the VCL dense row-major matrix to read back
+    * @return an in-core Mahout DenseMatrix with the same values
+    */
+  def fromVclDenseRM(src: DenseRowMatrix): Matrix = {
+    val nrowIntern = src.internalnrow
+    val ncolIntern = src.internalncol
+
+    // A technical debt here:
+
+    // We copy the data twice here, which is obviously suboptimal, but hopefully we compensate
+    // for it with the gains from running superlinear algorithms in VCL.
+    val dbuff = new DoublePointer(nrowIntern * ncolIntern)
+    Functions.fastCopy(src, dbuff)
+    var srcOffset = 0
+    val ncol = src.ncol
+    val rows = for (irow ← 0 until src.nrow) yield {
+
+      val rowvec = new Array[Double](ncol)
+      dbuff.position(srcOffset).get(rowvec)
+
+      srcOffset += ncolIntern
+      rowvec
+    }
+
+    // Always use shallow = true here to avoid yet another copy.
+    new DenseMatrix(rows.toArray, true)
+  }
+
+  def fastCopy(mxSrc: Matrix, dst: DenseRowMatrix) = {
+    val nrowIntern = dst.internalnrow
+    val ncolIntern = dst.internalncol
+
+    assert(nrowIntern >= mxSrc.nrow && ncolIntern >= mxSrc.ncol)
+
+    val rmajorData = repackRowMajor(mxSrc, nrowIntern, ncolIntern)
+    Functions.fastCopy(rmajorData, new DoublePointer(rmajorData).position(rmajorData.limit()), dst)
+
+    rmajorData.close()
+  }
+
+  private def repackRowMajor(mx: Matrix, nrowIntern: Int, ncolIntern: Int): DoublePointer = {
+
+    assert(mx.nrow <= nrowIntern && mx.ncol <= ncolIntern)
+
+    val dbuff = new DoublePointer(nrowIntern * ncolIntern)
+
+    mx match {
+      case dm: DenseMatrix ⇒
+        val valuesF = classOf[DenseMatrix].getDeclaredField("values")
+        valuesF.setAccessible(true)
+        val values = valuesF.get(dm).asInstanceOf[Array[Array[Double]]]
+        var dstOffset = 0
+        for (irow ← 0 until mx.nrow) {
+          val rowarr = values(irow)
+          dbuff.position(dstOffset).put(rowarr, 0, rowarr.size min ncolIntern)
+          dstOffset += ncolIntern
+        }
+        dbuff.position(0)
+      case _ ⇒
+        // Naive copying. Could be sped up for a DenseMatrix. TODO.
+        for (row ← mx) {
+          val dstOffset = row.index * ncolIntern
+          for (el ← row.nonZeroes) dbuff.put(dstOffset + el.index, el)
+        }
+    }
+
+    dbuff
+  }
+
+  /**
+    * Convert a Mahout matrix into a VCL CSR (compressed) matrix.
+    * @param mxSrc the Mahout source matrix
+    * @param ctx   the VCL context (main memory or OpenCL) to allocate in
+    * @return a VCL CompressedMatrix with the same non-zero structure
+    */
+  def toVclCmpMatrixAlt(mxSrc: Matrix, ctx: Context): CompressedMatrix = {
+
+    // use repackCSR(matrix, ctx) to convert all ints to unsigned ints if the context is OpenCL
+    // val (jumpers, colIdcs, els) = repackCSRAlt(mxSrc)
+    val (jumpers, colIdcs, els) = repackCSR(mxSrc, ctx)
+
+    val compMx = new CompressedMatrix(mxSrc.nrow, mxSrc.ncol, els.capacity().toInt, ctx)
+    compMx.set(jumpers, colIdcs, els, mxSrc.nrow, mxSrc.ncol, els.capacity().toInt)
+    compMx
+  }
+
+  private def repackCSRAlt(mx: Matrix): (IntPointer, IntPointer, DoublePointer) = {
+    val nzCnt = mx.map(_.getNumNonZeroElements).sum
+    val jumpers = new IntPointer(mx.nrow + 1L)
+    val colIdcs = new IntPointer(nzCnt + 0L)
+    val els = new DoublePointer(nzCnt)
+    var posIdx = 0
+
+    var sortCols = false
+
+    // Row-wise loop. Rows may not necessarily come in order, but we must emit them in order.
+    for (irow ← 0 until mx.nrow) {
+
+      val row = mx(irow, ::)
+      jumpers.put(irow.toLong, posIdx)
+
+      // Remember row start index in case we need to restart conversion of this row if out-of-order
+      // column index is detected
+      val posIdxStart = posIdx
+
+      // Retry loop: normally we are done in one pass through it unless we need to re-run it because
+      // an out-of-order column was detected.
+      var done = false
+      while (!done) {
+
+        // Is the sorting mode on?
+        if (sortCols) {
+
+          // Sorting of column indices is on. So do it.
+          row.nonZeroes()
+            // Convert the iterator view into a strict collection
+            .map(el ⇒ el.index → el.get)
+            // Sorting requires the Seq API
+            .toSeq
+            // Sort by column index
+            .sortBy(_._1)
+            // Flush to the CSR buffers.
+            .foreach { case (index, v) ⇒
+              colIdcs.put(posIdx.toLong, index)
+              els.put(posIdx.toLong, v)
+              posIdx += 1
+            }
+
+          // Never need to retry if we are already in the sorting mode.
+          done = true
+
+        } else {
+
+          // Try to run unsorted conversion here, switch lazily to sorted if out-of-order column is
+          // detected.
+          var lastCol = 0
+          val nzIter = row.nonZeroes().iterator()
+          var abortNonSorted = false
+
+          while (nzIter.hasNext && !abortNonSorted) {
+
+            val el = nzIter.next()
+            val index = el.index
+
+            if (index < lastCol) {
+
+              // Out of order detected: abort inner loop, reset posIdx and retry with sorting on.
+              abortNonSorted = true
+              sortCols = true
+              posIdx = posIdxStart
+
+            } else {
+
+              // Still in-order: save element and column, continue.
+              els.put(posIdx, el)
+              colIdcs.put(posIdx.toLong, index)
+              posIdx += 1
+
+              // Remember last column seen.
+              lastCol = index
+            }
+          } // inner non-sorted
+
+          // Do we need to re-run this row with sorting?
+          done = !abortNonSorted
+
+        } // if (sortCols)
+
+      } // while (!done) retry loop
+
+    } // row-wise loop
+
+    // Make sure Mahout matrix did not cheat on non-zero estimate.
+    assert(posIdx == nzCnt)
+
+    jumpers.put(mx.nrow.toLong, nzCnt)
+
+    (jumpers, colIdcs, els)
+  }
+
+  // Same as repackCSRAlt, except it converts jumpers and colIdcs to unsigned ints before setting.
+  private def repackCSR(mx: Matrix, context: Context): (IntPointer, IntPointer, DoublePointer) = {
+    val nzCnt = mx.map(_.getNumNonZeroElements).sum
+    val jumpers = new IntPointer(mx.nrow + 1L)
+    val colIdcs = new IntPointer(nzCnt + 0L)
+    val els = new DoublePointer(nzCnt)
+    var posIdx = 0
+
+    var sortCols = false
+
+    def convertInt: IntConvertor = if(context.memoryType == Context.OPENCL_MEMORY) {
+      int2cl_uint
+    } else {
+      i: Int => i: Int
+    }
+
+    // Row-wise loop. Rows may not necessarily come in order, but we must emit them in order.
+    for (irow ← 0 until mx.nrow) {
+
+      val row = mx(irow, ::)
+      jumpers.put(irow.toLong, posIdx)
+
+      // Remember row start index in case we need to restart conversion of this row if out-of-order
+      // column index is detected
+      val posIdxStart = posIdx
+
+      // Retry loop: normally we are done in one pass through it unless we need to re-run it because
+      // an out-of-order column was detected.
+      var done = false
+      while (!done) {
+
+        // Is the sorting mode on?
+        if (sortCols) {
+
+          // Sorting of column indices is on. So do it.
+          row.nonZeroes()
+            // Convert the iterator view into a strict collection
+            .map(el ⇒ el.index → el.get)
+            // Sorting requires the Seq API
+            .toIndexedSeq
+            // Sort by column index
+            .sortBy(_._1)
+            // Flush to the CSR buffers.
+            .foreach { case (index, v) ⇒
+            // convert to cl_uint if context is OCL
+            colIdcs.put(posIdx.toLong, convertInt(index))
+            els.put(posIdx.toLong, v)
+            posIdx += 1
+          }
+
+          // Never need to retry if we are already in the sorting mode.
+          done = true
+
+        } else {
+
+          // Try to run unsorted conversion here, switch lazily to sorted if out-of-order column is
+          // detected.
+          var lastCol = 0
+          val nzIter = row.nonZeroes().iterator()
+          var abortNonSorted = false
+
+          while (nzIter.hasNext && !abortNonSorted) {
+
+            val el = nzIter.next()
+            val index = el.index
+
+            if (index < lastCol) {
+
+              // Out of order detected: abort inner loop, reset posIdx and retry with sorting on.
+              abortNonSorted = true
+              sortCols = true
+              posIdx = posIdxStart
+
+            } else {
+
+              // Still in-order: save element and column, continue.
+              els.put(posIdx, el)
+              // convert to cl_uint if context is OCL
+              colIdcs.put(posIdx.toLong, convertInt(index))
+              posIdx += 1
+
+              // Remember last column seen.
+              lastCol = index
+            }
+          } // inner non-sorted
+
+          // Do we need to re-run this row with sorting?
+          done = !abortNonSorted
+
+        } // if (sortCols)
+
+      } // while (!done) retry loop
+
+    } // row-wise loop
+
+    // Make sure Mahout matrix did not cheat on non-zero estimate.
+    assert(posIdx == nzCnt)
+
+    // convert to cl_uint if context is OCL
+    jumpers.put(mx.nrow.toLong, convertInt(nzCnt))
+
+    (jumpers, colIdcs, els)
+  }
+
+
+
+  def fromVclCompressedMatrix(src: CompressedMatrix): Matrix = {
+    val m = src.size1
+    val n = src.size2
+    val NNz = src.nnz
+
+    val row_ptr_handle = src.handle1
+    val col_idx_handle = src.handle2
+    val element_handle = src.handle
+
+    val row_ptr = new IntPointer((m + 1).toLong)
+    val col_idx = new IntPointer(NNz.toLong)
+    val values = new DoublePointer(NNz.toLong)
+
+    Functions.memoryReadInt(row_ptr_handle, 0, (m + 1) * 4, row_ptr, false)
+    Functions.memoryReadInt(col_idx_handle, 0, NNz * 4, col_idx, false)
+    Functions.memoryReadDouble(element_handle, 0, NNz * 8, values, false)
+
+    val rowPtr = row_ptr.asBuffer()
+    val colIdx = col_idx.asBuffer()
+    val vals = values.asBuffer()
+
+    rowPtr.rewind()
+    colIdx.rewind()
+    vals.rewind()
+
+
+    val srMx = new SparseRowMatrix(m, n)
+
+    // read the values back into the matrix
+    var j = 0
+    // row wise, copy any non-zero elements from row(i-1,::)
+    for (i <- 1 to m) {
+      // for each nonzero element, set column col(idx(j) value to vals(j)
+      while (j < rowPtr.get(i)) {
+        srMx(i - 1, colIdx.get(j)) = vals.get(j)
+        j += 1
+      }
+    }
+    srMx
+  }
+
+  def toVclVec(vec: Vector, ctx: Context): VCLVector = {
+
+    vec match {
+      case vec: DenseVector => {
+        val valuesF = classOf[DenseVector].getDeclaredField("values")
+        valuesF.setAccessible(true)
+        val values = valuesF.get(vec).asInstanceOf[Array[Double]]
+        val el_ptr = new DoublePointer(values.length.toLong)
+        el_ptr.put(values, 0, values.length)
+
+        new VCLVector(el_ptr, ctx.memoryType, values.length)
+      }
+
+      case vec: SequentialAccessSparseVector => {
+        val it = vec.iterateNonZero
+        val size = vec.size()
+        val el_ptr = new DoublePointer(size.toLong)
+        while (it.hasNext) {
+          val el: Vector.Element = it.next
+          el_ptr.put(el.index, el.get())
+        }
+        new VCLVector(el_ptr, ctx.memoryType, size)
+      }
+
+      case vec: RandomAccessSparseVector => {
+        val it = vec.iterateNonZero
+        val size = vec.size()
+        val el_ptr = new DoublePointer(size.toLong)
+        while (it.hasNext) {
+          val el: Vector.Element = it.next
+          el_ptr.put(el.index, el.get())
+        }
+        new VCLVector(el_ptr, ctx.memoryType, size)
+      }
+      case _ => throw new IllegalArgumentException("Vector sub-type not supported.")
+    }
+
+  }
+
+  def fromVClVec(vclVec: VCLVector): Vector = {
+    val size = vclVec.size
+    val element_handle = vclVec.handle
+    val ele_ptr = new DoublePointer(size)
+    Functions.memoryReadDouble(element_handle, 0, size * 8, ele_ptr, false)
+
+    // for now just assume it's dense since we only have one flavor of
+    // VCLVector
+    val mVec = new DenseVector(size)
+    for (i <- 0 until size) {
+      mVec.setQuick(i, ele_ptr.get(i + 0L))
+    }
+
+    mVec
+  }
+
+
+  // TODO: Fix this?  cl_uint must be an unsigned int per each machine's representation of such.
+  // This is currently not working anyway.
+  // cl_uint is needed for OpenCL sparse buffers;
+  // per https://www.khronos.org/registry/cl/sdk/1.1/docs/man/xhtml/scalarDataTypes.html
+  // it is simply an unsigned int, so strip the sign.
+  def int2cl_uint(i: Int): Int = {
+    ((i >>> 1) << 1) + (i & 1)
+  }
+
+
+}

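For reference, a minimal usage sketch (not part of the commit) of the conversion helpers defined in the package object above, assuming the viennacl-omp module and its native jniViennaCL library are built and on the classpath; the object and matrix names below are purely illustrative.

import org.apache.mahout.math._
import scalabindings._
import RLikeOps._
import org.apache.mahout.viennacl.openmp._
import org.apache.mahout.viennacl.openmp.javacpp._
import org.apache.mahout.viennacl.openmp.javacpp.LinalgFunctions._

object ConversionRoundTrip {
  def main(args: Array[String]): Unit = {
    Context.loadLib()
    val ctx = new Context(Context.MAIN_MEMORY)

    // Dense round trip: Mahout -> VCL dense row-major, multiply, and copy back.
    val mxDense = dense((1.0, 2.0), (3.0, 4.0))
    val vclA = toVclDenseRM(mxDense, ctx)
    val vclB = toVclDenseRM(mxDense.t, ctx)
    val vclC = new DenseRowMatrix(prod(vclA, vclB))
    val mxC = fromVclDenseRM(vclC)

    // Sparse round trip: repack into CSR (jumpers / column indices / values) and read back.
    val mxSparse = new SparseRowMatrix(2, 2)
    mxSparse(0, 0) = 1.0
    mxSparse(1, 1) = 2.0
    val vclS = toVclCmpMatrixAlt(mxSparse, ctx)
    val mxS = fromVclCompressedMatrix(vclS)

    // Native buffers are not collected predictably; release them explicitly.
    vclA.close(); vclB.close(); vclC.close(); vclS.close()

    println(s"dense product:\n$mxC\nsparse round trip:\n$mxS")
  }
}
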
http://git-wip-us.apache.org/repos/asf/mahout/blob/034790cc/viennacl-omp/src/test/scala/org/apache/mahout/viennacl/omp/ViennaCLSuiteOMP.scala
----------------------------------------------------------------------
diff --git a/viennacl-omp/src/test/scala/org/apache/mahout/viennacl/omp/ViennaCLSuiteOMP.scala b/viennacl-omp/src/test/scala/org/apache/mahout/viennacl/omp/ViennaCLSuiteOMP.scala
new file mode 100644
index 0000000..8eb3ff9
--- /dev/null
+++ b/viennacl-omp/src/test/scala/org/apache/mahout/viennacl/omp/ViennaCLSuiteOMP.scala
@@ -0,0 +1,249 @@
+package org.apache.mahout.viennacl.openmp
+
+import org.apache.mahout.math._
+import scalabindings._
+import RLikeOps._
+import org.bytedeco.javacpp.DoublePointer
+import org.scalatest.{FunSuite, Matchers}
+import org.apache.mahout.viennacl.openmp.javacpp._
+import org.apache.mahout.viennacl.openmp.javacpp.Functions._
+import org.apache.mahout.viennacl.openmp.javacpp.LinalgFunctions._
+
+import scala.util.Random
+
+class ViennaCLSuiteOMP extends FunSuite with Matchers {
+
+  test("row-major viennacl::matrix") {
+
+    // Just to make sure the javacpp library is loaded:
+    Context.loadLib()
+
+    val m = 20
+    val n = 30
+    val data = new DoublePointer(m * n)
+    val buff = data.asBuffer()
+    // Fill with some noise
+    while (buff.remaining() > 0) buff.put(Random.nextDouble())
+
+    // Create a row-major matrix in main memory
+    val hostClCtx = new Context(Context.MAIN_MEMORY)
+    val cpuMx = new DenseRowMatrix(data = data, nrow = m, ncol = n, hostClCtx)
+    // And free.
+    cpuMx.close()
+
+  }
+
+
+  test("mmul microbenchmark") {
+    val memCtx = new Context(Context.MAIN_MEMORY)
+
+    val m = 3000
+    val n = 3000
+    val s = 1000
+
+    val r = new Random(1234)
+
+    // Dense row-wise
+    val mxA = new DenseMatrix(m, s)
+    val mxB = new DenseMatrix(s, n)
+
+    // add some data
+    mxA := { (_, _, _) => r.nextDouble() }
+    mxB := { (_, _, _) => r.nextDouble() }
+
+    var ms = System.currentTimeMillis()
+    mxA %*% mxB
+    ms = System.currentTimeMillis() - ms
+    info(s"Mahout multiplication time: $ms ms.")
+
+    import LinalgFunctions._
+
+    // openMP/cpu time, including copying:
+    {
+      ms = System.currentTimeMillis()
+      val ompA = toVclDenseRM(mxA, memCtx)
+      val ompB = toVclDenseRM(mxB, memCtx)
+      val ompC = new DenseRowMatrix(prod(ompA, ompB))
+      val mxC = fromVclDenseRM(ompC)
+      ms = System.currentTimeMillis() - ms
+      info(s"ViennaCL/cpu/OpenMP multiplication time: $ms ms.")
+
+      ompA.close()
+      ompB.close()
+      ompC.close()
+    }
+
+  }
+
+  test("trans") {
+
+    val ompCtx = new Context(Context.MAIN_MEMORY)
+
+
+    val m = 20
+    val n = 30
+
+    val r = new Random(1234)
+
+    // Dense row-wise
+    val mxA = new DenseMatrix(m, n)
+
+    // add some data
+    mxA := { (_, _, _) => r.nextDouble() }
+
+
+    // Test transposition in OpenMP
+    {
+      val ompA = toVclDenseRM(src = mxA, ompCtx)
+      val ompAt = new DenseRowMatrix(trans(ompA))
+
+      val mxAt = fromVclDenseRM(ompAt)
+      ompA.close()
+      ompAt.close()
+
+      (mxAt - mxA.t).norm / m / n should be < 1e-16
+    }
+
+  }
+
+  test("sparse mmul microbenchmark") {
+
+    val ompCtx = new Context(Context.MAIN_MEMORY)
+
+    val m = 3000
+    val n = 3000
+    val s = 1000
+
+    val r = new Random(1234)
+
+    // sparse row-wise
+    val mxA = new SparseRowMatrix(m, s, false)
+    val mxB = new SparseRowMatrix(s, n, true)
+
+    // add some sparse data with 20% density
+    mxA := { (_, _, v) => if (r.nextDouble() < .20) r.nextDouble() else v }
+    mxB := { (_, _, v) => if (r.nextDouble() < .20) r.nextDouble() else v }
+
+    var ms = System.currentTimeMillis()
+    val mxC = mxA %*% mxB
+    ms = System.currentTimeMillis() - ms
+    info(s"Mahout Sparse multiplication time: $ms ms.")
+
+
+    // Test multiplication in OpenMP
+    {
+      ms = System.currentTimeMillis()
+      //      val ompA = toVclCompressedMatrix(src = mxA, ompCtx)
+      //      val ompB = toVclCompressedMatrix(src = mxB, ompCtx)
+
+      val ompA = toVclCmpMatrixAlt(mxA, ompCtx)
+      val ompB = toVclCmpMatrixAlt(mxB, ompCtx)
+
+      val ompC = new CompressedMatrix(prod(ompA, ompB))
+
+      ms = System.currentTimeMillis() - ms
+      info(s"ViennaCL/cpu/OpenMP Sparse multiplication time: $ms ms.")
+
+      val ompMxC = fromVclCompressedMatrix(ompC)
+      (mxC - ompMxC).norm / mxC.nrow / mxC.ncol should be < 1e-16
+
+      ompA.close()
+      ompB.close()
+      ompC.close()
+
+    }
+
+  }
+
+  test("VCL Dense Matrix %*% Dense vector - no OpenCL") {
+
+    val ompCtx = new Context(Context.MAIN_MEMORY)
+
+
+    val m = 3000
+    val s = 1000
+
+    val r = new Random(1234)
+
+    // Dense row-wise
+    val mxA = new DenseMatrix(m, s)
+    val dvecB = new DenseVector(s)
+
+    // add some random data
+    mxA := { (_,_,_) => r.nextDouble() }
+    dvecB := { (_,_) => r.nextDouble() }
+
+    //test in matrix %*% vec
+    var ms = System.currentTimeMillis()
+    val mDvecC = mxA %*% dvecB
+    ms = System.currentTimeMillis() - ms
+    info(s"Mahout dense matrix %*% dense vector multiplication time: $ms ms.")
+
+
+    //Test multiplication in OpenMP
+      {
+
+        ms = System.currentTimeMillis()
+        val ompMxA = toVclDenseRM(mxA, ompCtx)
+        val ompVecB = toVclVec(dvecB, ompCtx)
+
+        val ompVecC = new VCLVector(prod(ompMxA, ompVecB))
+        val ompDvecC = fromVClVec(ompVecC)
+
+        ms = System.currentTimeMillis() - ms
+        info(s"ViennaCL/cpu/OpenMP dense matrix %*% dense vector multiplication time: $ms ms.")
+        (ompDvecC.toColMatrix - mDvecC.toColMatrix).norm / s  should be < 1e-16
+
+        ompMxA.close()
+        ompVecB.close()
+        ompVecC.close()
+      }
+
+  }
+
+
+  test("Sparse %*% Dense mmul microbenchmark") {
+    val memCtx = new Context(Context.MAIN_MEMORY)
+
+    val m = 3000
+    val n = 3000
+    val s = 1000
+
+    val r = new Random(1234)
+
+    // Sparse row-wise A, dense row-wise B
+    val mxSr = new SparseMatrix(m, s)
+    val mxDn = new DenseMatrix(s, n)
+
+    // add some data
+    mxSr := { (_, _, v) => if (r.nextDouble() < .20) r.nextDouble() else v }
+    mxDn := { (_, _, _) => r.nextDouble() }
+
+    var ms = System.currentTimeMillis()
+    mxSr %*% mxDn
+    ms = System.currentTimeMillis() - ms
+    info(s"Mahout multiplication time: $ms ms.")
+
+    import LinalgFunctions._
+
+
+    // openMP/cpu time, including copying:
+    {
+      ms = System.currentTimeMillis()
+      val ompA = toVclCmpMatrixAlt(mxSr, memCtx)
+      val ompB = toVclDenseRM(mxDn, memCtx)
+      val ompC = new DenseRowMatrix(prod(ompA, ompB))
+      val mxC = fromVclDenseRM(ompC)
+      ms = System.currentTimeMillis() - ms
+      info(s"ViennaCL/cpu/OpenMP multiplication time: $ms ms.")
+
+      ompA.close()
+      ompB.close()
+      ompC.close()
+    }
+
+  }
+
+
+
+}

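The timing in these tests follows one pattern: snapshot System.currentTimeMillis(), run the multiplication, subtract, and report via info(...). A tiny helper, sketched here only to illustrate that pattern (the Timed object and its name are illustrative, not part of this commit):

object Timed {
  // Runs a block and reports elapsed wall-clock milliseconds, mirroring the measurement above.
  def timed[T](tag: String)(block: => T): T = {
    val start = System.currentTimeMillis()
    val result = block
    println(s"$tag: ${System.currentTimeMillis() - start} ms.")
    result
  }
}

// e.g., inside a test: val mxC = Timed.timed("Mahout dense %*% dense")(mxA %*% mxB)
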
http://git-wip-us.apache.org/repos/asf/mahout/blob/034790cc/viennacl/linux-haswell.properties
----------------------------------------------------------------------
diff --git a/viennacl/linux-haswell.properties b/viennacl/linux-haswell.properties
new file mode 100644
index 0000000..52d5cec
--- /dev/null
+++ b/viennacl/linux-haswell.properties
@@ -0,0 +1,28 @@
+platform=linux-haswell
+platform.path.separator=:
+platform.source.suffix=.cpp
+platform.includepath.prefix=-I
+platform.includepath=
+platform.compiler=g++
+platform.compiler.cpp11=-std=c++11
+platform.compiler.default=
+platform.compiler.fastfpu=-msse3 -ffast-math
+platform.compiler.viennacl=-fopenmp -fpermissive
+platform.compiler.nodeprecated=-Wno-deprecated-declarations
+#build for haswell arch with GCC >= 4.9.0
+platform.compiler.output=-Wl,-rpath,$ORIGIN/ -Wl,-z,noexecstack -Wl,-Bsymbolic -march=haswell -m64 -Wall -O3 -fPIC -shared -s -o\u0020
+#for GCC < 4.9.0 use -march=core-avx2 for haswell arch
+#platform.compiler.output=-Wl,-rpath,$ORIGIN/ -Wl,-z,noexecstack -Wl,-Bsymbolic -march=core-avx2 -m64 -Wall -Ofast -fPIC -shared -s -o\u0020
+#build for native:
+#platform.compiler.output=-Wl,-rpath,$ORIGIN/ -Wl,-z,noexecstack -Wl,-Bsymbolic -march=native -m64 -Wall -Ofast -fPIC -shared -s -o\u0020
+platform.linkpath.prefix=-L
+platform.linkpath.prefix2=-Wl,-rpath,
+platform.linkpath=
+platform.link.prefix=-l
+platform.link.suffix=
+platform.link=
+platform.framework.prefix=-F
+platform.framework.suffix=
+platform.framework=
+platform.library.prefix=lib
+platform.library.suffix=.so

http://git-wip-us.apache.org/repos/asf/mahout/blob/034790cc/viennacl/linux-x86_64-viennacl.properties
----------------------------------------------------------------------
diff --git a/viennacl/linux-x86_64-viennacl.properties b/viennacl/linux-x86_64-viennacl.properties
new file mode 100644
index 0000000..e5de1fa
--- /dev/null
+++ b/viennacl/linux-x86_64-viennacl.properties
@@ -0,0 +1,24 @@
+platform=linux-x86_64
+platform.path.separator=:
+platform.source.suffix=.cpp
+platform.includepath.prefix=-I
+platform.includepath=
+platform.compiler=g++
+platform.compiler.cpp11=-std=c++11
+platform.compiler.default=
+platform.compiler.fastfpu=-msse3 -ffast-math
+platform.compiler.viennacl=-fopenmp -fpermissive
+platform.compiler.nodeprecated=-Wno-deprecated-declarations
+# platform.compiler.output=-Wl,-rpath,$ORIGIN/ -Wl,-z,noexecstack -Wl,-Bsymbolic -march=x86-64 -m64 -Wall -O3 -fPIC -shared -s -o\u0020
+platform.compiler.output=-Wl,-rpath,$ORIGIN/ -Wl,-z,noexecstack -Wl,-Bsymbolic -march=native -m64 -Wall -Ofast -fPIC -shared -s -o\u0020
+platform.linkpath.prefix=-L
+platform.linkpath.prefix2=-Wl,-rpath,
+platform.linkpath=
+platform.link.prefix=-l
+platform.link.suffix=
+platform.link=
+platform.framework.prefix=-F
+platform.framework.suffix=
+platform.framework=
+platform.library.prefix=lib
+platform.library.suffix=.so

http://git-wip-us.apache.org/repos/asf/mahout/blob/034790cc/viennacl/pom.xml
----------------------------------------------------------------------
diff --git a/viennacl/pom.xml b/viennacl/pom.xml
new file mode 100644
index 0000000..bd543f3
--- /dev/null
+++ b/viennacl/pom.xml
@@ -0,0 +1,271 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>org.apache.mahout</groupId>
+    <artifactId>mahout</artifactId>
+    <version>0.13.0-SNAPSHOT</version>
+    <relativePath>../pom.xml</relativePath>
+  </parent>
+
+  <artifactId>mahout-native-viennacl_${scala.compat.version}</artifactId>
+
+  <name>Mahout Native ViennaCL OpenCL Bindings</name>
+  <description>Native Structures and interfaces to be used from Mahout math-scala.
+  </description>
+
+  <packaging>jar</packaging>
+
+  <build>
+    <plugins>
+      <!-- create test jar so other modules can reuse the native test utility classes. -->
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+            <phase>package</phase>
+          </execution>
+        </executions>
+      </plugin>
+
+      <plugin>
+        <artifactId>maven-javadoc-plugin</artifactId>
+      </plugin>
+
+      <plugin>
+        <artifactId>maven-source-plugin</artifactId>
+      </plugin>
+
+      <plugin>
+        <groupId>net.alchim31.maven</groupId>
+        <artifactId>scala-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>add-scala-sources</id>
+            <phase>initialize</phase>
+            <goals>
+              <goal>add-source</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>scala-compile</id>
+            <phase>process-resources</phase>
+            <goals>
+              <goal>compile</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>scala-test-compile</id>
+            <phase>process-test-resources</phase>
+            <goals>
+              <goal>testCompile</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+
+      <!--this is what scalatest recommends to do to enable scala tests -->
+
+      <!-- disable surefire -->
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <skipTests>true</skipTests>
+        </configuration>
+      </plugin>
+      <!-- enable scalatest -->
+      <plugin>
+        <groupId>org.scalatest</groupId>
+        <artifactId>scalatest-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>test</id>
+            <goals>
+              <goal>test</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+            <argLine>-Xmx4g</argLine>
+        </configuration>
+      </plugin>
+
+
+      <!--JavaCPP native build plugin-->
+      <!--  old-style way to get it to compile. -->
+      <!--based on https://github.com/bytedeco/javacpp/wiki/Maven-->
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <version>1.2.1</version>
+        <executions>
+          <execution>
+            <id>javacpp</id>
+            <phase>process-classes</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <environmentVariables>
+                <LD_LIBRARY_PATH>${project.basedir}/target/classes/org/apache/mahout/javacpp/linalg/linux-x86_64/
+                </LD_LIBRARY_PATH>
+              </environmentVariables>
+              <executable>java</executable>
+              <arguments>
+                <argument>-jar</argument>
+                <argument>${org.bytedeco:javacpp:jar}</argument>
+                <argument>-propertyfile</argument>
+                <argument>linux-x86_64-viennacl.properties</argument>
+                <argument>-classpath</argument>
+                <argument>${project.build.outputDirectory}:${org.scala-lang:scala-library:jar}</argument>
+                <argument>org.apache.mahout.viennacl.opencl.javacpp.CompressedMatrix</argument>
+                <argument>org.apache.mahout.viennacl.opencl.javacpp.Context</argument>
+                <argument>org.apache.mahout.viennacl.opencl.javacpp.MatrixBase</argument>
+                <argument>org.apache.mahout.viennacl.opencl.javacpp.DenseRowMatrix</argument>
+                <argument>org.apache.mahout.viennacl.opencl.javacpp.DenseColumnMatrix</argument>
+                <argument>org.apache.mahout.viennacl.opencl.javacpp.MatMatProdExpression</argument>
+                <argument>org.apache.mahout.viennacl.opencl.javacpp.ProdExpression</argument>
+                <argument>org.apache.mahout.viennacl.opencl.javacpp.SrMatDnMatProdExpression</argument>
+                <argument>org.apache.mahout.viennacl.opencl.javacpp.MatrixTransExpression</argument>
+                <argument>org.apache.mahout.viennacl.opencl.javacpp.LinalgFunctions</argument>
+                <argument>org.apache.mahout.viennacl.opencl.javacpp.Functions</argument>
+                <argument>org.apache.mahout.viennacl.opencl.javacpp.VectorBase</argument>
+                <argument>org.apache.mahout.viennacl.opencl.javacpp.VCLVector</argument>
+                <argument>org.apache.mahout.viennacl.opencl.javacpp.VecMultExpression</argument>
+                <argument>org.apache.mahout.viennacl.opencl.javacpp.MemHandle</argument>
+                <argument>org.apache.mahout.viennacl.opencl.GPUMMul</argument>
+                <argument>org.apache.mahout.viennacl.opencl.GPUMMul$</argument>
+              </arguments>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <version>2.3</version>
+        <executions>
+          <execution>
+            <goals>
+              <goal>properties</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <version>1.2.1</version>
+      </plugin>
+
+    </plugins>
+
+  </build>
+
+  <dependencies>
+
+    <dependency>
+      <groupId>${project.groupId}</groupId>
+      <artifactId>mahout-math-scala_${scala.compat.version}</artifactId>
+    </dependency>
+
+    <!--  3rd-party -->
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+    </dependency>
+
+    <!-- scala stuff -->
+    <dependency>
+      <groupId>org.scalatest</groupId>
+      <artifactId>scalatest_${scala.compat.version}</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.bytedeco</groupId>
+      <artifactId>javacpp</artifactId>
+      <version>1.2.4</version>
+    </dependency>
+
+  </dependencies>
+
+
+  <profiles>
+    <profile>
+      <id>mahout-release</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>net.alchim31.maven</groupId>
+            <artifactId>scala-maven-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>generate-scaladoc</id>
+                <goals>
+                  <goal>doc</goal>
+                </goals>
+              </execution>
+              <execution>
+                <id>attach-scaladoc-jar</id>
+                <goals>
+                  <goal>doc-jar</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+    <profile>
+      <id>travis</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-surefire-plugin</artifactId>
+            <configuration>
+              <!-- Limit memory for unit tests in Travis -->
+              <argLine>-Xmx4g</argLine>
+              <!--<argLine>-Djava.library.path=${project.build.directory}/libs/natives/linux-x86_64:${project.build.directory}/libs/natives/linux:${project.build.directory}/libs/natives/maxosx</argLine>-->
+            </configuration>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-failsafe-plugin</artifactId>
+            <configuration>
+              <!-- Limit memory for integration tests in Travis -->
+              <argLine>-Xmx4g</argLine>
+              <!--<argLine>-Djava.library.path=${project.build.directory}/libs/natives/linux-x86_64:${project.build.directory}/libs/natives/linux:${project.build.directory}/libs/natives/maxosx</argLine>-->
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+</project>

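Once the exec-maven-plugin execution above has run the JavaCPP generator over the listed classes and the native jniViennaCL library has been compiled, user code only needs to trigger the loader. A minimal smoke test, sketched under those assumptions (the LoadSmokeTest name is illustrative, not part of this commit):

import org.apache.mahout.viennacl.opencl.javacpp.Context

object LoadSmokeTest {
  def main(args: Array[String]): Unit = {
    // Loads the generated jniViennaCL native library via JavaCPP.
    Context.loadLib()
    // A main-memory context always works; Context.OPENCL_MEMORY additionally needs an OpenCL device.
    val ctx = new Context(Context.MAIN_MEMORY)
    println(s"ViennaCL context memory type: ${ctx.memoryType}")
  }
}
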
http://git-wip-us.apache.org/repos/asf/mahout/blob/034790cc/viennacl/src/main/java/org/apache/mahout/viennacl/opencl/javacpp/Functions.java
----------------------------------------------------------------------
diff --git a/viennacl/src/main/java/org/apache/mahout/viennacl/opencl/javacpp/Functions.java b/viennacl/src/main/java/org/apache/mahout/viennacl/opencl/javacpp/Functions.java
new file mode 100644
index 0000000..1c14f97
--- /dev/null
+++ b/viennacl/src/main/java/org/apache/mahout/viennacl/opencl/javacpp/Functions.java
@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.mahout.viennacl.opencl.javacpp;
+
+import org.bytedeco.javacpp.BytePointer;
+import org.bytedeco.javacpp.DoublePointer;
+import org.bytedeco.javacpp.IntPointer;
+import org.bytedeco.javacpp.annotation.*;
+
+import java.nio.DoubleBuffer;
+import java.nio.IntBuffer;
+
+
+@Properties(inherit = Context.class,
+        value = @Platform(
+                library = "jniViennaCL"
+        )
+)
+@Namespace("viennacl")
+public final class Functions {
+
+    private Functions() {
+    }
+
+    // This is (imo) an inconsistency in ViennaCL: almost all operations accept MatrixBase, but
+    // fast_copy requires the concrete type `matrix`, i.e., one of DenseRowMatrix or DenseColumnMatrix.
+    @Name("fast_copy")
+    public static native void fastCopy(DoublePointer srcBegin, DoublePointer srcEnd, @ByRef DenseRowMatrix dst);
+
+    @Name("fast_copy")
+    public static native void fastCopy(DoublePointer srcBegin, DoublePointer srcEnd, @ByRef DenseColumnMatrix dst);
+
+    @Name("fast_copy")
+    public static native void fastCopy(@ByRef DenseRowMatrix src, DoublePointer dst);
+
+    @Name("fast_copy")
+    public static native void fastCopy(@ByRef DenseColumnMatrix src, DoublePointer dst);
+
+    @Name("fast_copy")
+    public static native void fastCopy(@Const @ByRef VectorBase dst, @Const @ByRef VCLVector src);
+
+    @Name("fast_copy")
+    public static native void fastCopy(@Const @ByRef VCLVector src, @Const @ByRef VectorBase dst);
+
+
+    @ByVal
+    public static native MatrixTransExpression trans(@ByRef MatrixBase src);
+
+    @Name("backend::memory_read")
+    public static native void memoryReadInt(@Const @ByRef MemHandle src_buffer,
+                                  int bytes_to_read,
+                                  int offset,
+                                  IntPointer ptr,
+                                  boolean async);
+
+    @Name("backend::memory_read")
+    public static native void memoryReadDouble(@Const @ByRef MemHandle src_buffer,
+                                            int bytes_to_read,
+                                            int offset,
+                                            DoublePointer ptr,
+                                            boolean async);
+
+    @Name("backend::memory_read")
+    public static native void memoryReadInt(@Const @ByRef MemHandle src_buffer,
+                                            int bytes_to_read,
+                                            int offset,
+                                            IntBuffer ptr,
+                                            boolean async);
+
+    @Name("backend::memory_read")
+    public static native void memoryReadDouble(@Const @ByRef MemHandle src_buffer,
+                                               int bytes_to_read,
+                                               int offset,
+                                               DoubleBuffer ptr,
+                                               boolean async);
+
+    @Name("backend::memory_read")
+    public static native void memoryReadBytes(@Const @ByRef MemHandle src_buffer,
+                                              int bytes_to_read,
+                                              int offset,
+                                              BytePointer ptr,
+                                              boolean async);
+
+
+    static {
+        Context.loadLib();
+    }
+
+}

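A minimal sketch of the fast_copy pairing noted in the comments above (assuming the jniViennaCL native library is built; the FastCopySketch name and the 3x2 shape are illustrative): a raw DoublePointer is flushed into a concrete DenseRowMatrix and then read back out, mirroring what the Scala package object does.

import org.apache.mahout.viennacl.opencl.javacpp.{Context, DenseRowMatrix, Functions}
import org.bytedeco.javacpp.DoublePointer

object FastCopySketch {
  def main(args: Array[String]): Unit = {
    val ctx = new Context(Context.MAIN_MEMORY)
    val mx = new DenseRowMatrix(3, 2, ctx)

    // ViennaCL may pad the matrix internally, so size the buffer by the internal dimensions.
    val n = mx.internalnrow * mx.internalncol
    val data = new DoublePointer(n.toLong)
    for (i <- 0 until n) data.put(i.toLong, i.toDouble)

    // fast_copy(begin, end, matrix): note it takes the concrete matrix type, not MatrixBase.
    Functions.fastCopy(data, new DoublePointer(data).position(data.limit()), mx)

    // And back out into a freshly allocated buffer of the same internal size.
    val out = new DoublePointer(n.toLong)
    Functions.fastCopy(mx, out)

    mx.close(); data.close(); out.close()
  }
}
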
http://git-wip-us.apache.org/repos/asf/mahout/blob/034790cc/viennacl/src/main/java/org/apache/mahout/viennacl/opencl/javacpp/LinalgFunctions.java
----------------------------------------------------------------------
diff --git a/viennacl/src/main/java/org/apache/mahout/viennacl/opencl/javacpp/LinalgFunctions.java b/viennacl/src/main/java/org/apache/mahout/viennacl/opencl/javacpp/LinalgFunctions.java
new file mode 100644
index 0000000..9540691
--- /dev/null
+++ b/viennacl/src/main/java/org/apache/mahout/viennacl/opencl/javacpp/LinalgFunctions.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.mahout.viennacl.opencl.javacpp;
+
+import org.bytedeco.javacpp.annotation.*;
+
+
+@Properties(inherit = Context.class,
+        value = @Platform(
+                library = "jniViennaCL"
+        )
+)
+@Namespace("viennacl::linalg")
+public final class LinalgFunctions {
+
+    private LinalgFunctions() {
+    }
+
+    static {
+        Context.loadLib();
+    }
+
+
+    @ByVal
+    public static native MatMatProdExpression prod(@Const @ByRef MatrixBase a,
+                                                   @Const @ByRef MatrixBase b);
+
+    @ByVal
+    public static native ProdExpression prod(@Const @ByRef CompressedMatrix a,
+                                             @Const @ByRef CompressedMatrix b);
+
+    @ByVal
+    public static native MatVecProdExpression prod(@Const @ByRef MatrixBase a,
+                                                   @Const @ByRef VectorBase b);
+
+    @ByVal
+    public static native SrMatDnMatProdExpression prod(@Const @ByRef CompressedMatrix spMx,
+                                                       @Const @ByRef MatrixBase dMx);
+    @ByVal
+    @Name("prod")
+    public static native DenseColumnMatrix prodCm(@Const @ByRef MatrixBase a,
+                                                  @Const @ByRef MatrixBase b);
+    @ByVal
+    @Name("prod")
+    public static native DenseRowMatrix prodRm(@Const @ByRef MatrixBase a,
+                                               @Const @ByRef MatrixBase b);
+
+    @ByVal
+    @Name("prod")
+    public static native DenseRowMatrix prodRm(@Const @ByRef CompressedMatrix spMx,
+                                               @Const @ByRef MatrixBase dMx);
+
+
+//    @ByVal
+//    public static native MatrixProdExpression prod(@Const @ByRef DenseRowMatrix a,
+//                                                   @Const @ByRef DenseRowMatrix b);
+//
+//    @ByVal
+//    public static native MatrixProdExpression prod(@Const @ByRef DenseRowMatrix a,
+//                                                   @Const @ByRef DenseColumnMatrix b);
+//
+//    @ByVal
+//    public static native MatrixProdExpression prod(@Const @ByRef DenseColumnMatrix a,
+//                                                   @Const @ByRef DenseRowMatrix b);
+//
+//    @ByVal
+//    public static native MatrixProdExpression prod(@Const @ByRef DenseColumnMatrix a,
+//                                                   @Const @ByRef DenseColumnMatrix b);
+
+
+}

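A sketch (assuming the classes from this commit are built; the ProdSketch name is illustrative) of how the prod(...) overloads above are consumed: the expression objects are lazy, and wrapping them in a concrete matrix type forces evaluation, exactly as the test suites in this commit do.

import org.apache.mahout.viennacl.opencl.javacpp._
import org.apache.mahout.viennacl.opencl.javacpp.LinalgFunctions._

object ProdSketch {
  // Dense x dense: prod(MatrixBase, MatrixBase) yields MatMatProdExpression; materialize row-major.
  def denseTimesDense(a: DenseRowMatrix, b: DenseRowMatrix): DenseRowMatrix =
    new DenseRowMatrix(prod(a, b))

  // CSR x CSR: the result stays compressed.
  def sparseTimesSparse(a: CompressedMatrix, b: CompressedMatrix): CompressedMatrix =
    new CompressedMatrix(prod(a, b))

  // CSR x dense: materialized as a dense row-major matrix (SrMatDnMatProdExpression).
  def sparseTimesDense(a: CompressedMatrix, b: DenseRowMatrix): DenseRowMatrix =
    new DenseRowMatrix(prod(a, b))
}
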
http://git-wip-us.apache.org/repos/asf/mahout/blob/034790cc/viennacl/src/main/java/org/apache/mahout/viennacl/opencl/javacpp/MatrixTransExpression.scala
----------------------------------------------------------------------
diff --git a/viennacl/src/main/java/org/apache/mahout/viennacl/opencl/javacpp/MatrixTransExpression.scala b/viennacl/src/main/java/org/apache/mahout/viennacl/opencl/javacpp/MatrixTransExpression.scala
new file mode 100644
index 0000000..115af05
--- /dev/null
+++ b/viennacl/src/main/java/org/apache/mahout/viennacl/opencl/javacpp/MatrixTransExpression.scala
@@ -0,0 +1,34 @@
+/**
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+package org.apache.mahout.viennacl.opencl.javacpp;
+
+import org.bytedeco.javacpp.Pointer
+import org.bytedeco.javacpp.annotation.{Name, Namespace, Platform, Properties}
+
+
+@Properties(inherit = Array(classOf[Context]),
+  value = Array(new Platform(
+    include = Array("matrix.hpp"),
+    library = "jniViennaCL")
+  ))
+@Namespace("viennacl")
+@Name(Array("matrix_expression<const viennacl::matrix_base<double>, " +
+  "const viennacl::matrix_base<double>, " +
+  "viennacl::op_trans>"))
+class MatrixTransExpression extends Pointer {
+
+}

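A short sketch of consuming MatrixTransExpression (assuming the DenseRowMatrix constructor from this commit; the TransposeSketch name is illustrative): trans(...) from Functions returns the lazy expression declared above, and wrapping it in a DenseRowMatrix materializes the transpose, as the OMP test suite does.

import org.apache.mahout.viennacl.opencl.javacpp.DenseRowMatrix
import org.apache.mahout.viennacl.opencl.javacpp.Functions._

object TransposeSketch {
  // Forces evaluation of the matrix_expression with op_trans into a new row-major matrix.
  def transpose(a: DenseRowMatrix): DenseRowMatrix = new DenseRowMatrix(trans(a))
}
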
http://git-wip-us.apache.org/repos/asf/mahout/blob/034790cc/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/GPUMMul.scala
----------------------------------------------------------------------
diff --git a/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/GPUMMul.scala b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/GPUMMul.scala
new file mode 100644
index 0000000..936448d
--- /dev/null
+++ b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/GPUMMul.scala
@@ -0,0 +1,455 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.mahout.viennacl.opencl
+
+import org.apache.mahout.logging._
+import org.apache.mahout.math
+import org.apache.mahout.math._
+import org.apache.mahout.math.backend.incore.MMulSolver
+import org.apache.mahout.math.flavor.{BackEnum, TraversingStructureEnum}
+import org.apache.mahout.math.function.Functions
+import org.apache.mahout.math.scalabindings.RLikeOps._
+import org.apache.mahout.math.scalabindings._
+import org.apache.mahout.viennacl.opencl.javacpp.Functions._
+import org.apache.mahout.viennacl.opencl.javacpp.LinalgFunctions._
+import org.apache.mahout.viennacl.opencl.javacpp.{CompressedMatrix, Context, DenseRowMatrix}
+
+import scala.collection.JavaConversions._
+object GPUMMul extends MMBinaryFunc {
+
+  private final implicit val log = getLog(GPUMMul.getClass)
+
+  override def apply(a: Matrix, b: Matrix, r: Option[Matrix]): Matrix = {
+
+    require(a.ncol == b.nrow, "Incompatible matrix sizes in matrix multiplication.")
+
+    val (af, bf) = (a.getFlavor, b.getFlavor)
+    val backs = (af.getBacking, bf.getBacking)
+    val sd = (af.getStructure, math.scalabindings.densityAnalysis(a), bf.getStructure, densityAnalysis(b))
+
+
+    try {
+
+      val alg: MMulAlg = backs match {
+
+        // Both operands are jvm memory backs.
+        case (BackEnum.JVMMEM, BackEnum.JVMMEM) ⇒
+
+          sd match {
+
+            // Multiplication cases by a diagonal matrix.
+            case (TraversingStructureEnum.VECTORBACKED, _, TraversingStructureEnum.COLWISE, _)
+              if a.isInstanceOf[DiagonalMatrix] ⇒ jvmDiagCW
+            case (TraversingStructureEnum.VECTORBACKED, _, TraversingStructureEnum.SPARSECOLWISE, _)
+              if a.isInstanceOf[DiagonalMatrix] ⇒ jvmDiagCW
+            case (TraversingStructureEnum.VECTORBACKED, _, TraversingStructureEnum.ROWWISE, _)
+              if a.isInstanceOf[DiagonalMatrix] ⇒ jvmDiagRW
+            case (TraversingStructureEnum.VECTORBACKED, _, TraversingStructureEnum.SPARSEROWWISE, _)
+              if a.isInstanceOf[DiagonalMatrix] ⇒ jvmDiagRW
+
+            case (TraversingStructureEnum.COLWISE, _, TraversingStructureEnum.VECTORBACKED, _)
+              if b.isInstanceOf[DiagonalMatrix] ⇒ jvmCWDiag
+            case (TraversingStructureEnum.SPARSECOLWISE, _, TraversingStructureEnum.VECTORBACKED, _)
+              if b.isInstanceOf[DiagonalMatrix] ⇒ jvmCWDiag
+            case (TraversingStructureEnum.ROWWISE, _, TraversingStructureEnum.VECTORBACKED, _)
+              if b.isInstanceOf[DiagonalMatrix] ⇒ jvmRWDiag
+            case (TraversingStructureEnum.SPARSEROWWISE, _, TraversingStructureEnum.VECTORBACKED, _)
+              if b.isInstanceOf[DiagonalMatrix] ⇒ jvmRWDiag
+
+            // Dense-dense cases
+            case (TraversingStructureEnum.ROWWISE, true, TraversingStructureEnum.COLWISE, true) if a eq b.t ⇒ gpuDRWAAt
+            case (TraversingStructureEnum.ROWWISE, true, TraversingStructureEnum.COLWISE, true) if a.t eq b ⇒ gpuDRWAAt
+            case (TraversingStructureEnum.ROWWISE, true, TraversingStructureEnum.COLWISE, true) ⇒ gpuRWCW
+            case (TraversingStructureEnum.ROWWISE, true, TraversingStructureEnum.ROWWISE, true) ⇒ jvmRWRW
+            case (TraversingStructureEnum.COLWISE, true, TraversingStructureEnum.COLWISE, true) ⇒ jvmCWCW
+            case (TraversingStructureEnum.COLWISE, true, TraversingStructureEnum.ROWWISE, true) if a eq b.t ⇒ jvmDCWAAt
+            case (TraversingStructureEnum.COLWISE, true, TraversingStructureEnum.ROWWISE, true) if a.t eq b ⇒ jvmDCWAAt
+            case (TraversingStructureEnum.COLWISE, true, TraversingStructureEnum.ROWWISE, true) ⇒ jvmCWRW
+
+            // Sparse row matrix x sparse row matrix (array of vectors)
+            case (TraversingStructureEnum.ROWWISE, false, TraversingStructureEnum.ROWWISE, false) ⇒ gpuSparseRWRW
+            case (TraversingStructureEnum.ROWWISE, false, TraversingStructureEnum.COLWISE, false) ⇒ jvmSparseRWCW
+            case (TraversingStructureEnum.COLWISE, false, TraversingStructureEnum.ROWWISE, false) ⇒ jvmSparseCWRW
+            case (TraversingStructureEnum.COLWISE, false, TraversingStructureEnum.COLWISE, false) ⇒ jvmSparseCWCW
+
+            // Sparse matrix x sparse matrix (hashtable of vectors)
+            case (TraversingStructureEnum.SPARSEROWWISE, false, TraversingStructureEnum.SPARSEROWWISE, false) ⇒
+              gpuSparseRowRWRW
+            case (TraversingStructureEnum.SPARSEROWWISE, false, TraversingStructureEnum.SPARSECOLWISE, false) ⇒
+              jvmSparseRowRWCW
+            case (TraversingStructureEnum.SPARSECOLWISE, false, TraversingStructureEnum.SPARSEROWWISE, false) ⇒
+              jvmSparseRowCWRW
+            case (TraversingStructureEnum.SPARSECOLWISE, false, TraversingStructureEnum.SPARSECOLWISE, false) ⇒
+              jvmSparseRowCWCW
+
+            // Sparse matrix x non-like
+            case (TraversingStructureEnum.SPARSEROWWISE, false, TraversingStructureEnum.ROWWISE, _) ⇒ gpuSparseRowRWRW
+            case (TraversingStructureEnum.SPARSEROWWISE, false, TraversingStructureEnum.COLWISE, _) ⇒ jvmSparseRowRWCW
+            case (TraversingStructureEnum.SPARSECOLWISE, false, TraversingStructureEnum.ROWWISE, _) ⇒ jvmSparseRowCWRW
+            case (TraversingStructureEnum.SPARSECOLWISE, false, TraversingStructureEnum.COLWISE, _) ⇒ jvmSparseCWCW
+            case (TraversingStructureEnum.ROWWISE, _, TraversingStructureEnum.SPARSEROWWISE, false) ⇒ gpuSparseRWRW
+            case (TraversingStructureEnum.ROWWISE, _, TraversingStructureEnum.SPARSECOLWISE, false) ⇒ jvmSparseRWCW
+            case (TraversingStructureEnum.COLWISE, _, TraversingStructureEnum.SPARSEROWWISE, false) ⇒ jvmSparseCWRW
+            case (TraversingStructureEnum.COLWISE, _, TraversingStructureEnum.SPARSECOLWISE, false) ⇒ jvmSparseRowCWCW
+
+            // Everything else including at least one sparse LHS or RHS argument
+            case (TraversingStructureEnum.ROWWISE, false, TraversingStructureEnum.ROWWISE, _) ⇒ gpuSparseRWRW
+            case (TraversingStructureEnum.ROWWISE, false, TraversingStructureEnum.COLWISE, _) ⇒ jvmSparseRWCW
+            case (TraversingStructureEnum.COLWISE, false, TraversingStructureEnum.ROWWISE, _) ⇒ jvmSparseCWRW
+            case (TraversingStructureEnum.COLWISE, false, TraversingStructureEnum.COLWISE, _) ⇒ jvmSparseCWCW2flips
+
+            // Sparse methods are only effective if the first argument is sparse, so we need to do a swap.
+            case (_, _, _, false) ⇒ (a, b, r) ⇒ apply(b.t, a.t, r.map {
+              _.t
+            }).t
+
+            // Default jvm-jvm case.
+            // For some reason a SparseRowMatrix DRM %*% SparseRowMatrix DRM was dumping off to here.
+            case _ ⇒ gpuRWCW
+          }
+      }
+
+      alg(a, b, r)
+    } catch {
+      // TODO FASTHACK: just revert to JVM if there is an exception,
+      //  e.g. a java.lang.NullPointerException if more OpenCL contexts
+      //  have been created than the number of GPU cards.
+      //  A better option would be to fall back to OpenCL first.
+      case ex: Exception =>
+        println(ex.getMessage + "; falling back to JVM MMUL")
+        return MMul(a, b, r)
+    }
+  }
+
+  type MMulAlg = MMBinaryFunc
+
+  @inline
+  private def gpuRWCW(a: Matrix, b: Matrix, r: Option[Matrix] = None): Matrix = {
+    println("gpuRWCW")
+//
+//    require(r.forall(mxR ⇒ mxR.nrow == a.nrow && mxR.ncol == b.ncol))
+//    val (m, n) = (a.nrow, b.ncol)
+//
+//    val mxR = r.getOrElse(if (densityAnalysis(a)) a.like(m, n) else b.like(m, n))
+//
+//    for (row ← 0 until mxR.nrow; col ← 0 until mxR.ncol) {
+//      // this vector-vector should be sort of optimized, right?
+//      mxR(row, col) = a(row, ::) dot b(::, col)
+//    }
+//    mxR
+
+    val hasElementsA = a.zSum() >  0.0
+    val hasElementsB = b.zSum() >  0.0
+
+    // A has a sparse matrix structure of unknown size.  We do not want to
+    // simply convert it to a dense matrix, which may result in an OOM error.
+
+    // If it is empty, use JVM MMul, since we cannot convert it to a VCL CSR matrix.
+    if (!hasElementsA)  {
+      println("Matrix a has zero elements; cannot convert to CSR")
+      return MMul(a, b, r)
+    }
+
+    // CSR matrices are efficient up to 50% non-zero
+    if(b.getFlavor.isDense) {
+      var ms = System.currentTimeMillis()
+      val oclCtx = new Context(Context.OPENCL_MEMORY)
+      val oclA = toVclCmpMatrixAlt(a, oclCtx)
+      val oclB = toVclDenseRM(b, oclCtx)
+      val oclC = new DenseRowMatrix(prod(oclA, oclB))
+      val mxC = fromVclDenseRM(oclC)
+      ms = System.currentTimeMillis() - ms
+      debug(s"ViennaCL/OpenCL multiplication time: $ms ms.")
+
+      oclA.close()
+      oclB.close()
+      oclC.close()
+
+      mxC
+    } else {
+      // Fall back to JVM based MMul if either matrix is sparse and empty
+      if (!hasElementsA || !hasElementsB)  {
+        println("Matrix a or b has zero elements; cannot convert to CSR")
+        return MMul(a, b, r)
+      }
+
+      var ms = System.currentTimeMillis()
+      val oclCtx = new Context(Context.OPENCL_MEMORY)
+      val oclA = toVclCmpMatrixAlt(a, oclCtx)
+      val oclB = toVclCmpMatrixAlt(b, oclCtx)
+      val oclC = new CompressedMatrix(prod(oclA, oclB))
+      val mxC = fromVclCompressedMatrix(oclC)
+      ms = System.currentTimeMillis() - ms
+      debug(s"ViennaCL/OpenCL multiplication time: $ms ms.")
+
+      oclA.close()
+      oclB.close()
+      oclC.close()
+
+      mxC
+    }
+  }
+
+
+  @inline
+  private def jvmRWRW(a: Matrix, b: Matrix, r: Option[Matrix] = None): Matrix = {
+    println("jvmRWRW")
+    // A bit hackish: currently, this relies a bit on the fact that like produces RW(?)
+    val bclone = b.like(b.ncol, b.nrow).t
+    for (brow ← b) bclone(brow.index(), ::) := brow
+
+    require(bclone.getFlavor.getStructure == TraversingStructureEnum.COLWISE || bclone.getFlavor.getStructure ==
+      TraversingStructureEnum.SPARSECOLWISE, "COL wise conversion assumption of RHS is wrong, do over this code.")
+
+    gpuRWCW(a, bclone, r)
+  }
+
+  private def jvmCWCW(a: Matrix, b: Matrix, r: Option[Matrix] = None): Matrix = {
+    println("jvmCWCW")
+    jvmRWRW(b.t, a.t, r.map(_.t)).t
+  }
+
+  private def jvmCWRW(a: Matrix, b: Matrix, r: Option[Matrix] = None): Matrix = {
+    println("jvmCWRW")
+    // This is a primary contender with the outer-product-sum algorithm.
+    // Here, we force-reorient both matrices and run RWCW.
+    // A bit hackish: currently this relies on the fact that cloned always produces a row-wise (RW) matrix.
+    val aclone = a.cloned
+
+    require(aclone.getFlavor.getStructure == TraversingStructureEnum.ROWWISE || aclone.getFlavor.getStructure ==
+      TraversingStructureEnum.SPARSEROWWISE, "Row-wise conversion assumption for the LHS is wrong; rework this code.")
+
+    jvmRWRW(aclone, b, r)
+  }
+
+  // Left operand is sparse, right is any.
+  private def gpuSparseRWRW(a: Matrix, b: Matrix, r: Option[Matrix] = None): Matrix = {
+    println("gpuSparseRWRW")
+    val mxR = r.getOrElse(b.like(a.nrow, b.ncol))
+
+
+//    // This is basically almost the algorithm from SparseMatrix.times
+//    for (arow ← a; ael ← arow.nonZeroes)
+//      mxR(arow.index(), ::).assign(b(ael.index, ::), Functions.plusMult(ael))
+//
+//    mxR
+
+    // Make sure that the matrix is not empty: VCL {{compressed_matrix}}s must
+    // have nnz > 0.
+    // This check is horribly inefficient; however, there is a difference between
+    // getNumNonDefaultElements() and getNumNonZeroElements(), which we do not always
+    // have access to. Created MAHOUT-1882 for this.
+    val hasElementsA = a.zSum() >  0.0
+    val hasElementsB = b.zSum() >  0.0
+
+    // A has a sparse matrix structure of unknown size.  We do not want to
+    // simply convert it to a dense matrix, which may result in an OOM error.
+    // If it is empty, use JVM MMul, since we cannot convert it to a VCL CSR matrix.
+    if (!hasElementsA)  {
+      println("Matrix a has zero elements; cannot convert to CSR")
+      return MMul(a, b, r)
+    }
+
+    // CSR matrices are efficient up to roughly 50% non-zeros.
+    if (b.getFlavor.isDense) {
+      var ms = System.currentTimeMillis()
+      val oclCtx = new Context(Context.OPENCL_MEMORY)
+      val oclA = toVclCmpMatrixAlt(a, oclCtx)
+      val oclB = toVclDenseRM(b, oclCtx)
+      val oclC = new DenseRowMatrix(prod(oclA, oclB))
+      val mxC = fromVclDenseRM(oclC)
+      ms = System.currentTimeMillis() - ms
+      debug(s"ViennaCL/OpenCL multiplication time: $ms ms.")
+
+      oclA.close()
+      oclB.close()
+      oclC.close()
+
+      mxC
+    } else {
+      // Fall back to JVM-based MMul if either matrix is sparse and empty.
+      if (!hasElementsA || !hasElementsB)  {
+        println("Matrix a or b has zero elements; cannot convert to CSR")
+        return MMul(a, b, r)
+      }
+
+      var ms = System.currentTimeMillis()
+      val oclCtx = new Context(Context.OPENCL_MEMORY)
+      val oclA = toVclCmpMatrixAlt(a, oclCtx)
+      val oclB = toVclCmpMatrixAlt(b, oclCtx)
+      val oclC = new CompressedMatrix(prod(oclA, oclB))
+      val mxC = fromVclCompressedMatrix(oclC)
+      ms = System.currentTimeMillis() - ms
+      debug(s"ViennaCL/OpenCL multiplication time: $ms ms.")
+
+      oclA.close()
+      oclB.close()
+      oclC.close()
+
+      mxC
+    }
+
+  }
+
+  // sparse %*% dense
+  private def gpuSparseRowRWRW(a: Matrix, b: Matrix, r: Option[Matrix] = None): Matrix = {
+    println("gpuSparseRowRWRW")
+    val hasElementsA = a.zSum() >  0
+
+    // A has a sparse matrix structure of unknown size.  We do not want to
+    // simply convert it to a dense matrix, which may result in an OOM error.
+    // If it is empty, fall back to JVM MMul, since we cannot convert it
+    // to a VCL CSR matrix.
+    if (!hasElementsA)  {
+      println("Matrix a has zero elements; cannot convert to CSR")
+      return MMul(a, b, r)
+    }
+
+    var ms = System.currentTimeMillis()
+    val oclCtx = new Context(Context.OPENCL_MEMORY)
+    val oclA = toVclCmpMatrixAlt(a, oclCtx)
+    val oclB = toVclDenseRM(b, oclCtx)
+    val oclC = new DenseRowMatrix(prod(oclA, oclB))
+    val mxC = fromVclDenseRM(oclC)
+    ms = System.currentTimeMillis() - ms
+    debug(s"ViennaCL/OpenCL multiplication time: $ms ms.")
+
+    oclA.close()
+    oclB.close()
+    oclC.close()
+
+    mxC
+  }
+
+  private def jvmSparseRowCWCW(a: Matrix, b: Matrix, r: Option[Matrix] = None) =
+    gpuSparseRowRWRW(b.t, a.t, r.map(_.t)).t
+
+  private def jvmSparseRowCWCW2flips(a: Matrix, b: Matrix, r: Option[Matrix] = None) =
+    gpuSparseRowRWRW(a cloned, b cloned, r)
+
+  private def jvmSparseRowRWCW(a: Matrix, b: Matrix, r: Option[Matrix]) =
+    gpuSparseRowRWRW(a, b cloned, r)
+
+
+  private def jvmSparseRowCWRW(a: Matrix, b: Matrix, r: Option[Matrix]) =
+    gpuSparseRowRWRW(a cloned, b, r)
+
+  private def jvmSparseRWCW(a: Matrix, b: Matrix, r: Option[Matrix] = None) =
+    gpuSparseRWRW(a, b.cloned, r)
+
+  private def jvmSparseCWRW(a: Matrix, b: Matrix, r: Option[Matrix] = None) =
+    gpuSparseRWRW(a cloned, b, r)
+
+  private def jvmSparseCWCW(a: Matrix, b: Matrix, r: Option[Matrix] = None) =
+    gpuSparseRWRW(b.t, a.t, r.map(_.t)).t
+
+  private def jvmSparseCWCW2flips(a: Matrix, b: Matrix, r: Option[Matrix] = None) =
+    gpuSparseRWRW(a cloned, b cloned, r)
+
+  private def jvmDiagRW(diagm: Matrix, b: Matrix, r: Option[Matrix] = None): Matrix = {
+    println("jvmDiagRW")
+    val mxR = r.getOrElse(b.like(diagm.nrow, b.ncol))
+
+    for (del ← diagm.diagv.nonZeroes())
+      mxR(del.index, ::).assign(b(del.index, ::), Functions.plusMult(del))
+
+    mxR
+  }
+
+  private def jvmDiagCW(diagm: Matrix, b: Matrix, r: Option[Matrix] = None): Matrix = {
+    println("jvmDiagCW")
+    val mxR = r.getOrElse(b.like(diagm.nrow, b.ncol))
+    for (bcol ← b.t) mxR(::, bcol.index()) := bcol * diagm.diagv
+    mxR
+  }
+
+  private def jvmCWDiag(a: Matrix, diagm: Matrix, r: Option[Matrix] = None) =
+    jvmDiagRW(diagm, a.t, r.map {_.t}).t
+
+  private def jvmRWDiag(a: Matrix, diagm: Matrix, r: Option[Matrix] = None) =
+    jvmDiagCW(diagm, a.t, r.map {_.t}).t
+
+
+  /** Dense column-wise AA' */
+  private def jvmDCWAAt(a: Matrix, b: Matrix, r: Option[Matrix] = None) = {
+    // a.t must be equivalent to b. Cloning must rewrite it to row-wise.
+    gpuDRWAAt(a.cloned, null, r)
+  }
+
+  /** Dense Row-wise AA' */
+  // We probably will not want to use this for the actual release unless A is already cached,
+  // but adding it for testing purposes.
+  private def gpuDRWAAt(a: Matrix, b: Matrix, r: Option[Matrix] = None) = {
+    // a.t must be equivalent to b.
+    println("executing on gpu")
+    debug("AAt computation detected; passing off to GPU")
+
+    // Check dimensions if result is supplied.
+    require(r.forall(mxR ⇒ mxR.nrow == a.nrow && mxR.ncol == a.nrow))
+
+    val mxR = r.getOrElse(a.like(a.nrow, a.nrow))
+
+    var ms = System.currentTimeMillis()
+    val oclCtx = new Context(Context.OPENCL_MEMORY)
+    val oclA = toVclDenseRM(src = a, oclCtx)
+    val oclAt = new DenseRowMatrix(trans(oclA))
+    val oclC = new DenseRowMatrix(prod(oclA, oclAt))
+
+    val mxC = fromVclDenseRM(oclC)
+    ms = System.currentTimeMillis() - ms
+    debug(s"ViennaCL/OpenCL multiplication time: $ms ms.")
+
+    oclA.close()
+    //oclApr.close()
+    oclAt.close()
+    oclC.close()
+
+    mxC
+
+  }
+
+  private def jvmOuterProdSum(a: Matrix, b: Matrix, r: Option[Matrix] = None): Matrix = {
+    println("jvmOuterProdSum")
+    // This may already be laid out for outer-product computation, which may be faster than reorienting
+    // both matrices; need to check.
+    val (m, n) = (a.nrow, b.ncol)
+
+    // Prefer col-wise result iff a is dense and b is sparse. In all other cases default to row-wise.
+    val preferColWiseR = a.getFlavor.isDense && !b.getFlavor.isDense
+
+    val mxR = r.getOrElse {
+      (a.getFlavor.isDense, preferColWiseR) match {
+        case (false, false) ⇒ b.like(m, n)
+        case (false, true) ⇒ b.like(n, m).t
+        case (true, false) ⇒ a.like(m, n)
+        case (true, true) ⇒ a.like(n, m).t
+      }
+    }
+
+    // Loop outer products
+    if (preferColWiseR) {
+      // this means B is sparse and A is not, so we need to iterate over b values and update R columns with +=
+      // one at a time.
+      for ((acol, brow) ← a.t.zip(b); bel ← brow.nonZeroes) mxR(::, bel.index()) += bel * acol
+    } else {
+      for ((acol, brow) ← a.t.zip(b); ael ← acol.nonZeroes()) mxR(ael.index(), ::) += ael * brow
+    }
+
+    mxR
+  }
+}
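A minimal usage sketch of the dense ViennaCL path implemented above: convert both operands, materialize the product expression, copy the result back, and release the native handles. This is a sketch only; it assumes the jniViennaCL native library is built and loadable, and that `prod` is in scope the way it is inside the package object (its defining object is not shown in this diff). Values and the main-memory context are illustrative; the routines above use Context.OPENCL_MEMORY instead.

  import org.apache.mahout.math.scalabindings._
  import org.apache.mahout.viennacl.openmp._
  import org.apache.mahout.viennacl.openmp.javacpp.{Context, DenseRowMatrix}

  // Two small dense Mahout matrices (illustrative values).
  val mxA = dense((1.0, 2.0), (3.0, 4.0))
  val mxB = dense((5.0, 6.0), (7.0, 8.0))

  val ctx  = new Context(Context.MAIN_MEMORY)       // OpenMP backend; the code above uses OPENCL_MEMORY
  val vclA = toVclDenseRM(mxA, ctx)                 // copy A into a ViennaCL dense row-major matrix
  val vclB = toVclDenseRM(mxB, ctx)
  // `prod` is assumed to be in scope here, as it is inside the package object above.
  val vclC = new DenseRowMatrix(prod(vclA, vclB))   // materialize the mat-mat product expression
  val mxC  = fromVclDenseRM(vclC)                   // copy the result back into a Mahout matrix

  // Native buffers are not reclaimed promptly by the JVM GC; close them explicitly.
  vclA.close(); vclB.close(); vclC.close()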

http://git-wip-us.apache.org/repos/asf/mahout/blob/034790cc/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/CompressedMatrix.scala
----------------------------------------------------------------------
diff --git a/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/CompressedMatrix.scala b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/CompressedMatrix.scala
new file mode 100644
index 0000000..5a84ac5
--- /dev/null
+++ b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/CompressedMatrix.scala
@@ -0,0 +1,125 @@
+/**
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+package org.apache.mahout.viennacl.opencl.javacpp
+
+import java.nio._
+
+import org.bytedeco.javacpp._
+import org.bytedeco.javacpp.annotation._
+
+import scala.collection.mutable.ArrayBuffer
+
+
+@Properties(inherit = Array(classOf[Context]),
+  value = Array(new Platform(
+    include = Array("compressed_matrix.hpp"),
+    library="jniViennaCL"
+  )))
+@Name(Array("viennacl::compressed_matrix<double>"))
+final class CompressedMatrix(defaultCtr: Boolean = true) extends Pointer {
+
+  protected val ptrs = new ArrayBuffer[Pointer]()
+
+  // Call this after set(); better yet, TODO: wrap set() in a public method that calls this.
+  def registerPointersForDeallocation(p:Pointer): Unit = {
+    ptrs += p
+  }
+
+  override def deallocate(deallocate: Boolean): Unit = {
+    super.deallocate(deallocate)
+     ptrs.foreach(_.close())
+  }
+
+  if (defaultCtr) allocate()
+
+  def this(nrow: Int, ncol: Int, ctx: Context = new Context) {
+    this(false)
+    allocate(nrow, ncol, ctx)
+  }
+
+  def this(nrow: Int, ncol: Int, nonzeros: Int, ctx: Context = new Context) {
+    this(false)
+    allocate(nrow, ncol, nonzeros, ctx)
+  }
+
+  def this(pe: ProdExpression) {
+    this(false)
+    allocate(pe)
+  }
+
+  @native protected def allocate()
+
+  @native protected def allocate(nrow: Int, ncol: Int, nonzeros: Int, @ByVal ctx: Context)
+
+  @native protected def allocate(nrow: Int, ncol: Int, @ByVal ctx: Context)
+
+  @native protected def allocate(@Const @ByRef pe: ProdExpression)
+
+//  @native protected def allocate(db: DoubleBuffer)
+//
+//  @native protected def allocate(ib: IntBuffer)
+
+  // Warning: apparently there are differences in bit interpretation of the unsigned int type between
+  // OpenCL and everything else. So, for the OpenCL backend, rowJumper and colIndices have to be packed
+  // with reference to the cl_uint type that ViennaCL defines. (A usage sketch follows this class.)
+  @native def set(@Cast(Array("const void*")) rowJumper: IntBuffer,
+                  @Cast(Array("const void*")) colIndices: IntBuffer,
+                  @Const elements: DoubleBuffer,
+                  nrow: Int,
+                  ncol: Int,
+                  nonzeros: Int
+                 )
+
+  /** With javacpp pointers. */
+  @native def set(@Cast(Array("const void*")) rowJumper: IntPointer,
+                  @Cast(Array("const void*")) colIndices: IntPointer,
+                  @Const elements: DoublePointer,
+                  nrow: Int,
+                  ncol: Int,
+                  nonzeros: Int
+                 )
+
+  @Name(Array("operator="))
+  @native def :=(@Const @ByRef pe: ProdExpression)
+
+  @native def generate_row_block_information()
+
+  /** getters for the compressed_matrix size */
+  //const vcl_size_t & size1() const { return rows_; }
+  @native def size1: Int
+  //const vcl_size_t & size2() const { return cols_; }
+  @native def size2: Int
+  //const vcl_size_t & nnz() const { return nonzeros_; }
+  @native def nnz: Int
+  //const vcl_size_t & blocks1() const { return row_block_num_; }
+ // @native def blocks1: Int
+
+  /** getters for the compressed_matrix buffers */
+  //const handle_type & handle1() const { return row_buffer_; }
+  @native @Const @ByRef def handle1: MemHandle
+  //const handle_type & handle2() const { return col_buffer_; }
+  @native @Const @ByRef def handle2: MemHandle
+  //const handle_type & handle3() const { return row_blocks_; }
+  @native @Const @ByRef def handle3: MemHandle
+  //const handle_type & handle() const { return elements_; }
+  @native @Const @ByRef def handle: MemHandle
+
+}
+
+object CompressedMatrix {
+  Context.loadLib()
+}
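A hedged sketch of filling a compressed_matrix through the IntPointer/DoublePointer set() overload above, using CSR-style buffers and a main-memory context (which sidesteps the cl_uint packing caveat noted for the OpenCL backend). The matrix, its values, and the dimensions are illustrative only; the sketch assumes the jniViennaCL native library loads successfully (the companion's Context.loadLib() takes care of that).

  import org.bytedeco.javacpp.{DoublePointer, IntPointer}
  import org.apache.mahout.viennacl.opencl.javacpp.{CompressedMatrix, Context}

  // Illustrative 2x3 CSR matrix with 3 non-zeros:
  //   [ 1.0  0.0  2.0 ]
  //   [ 0.0  3.0  0.0 ]
  val rowJumper  = new IntPointer(3L).put(Array(0, 2, 3), 0, 3)          // nrow + 1 row offsets
  val colIndices = new IntPointer(3L).put(Array(0, 2, 1), 0, 3)          // one column index per non-zero
  val values     = new DoublePointer(3L).put(Array(1.0, 2.0, 3.0), 0, 3) // non-zero values in row order

  val cm = new CompressedMatrix(2, 3, 3, new Context(Context.MAIN_MEMORY))
  cm.set(rowJumper, colIndices, values, 2, 3, 3)

  assert(cm.size1 == 2 && cm.size2 == 3 && cm.nnz == 3)
  cm.close(); rowJumper.close(); colIndices.close(); values.close()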

http://git-wip-us.apache.org/repos/asf/mahout/blob/034790cc/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/Context.scala
----------------------------------------------------------------------
diff --git a/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/Context.scala b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/Context.scala
new file mode 100644
index 0000000..770f87f
--- /dev/null
+++ b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/Context.scala
@@ -0,0 +1,73 @@
+/**
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+package org.apache.mahout.viennacl.opencl.javacpp
+
+import org.bytedeco.javacpp.{Loader, Pointer}
+import org.bytedeco.javacpp.annotation._
+
+/**
+  * This assumes ViennaCL 1.7.1 is installed; on Ubuntu Xenial it lands in
+  * /usr/include/viennacl by default and can be installed via
+  * {{{
+  *   sudo apt-get install libviennacl-dev
+  * }}}
+  *
+  * @param mtype the ViennaCL memory type for this context (see the constants in the companion object)
+  */
+@Properties(Array(
+  new Platform(
+    includepath = Array("/usr/include/viennacl"),
+    include = Array("matrix.hpp", "compressed_matrix.hpp"),
+    define = Array("VIENNACL_WITH_OPENCL", "VIENNACL_WITH_OPENMP"),
+    compiler = Array("fastfpu","viennacl"),
+    link = Array("OpenCL"),
+    library = "jniViennaCL"
+  )))
+@Namespace("viennacl")
+@Name(Array("context"))
+final class Context(mtype: Int = Context.MEMORY_NOT_INITIALIZED) extends Pointer {
+
+  import Context._
+
+  if (mtype == MEMORY_NOT_INITIALIZED)
+    allocate()
+  else
+    allocate(mtype)
+
+  @native protected def allocate()
+
+  @native protected def allocate(@Cast(Array("viennacl::memory_types")) mtype: Int)
+
+  @Name(Array("memory_type"))
+  @Cast(Array("int"))
+  @native def memoryType: Int
+
+}
+
+object Context {
+
+  def loadLib() = Loader.load(classOf[Context])
+
+  loadLib()
+
+  /* Memory types. Ported from VCL header files. */
+  val MEMORY_NOT_INITIALIZED = 0
+  val MAIN_MEMORY = 1
+  val OPENCL_MEMORY = 2
+  val CUDA_MEMORY = 3
+
+}
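For illustration, a minimal sketch of selecting a memory backend with the constants above. MAIN_MEMORY is always available; OPENCL_MEMORY (and CUDA_MEMORY) assume the corresponding runtime is present and that the jniViennaCL build enables it.

  import org.apache.mahout.viennacl.opencl.javacpp.Context

  val hostCtx = new Context(Context.MAIN_MEMORY)     // plain host buffers (OpenMP kernels)
  val oclCtx  = new Context(Context.OPENCL_MEMORY)   // buffers on the default OpenCL device

  assert(hostCtx.memoryType == Context.MAIN_MEMORY)
  assert(oclCtx.memoryType == Context.OPENCL_MEMORY)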

http://git-wip-us.apache.org/repos/asf/mahout/blob/034790cc/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/DenseColumnMatrix.scala
----------------------------------------------------------------------
diff --git a/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/DenseColumnMatrix.scala b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/DenseColumnMatrix.scala
new file mode 100644
index 0000000..7b268e3
--- /dev/null
+++ b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/DenseColumnMatrix.scala
@@ -0,0 +1,83 @@
+/**
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+package org.apache.mahout.viennacl.opencl.javacpp
+
+import org.bytedeco.javacpp.{DoublePointer, Pointer}
+import org.bytedeco.javacpp.annotation._
+
+/**
+  * ViennaCL dense matrix, column-major. This is an exact duplicate of [[DenseRowMatrix]] that
+  * differs only in the materialized C++ template name; unfortunately I have not yet figured out
+  * how to avoid this duplication.
+  *
+  * Also, [[Platform.library]] does not get inherited for some reason, and we really want to
+  * collect all class mappings in a single libjni.so, so we have to repeat this `library`
+  * definition in every mapped class in this package (one .so per package convention).
+  */
+@Properties(inherit = Array(classOf[Context]),
+  value = Array(new Platform (
+    include=Array("matrix.hpp"),
+    library="jniViennaCL"
+  )))
+@Name(Array("viennacl::matrix<double,viennacl::column_major>"))
+final class DenseColumnMatrix(initDefault:Boolean = true) extends MatrixBase {
+
+  def this(nrow: Int, ncol: Int, ctx: Context = new Context()) {
+    this(false)
+    allocate(nrow, ncol, ctx)
+  }
+
+  def this(data: DoublePointer, nrow: Int, ncol: Int, ctx: Context = new Context(Context.MAIN_MEMORY)) {
+    this(false)
+    allocate(data, ctx.memoryType, nrow, ncol)
+    // We save it so that it is released at deallocation time.
+    ptrs += data
+  }
+
+  def this(me: MatMatProdExpression) {
+    this(false)
+    allocate(me)
+  }
+
+  def this(me: MatrixTransExpression) {
+    this(false)
+    allocate(me)
+  }
+
+
+  if (initDefault) allocate()
+
+  @native protected def allocate()
+
+  @native protected def allocate(nrow: Int, ncol: Int, @ByVal ctx: Context)
+
+  @native protected def allocate(data: DoublePointer,
+                                 @Cast(Array("viennacl::memory_types"))
+                                 memType: Int,
+                                 nrow: Int,
+                                 ncol: Int
+                                )
+
+  @native protected def allocate(@Const @ByRef me: MatMatProdExpression)
+
+  @native protected def allocate(@Const @ByRef me: MatrixTransExpression)
+
+}
+
+object DenseColumnMatrix {
+  Context.loadLib()
+}
\ No newline at end of file
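A hedged sketch of the MatrixTransExpression constructor above: evaluating a lazy transpose into column-major storage. It assumes `trans` is in scope as it is in the multiplication routines earlier in this commit (its defining object is not shown here) and that the native library is loaded; the dimensions are illustrative.

  import org.apache.mahout.viennacl.opencl.javacpp.{Context, DenseColumnMatrix, DenseRowMatrix}

  val a  = new DenseRowMatrix(3, 2, new Context(Context.MAIN_MEMORY))
  val at = new DenseColumnMatrix(trans(a))   // evaluate the transpose expression into column-major storage
  assert(at.nrow == a.ncol && at.ncol == a.nrow)
  a.close(); at.close()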

http://git-wip-us.apache.org/repos/asf/mahout/blob/034790cc/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/DenseRowMatrix.scala
----------------------------------------------------------------------
diff --git a/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/DenseRowMatrix.scala b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/DenseRowMatrix.scala
new file mode 100644
index 0000000..b353924
--- /dev/null
+++ b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/DenseRowMatrix.scala
@@ -0,0 +1,86 @@
+/**
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+package org.apache.mahout.viennacl.opencl.javacpp
+
+import org.bytedeco.javacpp.{DoublePointer, Pointer, annotation}
+import org.bytedeco.javacpp.annotation._
+
+import scala.collection.mutable.ArrayBuffer
+
+/**
+  * ViennaCL dense matrix, row-major.
+  */
+@Properties(inherit = Array(classOf[Context]),
+  value = Array(new Platform(
+    library = "jniViennaCL"
+  )))
+@Name(Array("viennacl::matrix<double,viennacl::row_major>"))
+class DenseRowMatrix(initDefault: Boolean = true) extends MatrixBase {
+
+  def this(nrow: Int, ncol: Int, ctx: Context = new Context()) {
+    this(false)
+    allocate(nrow, ncol, ctx)
+  }
+
+  def this(data: DoublePointer, nrow: Int, ncol: Int, ctx: Context = new Context(Context.MAIN_MEMORY)) {
+    this(false)
+    allocate(data, ctx.memoryType, nrow, ncol)
+    // We save it so that it is released at deallocation time.
+    ptrs += data
+  }
+
+  def this(me: MatMatProdExpression) {
+    this(false)
+    allocate(me)
+  }
+
+  def this(me: MatrixTransExpression) {
+    this(false)
+    allocate(me)
+  }
+
+  // TODO: getting compilation errors here
+  def this(sd: SrMatDnMatProdExpression) {
+    this(false)
+    allocate(sd)
+  }
+
+  if (initDefault) allocate()
+
+  @native protected def allocate()
+
+  @native protected def allocate(nrow: Int, ncol: Int, @ByVal ctx: Context)
+
+  @native protected def allocate(data: DoublePointer,
+                                 @Cast(Array("viennacl::memory_types"))
+                                 memType: Int,
+                                 nrow: Int,
+                                 ncol: Int
+                                )
+
+  @native protected def allocate(@Const @ByRef me: MatMatProdExpression)
+
+  @native protected def allocate(@Const @ByRef me: MatrixTransExpression)
+
+  @native protected def allocate(@Const @ByRef me: SrMatDnMatProdExpression)
+
+}
+
+
+object DenseRowMatrix {
+  Context.loadLib()
+}
\ No newline at end of file
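A short sketch of the DoublePointer constructor above: building a DenseRowMatrix directly from a row-major buffer in main memory. The values are illustrative; note that the constructor registers the DoublePointer in `ptrs`, so closing the matrix releases the buffer as well.

  import org.bytedeco.javacpp.DoublePointer
  import org.apache.mahout.viennacl.opencl.javacpp.{Context, DenseRowMatrix}

  // A 2x2 row-major buffer: [[1 2], [3 4]] (illustrative values).
  val data = new DoublePointer(1.0, 2.0, 3.0, 4.0)
  val m = new DenseRowMatrix(data, 2, 2, new Context(Context.MAIN_MEMORY))

  assert(m.nrow == 2 && m.ncol == 2 && m.isRowMajor)
  m.close()   // `data` was registered in the constructor and is closed along with the matrix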

http://git-wip-us.apache.org/repos/asf/mahout/blob/034790cc/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/MatMatProdExpression.scala
----------------------------------------------------------------------
diff --git a/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/MatMatProdExpression.scala b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/MatMatProdExpression.scala
new file mode 100644
index 0000000..c88aee5
--- /dev/null
+++ b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/MatMatProdExpression.scala
@@ -0,0 +1,33 @@
+/**
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+package org.apache.mahout.viennacl.opencl.javacpp
+
+import org.bytedeco.javacpp.Pointer
+import org.bytedeco.javacpp.annotation.{Name, Namespace, Platform, Properties}
+
+
+@Properties(inherit = Array(classOf[Context]),
+  value = Array(new Platform(
+    library = "jniViennaCL")
+  ))
+@Namespace("viennacl")
+@Name(Array("matrix_expression<const viennacl::matrix_base<double>, " +
+  "const viennacl::matrix_base<double>, " +
+  "viennacl::op_mat_mat_prod>"))
+class MatMatProdExpression extends Pointer {
+
+}

http://git-wip-us.apache.org/repos/asf/mahout/blob/034790cc/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/MatVecProdExpression.scala
----------------------------------------------------------------------
diff --git a/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/MatVecProdExpression.scala b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/MatVecProdExpression.scala
new file mode 100644
index 0000000..111cbd3
--- /dev/null
+++ b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/MatVecProdExpression.scala
@@ -0,0 +1,33 @@
+/**
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+package org.apache.mahout.viennacl.opencl.javacpp
+
+import org.bytedeco.javacpp.Pointer
+import org.bytedeco.javacpp.annotation.{Name, Namespace, Platform, Properties}
+
+
+@Properties(inherit = Array(classOf[Context]),
+  value = Array(new Platform(
+    library = "jniViennaCL")
+  ))
+@Namespace("viennacl")
+@Name(Array("vector_expression<const viennacl::matrix_base<double>, " +
+  "const viennacl::vector_base<double>, " +
+  "viennacl::op_prod>"))
+class MatVecProdExpression extends Pointer {
+
+}

http://git-wip-us.apache.org/repos/asf/mahout/blob/034790cc/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/MatrixBase.scala
----------------------------------------------------------------------
diff --git a/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/MatrixBase.scala b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/MatrixBase.scala
new file mode 100644
index 0000000..6cc1f9f
--- /dev/null
+++ b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/MatrixBase.scala
@@ -0,0 +1,75 @@
+/**
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+package org.apache.mahout.viennacl.opencl.javacpp
+
+import org.bytedeco.javacpp.Pointer
+import org.bytedeco.javacpp.annotation._
+
+import scala.collection.mutable.ArrayBuffer
+
+
+@Properties(inherit = Array(classOf[Context]),
+  value = Array(new Platform(
+    library = "jniViennaCL"
+  )))
+@Name(Array("viennacl::matrix_base<double>"))
+class MatrixBase extends Pointer {
+
+  protected val ptrs = new ArrayBuffer[Pointer]()
+
+  override def deallocate(deallocate: Boolean): Unit = {
+    super.deallocate(deallocate)
+    ptrs.foreach(_.close())
+  }
+
+  @Name(Array("operator="))
+  @native def :=(@Const @ByRef src: DenseRowMatrix)
+
+  @Name(Array("operator="))
+  @native def :=(@Const @ByRef src: DenseColumnMatrix)
+
+  @Name(Array("size1"))
+  @native
+  def nrow: Int
+
+  @Name(Array("size2"))
+  @native
+  def ncol: Int
+
+  @Name(Array("row_major"))
+  @native
+  def isRowMajor: Boolean
+
+  @Name(Array("internal_size1"))
+  @native
+  def internalnrow: Int
+
+  @Name(Array("internal_size2"))
+  @native
+  def internalncol: Int
+
+  @Name(Array("memory_domain"))
+  @native
+  def memoryDomain: Int
+
+  @Name(Array("switch_memory_context"))
+  @native
+  def switchMemoryContext(@ByRef ctx: Context)
+
+
+
+}
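As a small illustration of the accessors above, a hedged sketch that moves a matrix from host memory to the OpenCL backend and checks its memory domain. It assumes an OpenCL device is visible to the jniViennaCL build; the dimensions are illustrative.

  import org.apache.mahout.viennacl.opencl.javacpp.{Context, DenseRowMatrix}

  val m = new DenseRowMatrix(4, 4, new Context(Context.MAIN_MEMORY))
  m.switchMemoryContext(new Context(Context.OPENCL_MEMORY))

  assert(m.memoryDomain == Context.OPENCL_MEMORY)   // memory_domain reports the active backend
  m.close()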

http://git-wip-us.apache.org/repos/asf/mahout/blob/034790cc/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/MemHandle.scala
----------------------------------------------------------------------
diff --git a/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/MemHandle.scala b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/MemHandle.scala
new file mode 100644
index 0000000..73807ac
--- /dev/null
+++ b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/MemHandle.scala
@@ -0,0 +1,48 @@
+/**
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+package org.apache.mahout.viennacl.opencl.javacpp
+
+import org.bytedeco.javacpp.{Loader, Pointer}
+import org.bytedeco.javacpp.annotation._
+
+
+@Properties(inherit = Array(classOf[Context]),
+  value = Array(new Platform(
+    library = "jniViennaCL")
+  ))
+@Namespace("viennacl::backend")
+@Name(Array("mem_handle"))
+class MemHandle extends Pointer {
+
+  allocate()
+
+  @native def allocate()
+}
+
+object MemHandle {
+
+  def loadLib() = Loader.load(classOf[MemHandle])
+
+  loadLib()
+
+  /* Memory types. Ported from VCL header files. */
+  val MEMORY_NOT_INITIALIZED = 0
+  val MAIN_MEMORY = 1
+  val OPENCL_MEMORY = 2
+  val CUDA_MEMORY = 3
+
+}

http://git-wip-us.apache.org/repos/asf/mahout/blob/034790cc/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/ProdExpression.scala
----------------------------------------------------------------------
diff --git a/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/ProdExpression.scala b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/ProdExpression.scala
new file mode 100644
index 0000000..7ee42b8
--- /dev/null
+++ b/viennacl/src/main/scala/org/apache/mahout/viennacl/opencl/javacpp/ProdExpression.scala
@@ -0,0 +1,33 @@
+/**
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  * http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+package org.apache.mahout.viennacl.opencl.javacpp
+
+import org.bytedeco.javacpp.Pointer
+import org.bytedeco.javacpp.annotation.{Name, Namespace, Platform, Properties}
+
+
+@Properties(inherit = Array(classOf[Context]),
+  value = Array(new Platform(
+    library = "jniViennaCL")
+  ))
+@Namespace("viennacl")
+@Name(Array("matrix_expression<const viennacl::compressed_matrix<double>, " +
+  "const viennacl::compressed_matrix<double>, " +
+  "viennacl::op_prod>"))
+class ProdExpression extends Pointer {
+
+}