Posted to commits@systemml.apache.org by de...@apache.org on 2017/07/24 22:11:08 UTC

[1/4] systemml git commit: [SYSTEMML-1799] Remove ppred from test scripts

Repository: systemml
Updated Branches:
  refs/heads/master 11b689d49 -> 1a3d85f91
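
The change applied throughout the diffs below is mechanical: each ppred(A, B, "op") call is replaced by the equivalent relational-operator expression (A op B), which likewise evaluates to a 0/1 indicator matrix. A minimal DML sketch of the rewrite pattern (illustrative only, not part of the commit; the variable names are made up):

    A = rand(rows=3, cols=3, min=-1, max=1);
    # old style, removed by this commit:
    #   I = ppred(A, 0, ">");
    # new style, relational operator applied directly to the matrix:
    I = (A > 0);            # 0/1 indicator matrix
    npos = sum(A > 0);      # summing an indicator counts matching cells
    print("positive cells: " + npos);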


http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/validation/CV_MultiClassSVM.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/validation/CV_MultiClassSVM.dml b/src/test/scripts/applications/validation/CV_MultiClassSVM.dml
index 83e45de..796b821 100644
--- a/src/test/scripts/applications/validation/CV_MultiClassSVM.dml
+++ b/src/test/scripts/applications/validation/CV_MultiClassSVM.dml
@@ -55,9 +55,9 @@ stats = matrix(0, rows=k, cols=1); #k-folds x 1-stats
 parfor( i in 1:k )
 {
    #prepare train/test fold projections
-   vPxi = ppred( P, i, "==" );   #  Select 1/k fraction of the rows
+   vPxi = (P == i);   #  Select 1/k fraction of the rows
    mPxi = (vPxi %*% ones);       #  for the i-th fold TEST set
-   #nvPxi = ppred( P, i, "!=" );
+   #nvPxi = (P != i);
    #nmPxi = (nvPxi %*% ones);  #note: inefficient for sparse data  
 
    #create train/test folds
@@ -120,7 +120,7 @@ scoreMultiClassSVM = function( Matrix[double] X, Matrix[double] y, Matrix[double
    
    predicted_y = rowIndexMax( scores);
    
-   correct_percentage = sum( ppred( predicted_y - y, 0, "==")) / Nt * 100;
+   correct_percentage = sum((predicted_y - y) == 0) / Nt * 100;
 
    out_correct_pct = correct_percentage;
 
@@ -149,7 +149,7 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
       
       iter_class = 1
       
-      Y_local = 2 * ppred( Y, iter_class, "==") - 1
+      Y_local = 2 * (Y == iter_class) - 1
       w_class = Rand( rows=num_features, cols=1, min=0, max=0, pdf="uniform")
    
       if (intercept == 1) {
@@ -172,7 +172,7 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
         while(continue1 == 1){
          tmp_w = w_class + step_sz*s
          out = 1 - Y_local * (X %*% tmp_w)
-         sv = ppred(out, 0, ">")
+         sv = (out > 0)
          out = out * sv
          g = wd + step_sz*dd - sum(out * Y_local * Xd)
          h = dd + sum(Xd * sv * Xd)
@@ -186,14 +186,14 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
         w_class = w_class + step_sz*s
        
         out = 1 - Y_local * (X %*% w_class)
-        sv = ppred(out, 0, ">")
+        sv = (out > 0)
         out = sv * out
         obj = 0.5 * sum(out * out) + lambda/2 * sum(w_class * w_class)
         g_new = t(X) %*% (out * Y_local) - lambda * w_class
       
         tmp = sum(s * g_old)
         
-        train_acc = sum(ppred(Y_local*(X%*%w_class), 0, ">="))/num_samples*100
+        train_acc = sum((Y_local*(X%*%w_class)) >= 0)/num_samples*100
         print("For class " + iter_class + " iteration " + iter + " training accuracy: " + train_acc)
          
         if((step_sz*tmp < epsilon*obj) | (iter >= max_iterations-1)){
@@ -213,7 +213,7 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
       iter_class = iter_class + 1
       
       while(iter_class <= num_classes){
-       Y_local = 2 * ppred(Y, iter_class, "==") - 1
+       Y_local = 2 * (Y == iter_class) - 1
       # w_class = Rand(rows=num_features, cols=1, min=0, max=0, pdf="uniform")
        w_class = Rand(rows=ncol(X), cols=1, min=0, max=0, pdf="uniform")
        if (intercept == 1) {
@@ -236,7 +236,7 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
         while(continue1 == 1){
          tmp_w = w_class + step_sz*s
          out = 1 - Y_local * (X %*% tmp_w)
-         sv = ppred(out, 0, ">")
+         sv = (out > 0)
          out = out * sv
          g = wd + step_sz*dd - sum(out * Y_local * Xd)
          h = dd + sum(Xd * sv * Xd)
@@ -250,14 +250,14 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
         w_class = w_class + step_sz*s
        
         out = 1 - Y_local * (X %*% w_class)
-        sv = ppred(out, 0, ">")
+        sv = (out > 0)
         out = sv * out
         obj = 0.5 * sum(out * out) + lambda/2 * sum(w_class * w_class)
         g_new = t(X) %*% (out * Y_local) - lambda * w_class
       
         tmp = sum(s * g_old)
         
-        train_acc = sum(ppred(Y_local*(X%*%w_class), 0, ">="))/num_samples*100
+        train_acc = sum((Y_local*(X%*%w_class)) >= 0)/num_samples*100
         print("For class " + iter_class + " iteration " + iter + " training accuracy: " + train_acc)
          
         if((step_sz*tmp < epsilon*obj) | (iter >= max_iterations-1)){

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/validation/CV_MultiClassSVM.sasha.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/validation/CV_MultiClassSVM.sasha.dml b/src/test/scripts/applications/validation/CV_MultiClassSVM.sasha.dml
index e62d411..0943f1a 100644
--- a/src/test/scripts/applications/validation/CV_MultiClassSVM.sasha.dml
+++ b/src/test/scripts/applications/validation/CV_MultiClassSVM.sasha.dml
@@ -50,9 +50,9 @@ stats = matrix(0, rows=k, cols=40); #k-folds x 40-stats
 parfor( i in 1:k )
 {
     #prepare train/test fold projections
-    vPxi = ppred( P, i, "==" );
+    vPxi = (P == i);
     mPxi = (vPxi %*% ones);   
-    #nvPxi = ppred( P, i, "!=" );
+    #nvPxi = (P != i);
     #nmPxi = (nvPxi %*% ones);  #note: inefficient for sparse data  
 
     #create train/test folds
@@ -105,7 +105,7 @@ if (intercept == 1) {
 
 iter_class = 1
 
-Y_local = 2 * ppred(Y, iter_class, "==") - 1
+Y_local = 2 * (Y == iter_class) - 1
 w_class = Rand(rows=num_features, cols=1, min=0, max=0, pdf="uniform")
 if (intercept == 1) {
  zero_matrix = Rand(rows=1, cols=1, min=0.0, max=0.0);
@@ -126,7 +126,7 @@ while(continue == 1)  {
   while(continue1 == 1){
    tmp_w = w_class + step_sz*s
    out = 1 - Y_local * (X %*% tmp_w)
-   sv = ppred(out, 0, ">")
+   sv = (out > 0)
    out = out * sv
    g = wd + step_sz*dd - sum(out * Y_local * Xd)
    h = dd + sum(Xd * sv * Xd)
@@ -140,14 +140,14 @@ while(continue == 1)  {
   w_class = w_class + step_sz*s
  
   out = 1 - Y_local * (X %*% w_class)
-  sv = ppred(out, 0, ">")
+  sv = (out > 0)
   out = sv * out
   obj = 0.5 * sum(out * out) + lambda/2 * sum(w_class * w_class)
   g_new = t(X) %*% (out * Y_local) - lambda * w_class
 
   tmp = sum(s * g_old)
   
-  train_acc = sum(ppred(Y_local*(X%*%w_class), 0, ">="))/num_samples*100
+  train_acc = sum((Y_local*(X%*%w_class)) >= 0)/num_samples*100
   print("For class " + iter_class + " iteration " + iter + " training accuracy: " + train_acc)
    
   if((step_sz*tmp < epsilon*obj) | (iter >= max_iterations-1)){
@@ -167,7 +167,7 @@ w = w_class
 iter_class = iter_class + 1
 
 while(iter_class <= num_classes){
- Y_local = 2 * ppred(Y, iter_class, "==") - 1
+ Y_local = 2 * (Y == iter_class) - 1
 # w_class = Rand(rows=num_features, cols=1, min=0, max=0, pdf="uniform")
  w_class = Rand(rows=ncol(X), cols=1, min=0, max=0, pdf="uniform")
  if (intercept == 1) {
@@ -190,7 +190,7 @@ while(iter_class <= num_classes){
   while(continue1 == 1){
    tmp_w = w_class + step_sz*s
    out = 1 - Y_local * (X %*% tmp_w)
-   sv = ppred(out, 0, ">")
+   sv = (out > 0)
    out = out * sv
    g = wd + step_sz*dd - sum(out * Y_local * Xd)
    h = dd + sum(Xd * sv * Xd)
@@ -204,14 +204,14 @@ while(iter_class <= num_classes){
   w_class = w_class + step_sz*s
  
   out = 1 - Y_local * (X %*% w_class)
-  sv = ppred(out, 0, ">")
+  sv = (out > 0)
   out = sv * out
   obj = 0.5 * sum(out * out) + lambda/2 * sum(w_class * w_class)
   g_new = t(X) %*% (out * Y_local) - lambda * w_class
 
   tmp = sum(s * g_old)
   
-  train_acc = sum(ppred(Y_local*(X%*%w_class), 0, ">="))/num_samples*100
+  train_acc = sum(Y_local*(X%*%w_class) >= 0)/num_samples*100
   print("For class " + iter_class + " iteration " + iter + " training accuracy: " + train_acc)
    
   if((step_sz*tmp < epsilon*obj) | (iter >= max_iterations-1)){
@@ -267,8 +267,8 @@ scoreMultiClassSVM  = function (Matrix[double] X_train, Matrix[double] y_train,
     scores_test  = X_test  %*% W + ones_test  %*% b;
     y_train_pred = rowIndexMax (scores_train);
     y_test_pred  = rowIndexMax (scores_test);
-    correct_train= ppred (y_train_pred, y_train, "==");
-    correct_test = ppred (y_test_pred,  y_test,  "==");
+    correct_train= (y_train_pred == y_train);
+    correct_test = (y_test_pred == y_test);
 
 # TRAINING DATA - COMPARE WITH ACTUAL LABELS:
 # Compute the actual number of true/false predictions

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/validation/LinearLogisticRegression.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/validation/LinearLogisticRegression.dml b/src/test/scripts/applications/validation/LinearLogisticRegression.dml
index a158ba5..b6523ff 100644
--- a/src/test/scripts/applications/validation/LinearLogisticRegression.dml
+++ b/src/test/scripts/applications/validation/LinearLogisticRegression.dml
@@ -230,7 +230,7 @@ while(!converge) {
  } 
  
  o2 = y * o
- correct = sum(ppred(o2, 0, ">"))
+ correct = sum(o2 > 0)
  accuracy = correct*100.0/N 
  iter = iter + 1
  #converge = (norm_grad < (tol * norm_grad_initial)) | (iter > maxiter)

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/validation/MultiClassSVM.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/validation/MultiClassSVM.dml b/src/test/scripts/applications/validation/MultiClassSVM.dml
index 3c76177..e46e198 100644
--- a/src/test/scripts/applications/validation/MultiClassSVM.dml
+++ b/src/test/scripts/applications/validation/MultiClassSVM.dml
@@ -55,7 +55,7 @@ if (intercept == 1) {
 
 iter_class = 1
 
-Y_local = 2 * ppred(Y, iter_class, "==") - 1
+Y_local = 2 * (Y == iter_class) - 1
 w_class = Rand(rows=num_features, cols=1, min=0, max=0, pdf="uniform")
 if (intercept == 1) {
  zero_matrix = Rand(rows=1, cols=1, min=0.0, max=0.0);
@@ -76,7 +76,7 @@ while(continue == 1)  {
   while(continue1 == 1){
    tmp_w = w_class + step_sz*s
    out = 1 - Y_local * (X %*% tmp_w)
-   sv = ppred(out, 0, ">")
+   sv = (out > 0)
    out = out * sv
    g = wd + step_sz*dd - sum(out * Y_local * Xd)
    h = dd + sum(Xd * sv * Xd)
@@ -90,14 +90,14 @@ while(continue == 1)  {
   w_class = w_class + step_sz*s
  
   out = 1 - Y_local * (X %*% w_class)
-  sv = ppred(out, 0, ">")
+  sv = (out > 0)
   out = sv * out
   obj = 0.5 * sum(out * out) + lambda/2 * sum(w_class * w_class)
   g_new = t(X) %*% (out * Y_local) - lambda * w_class
 
   tmp = sum(s * g_old)
   
-  train_acc = sum(ppred(Y_local*(X%*%w_class), 0, ">="))/num_samples*100
+  train_acc = sum((Y_local*(X%*%w_class)) >= 0)/num_samples*100
   print("For class " + iter_class + " iteration " + iter + " training accuracy: " + train_acc)
    
   if((step_sz*tmp < epsilon*obj) | (iter >= max_iterations-1)){
@@ -117,7 +117,7 @@ w = w_class
 iter_class = iter_class + 1
 
 while(iter_class <= num_classes){
- Y_local = 2 * ppred(Y, iter_class, "==") - 1
+ Y_local = 2 * (Y == iter_class) - 1
 # w_class = Rand(rows=num_features, cols=1, min=0, max=0, pdf="uniform")
  w_class = Rand(rows=ncol(X), cols=1, min=0, max=0, pdf="uniform")
  if (intercept == 1) {
@@ -140,7 +140,7 @@ while(iter_class <= num_classes){
   while(continue1 == 1){
    tmp_w = w_class + step_sz*s
    out = 1 - Y_local * (X %*% tmp_w)
-   sv = ppred(out, 0, ">")
+   sv = (out > 0)
    out = out * sv
    g = wd + step_sz*dd - sum(out * Y_local * Xd)
    h = dd + sum(Xd * sv * Xd)
@@ -154,14 +154,14 @@ while(iter_class <= num_classes){
   w_class = w_class + step_sz*s
  
   out = 1 - Y_local * (X %*% w_class)
-  sv = ppred(out, 0, ">")
+  sv = (out > 0)
   out = sv * out
   obj = 0.5 * sum(out * out) + lambda/2 * sum(w_class * w_class)
   g_new = t(X) %*% (out * Y_local) - lambda * w_class
 
   tmp = sum(s * g_old)
   
-  train_acc = sum(ppred(Y_local*(X%*%w_class), 0, ">="))/num_samples*100
+  train_acc = sum(Y_local*(X%*%w_class) >= 0)/num_samples*100
   print("For class " + iter_class + " iteration " + iter + " training accuracy: " + train_acc)
    
   if((step_sz*tmp < epsilon*obj) | (iter >= max_iterations-1)){

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/validation/MultiClassSVMScore.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/validation/MultiClassSVMScore.dml b/src/test/scripts/applications/validation/MultiClassSVMScore.dml
index c6e9760..0e2c4ac 100644
--- a/src/test/scripts/applications/validation/MultiClassSVMScore.dml
+++ b/src/test/scripts/applications/validation/MultiClassSVMScore.dml
@@ -48,6 +48,6 @@ write(predicted_y, $7, format="text");
 
 if ($2 == 1) {
 	y = read($3);
-    correct_percentage = sum(ppred(predicted_y - y, 0, "==")) / Nt * 100;
+    correct_percentage = sum((predicted_y - y) == 0) / Nt * 100;
 	write(correct_percentage, $8);
 	}

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/validation/genRandData4LogisticRegression.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/validation/genRandData4LogisticRegression.dml b/src/test/scripts/applications/validation/genRandData4LogisticRegression.dml
index 7550a25..63dd402 100644
--- a/src/test/scripts/applications/validation/genRandData4LogisticRegression.dml
+++ b/src/test/scripts/applications/validation/genRandData4LogisticRegression.dml
@@ -82,7 +82,7 @@ if(addNoise == 1){
 
 print ("nrow(prob) = " + nrow(prob) + ", ncol(prob) = " + ncol(prob) + ";  nrow(r) = " + nrow(r) + ", ncol(r) = " + ncol(r));
 
-Y = 1 - 2*ppred(prob, r, "<")
+Y = 1 - 2*(prob < r)
 
 write (w, $5, format="text");
 write (X, $6, format="binary");

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/validation/genRandData4MultiClassSVM.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/validation/genRandData4MultiClassSVM.dml b/src/test/scripts/applications/validation/genRandData4MultiClassSVM.dml
index d9fe043..5af1c65 100644
--- a/src/test/scripts/applications/validation/genRandData4MultiClassSVM.dml
+++ b/src/test/scripts/applications/validation/genRandData4MultiClassSVM.dml
@@ -66,7 +66,7 @@ if(addNoise == 1){
 	r = Rand(rows=numSamples, cols=1, min=0, max=1, pdf="uniform", seed=0)
 	#r = Rand(rows=numSamples, cols=1, min=0.5, max=0.5, pdf="uniform")
 }
-Y = 1 - 2*ppred(prob, r, "<")
+Y = 1 - 2*(prob < r)
 Y = (Y+3)/2
 
 write(w, $5, format="binary")

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/binary/matrix/UltraSparseMatrixMultiplication.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/binary/matrix/UltraSparseMatrixMultiplication.dml b/src/test/scripts/functions/binary/matrix/UltraSparseMatrixMultiplication.dml
index 7662968..e16a597 100644
--- a/src/test/scripts/functions/binary/matrix/UltraSparseMatrixMultiplication.dml
+++ b/src/test/scripts/functions/binary/matrix/UltraSparseMatrixMultiplication.dml
@@ -24,7 +24,7 @@
 A = read($1, rows=$2, cols=$3, format="text");
 B = read($4, rows=$2, cols=1, format="text");
 
-tmp = diag(ppred(B,2,"=="));
+tmp = diag(B == 2);
 P = removeEmpty(target=tmp, margin="rows");
 C = P %*% A;
 

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/binary/matrix/UltraSparseMatrixMultiplication2.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/binary/matrix/UltraSparseMatrixMultiplication2.dml b/src/test/scripts/functions/binary/matrix/UltraSparseMatrixMultiplication2.dml
index 6300010..55aeebd 100644
--- a/src/test/scripts/functions/binary/matrix/UltraSparseMatrixMultiplication2.dml
+++ b/src/test/scripts/functions/binary/matrix/UltraSparseMatrixMultiplication2.dml
@@ -24,7 +24,7 @@
 A = read($1, rows=$2, cols=$3, format="text");
 B = read($4, rows=$2, cols=1, format="text");
 
-tmp = diag(ppred(B,2,"=="));
+tmp = diag(B == 2);
 P = removeEmpty(target=tmp, margin="cols");
 C = t(P) %*% A;
 

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/binary/matrix_full_other/LogicalMatrixTest.R
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/binary/matrix_full_other/LogicalMatrixTest.R b/src/test/scripts/functions/binary/matrix_full_other/LogicalMatrixTest.R
new file mode 100644
index 0000000..6f57134
--- /dev/null
+++ b/src/test/scripts/functions/binary/matrix_full_other/LogicalMatrixTest.R
@@ -0,0 +1,59 @@
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#   http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
+
+args <- commandArgs(TRUE)
+options(digits=22)
+
+library("Matrix")
+
+A <- readMM(paste(args[1], "A.mtx", sep=""))
+B <- readMM(paste(args[1], "B.mtx", sep=""))
+
+type = as.integer(args[2])
+
+if( type == 0 )
+{
+   C = (A > B)
+}
+if( type == 1 )
+{
+   C = (A < B)
+}
+if( type == 2 )
+{
+   C = (A == B)
+}
+if( type == 3 )
+{
+   C = (A != B)
+}
+if( type == 4 )
+{
+   C = (A >= B)
+}
+if( type == 5 )
+{
+   C = (A <= B)
+}
+
+
+writeMM(as(C, "CsparseMatrix"), paste(args[3], "C", sep="")); 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/binary/matrix_full_other/LogicalMatrixTest.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/binary/matrix_full_other/LogicalMatrixTest.dml b/src/test/scripts/functions/binary/matrix_full_other/LogicalMatrixTest.dml
new file mode 100644
index 0000000..65dc35d
--- /dev/null
+++ b/src/test/scripts/functions/binary/matrix_full_other/LogicalMatrixTest.dml
@@ -0,0 +1,50 @@
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#   http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
+A = read( $1 );
+B = read( $2 );
+
+if( $3 == 0 )
+{
+   C = (A > B)
+}
+else if( $3 == 1 )
+{
+   C = (A < B)
+}
+else if( $3 == 2 )
+{  
+   C = (A == B)
+}
+else if( $3 == 3 )
+{
+   C = (A != B)
+}
+else if( $3 == 4 )
+{
+   C = (A >= B)
+}
+else if( $3 == 5 )
+{
+   C = (A <= B)
+}
+
+write(C, $4, format="text");
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/binary/matrix_full_other/LogicalScalarLeftTest.R
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/binary/matrix_full_other/LogicalScalarLeftTest.R b/src/test/scripts/functions/binary/matrix_full_other/LogicalScalarLeftTest.R
new file mode 100644
index 0000000..c54b185
--- /dev/null
+++ b/src/test/scripts/functions/binary/matrix_full_other/LogicalScalarLeftTest.R
@@ -0,0 +1,60 @@
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#   http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
+
+args <- commandArgs(TRUE)
+options(digits=22)
+
+library("Matrix")
+
+A1 <- readMM(paste(args[1], "A.mtx", sep=""))
+A <- as.matrix(A1);
+
+type = as.integer(args[2])
+constant = as.double(args[3]);
+
+if( type == 0 )
+{
+   B = (constant > A)
+}
+if( type == 1 )
+{
+   B = (constant < A)
+}
+if( type == 2 )
+{
+   B = (constant == A)
+}
+if( type == 3 )
+{
+   B = (constant != A)
+}
+if( type == 4 )
+{
+   B = (constant >= A)
+}
+if( type == 5 )
+{
+   B = (constant <= A)
+}
+
+
+writeMM(as(B, "CsparseMatrix"), paste(args[4], "B", sep="")); 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/binary/matrix_full_other/LogicalScalarLeftTest.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/binary/matrix_full_other/LogicalScalarLeftTest.dml b/src/test/scripts/functions/binary/matrix_full_other/LogicalScalarLeftTest.dml
new file mode 100644
index 0000000..8097f40
--- /dev/null
+++ b/src/test/scripts/functions/binary/matrix_full_other/LogicalScalarLeftTest.dml
@@ -0,0 +1,49 @@
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#   http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
+A = read( $1 );
+
+if( $2 == 0 )
+{
+   B = ($3 > A)
+}
+else if( $2 == 1 )
+{
+   B = ($3 < A)
+}
+else if( $2 == 2 )
+{
+   B = ($3 == A)
+}
+else if( $2 == 3 )
+{
+   B = ($3 != A)
+}
+else if( $2 == 4 )
+{
+   B = ($3 >= A)
+}
+else if( $2 == 5 )
+{
+   B = ($3 <= A)
+}
+
+write(B, $4, format="text");
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/binary/matrix_full_other/LogicalScalarRightTest.R
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/binary/matrix_full_other/LogicalScalarRightTest.R b/src/test/scripts/functions/binary/matrix_full_other/LogicalScalarRightTest.R
new file mode 100644
index 0000000..cd39071
--- /dev/null
+++ b/src/test/scripts/functions/binary/matrix_full_other/LogicalScalarRightTest.R
@@ -0,0 +1,60 @@
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#   http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
+
+args <- commandArgs(TRUE)
+options(digits=22)
+
+library("Matrix")
+
+A1 <- readMM(paste(args[1], "A.mtx", sep=""))
+A <- as.matrix(A1);
+
+type = as.integer(args[2])
+constant = as.double(args[3]);
+
+if( type == 0 )
+{
+   B = (A > constant)
+}
+if( type == 1 )
+{
+   B = (A < constant)
+}
+if( type == 2 )
+{
+   B = (A == constant)
+}
+if( type == 3 )
+{
+   B = (A != constant)
+}
+if( type == 4 )
+{
+   B = (A >= constant)
+}
+if( type == 5 )
+{
+   B = (A <= constant)
+}
+
+
+writeMM(as(B, "CsparseMatrix"), paste(args[4], "B", sep="")); 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/binary/matrix_full_other/LogicalScalarRightTest.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/binary/matrix_full_other/LogicalScalarRightTest.dml b/src/test/scripts/functions/binary/matrix_full_other/LogicalScalarRightTest.dml
new file mode 100644
index 0000000..c23acea
--- /dev/null
+++ b/src/test/scripts/functions/binary/matrix_full_other/LogicalScalarRightTest.dml
@@ -0,0 +1,49 @@
+#-------------------------------------------------------------
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#   http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#-------------------------------------------------------------
+
+A = read( $1 );
+
+if( $2 == 0 )
+{
+   B = (A > $3)
+}
+else if( $2 == 1 )
+{
+   B = (A < $3)
+}
+else if( $2 == 2 )
+{
+   B = (A == $3)
+}
+else if( $2 == 3 )
+{
+   B = (A != $3)
+}
+else if( $2 == 4 )
+{
+   B = (A >= $3)
+}
+else if( $2 == 5 )
+{
+   B = (A <= $3)
+}
+
+write(B, $4, format="text");
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/binary/matrix_full_other/PPredMatrixTest.R
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/binary/matrix_full_other/PPredMatrixTest.R b/src/test/scripts/functions/binary/matrix_full_other/PPredMatrixTest.R
deleted file mode 100644
index 6f57134..0000000
--- a/src/test/scripts/functions/binary/matrix_full_other/PPredMatrixTest.R
+++ /dev/null
@@ -1,59 +0,0 @@
-#-------------------------------------------------------------
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#-------------------------------------------------------------
-
-
-args <- commandArgs(TRUE)
-options(digits=22)
-
-library("Matrix")
-
-A <- readMM(paste(args[1], "A.mtx", sep=""))
-B <- readMM(paste(args[1], "B.mtx", sep=""))
-
-type = as.integer(args[2])
-
-if( type == 0 )
-{
-   C = (A > B)
-}
-if( type == 1 )
-{
-   C = (A < B)
-}
-if( type == 2 )
-{
-   C = (A == B)
-}
-if( type == 3 )
-{
-   C = (A != B)
-}
-if( type == 4 )
-{
-   C = (A >= B)
-}
-if( type == 5 )
-{
-   C = (A <= B)
-}
-
-
-writeMM(as(C, "CsparseMatrix"), paste(args[3], "C", sep="")); 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/binary/matrix_full_other/PPredMatrixTest.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/binary/matrix_full_other/PPredMatrixTest.dml b/src/test/scripts/functions/binary/matrix_full_other/PPredMatrixTest.dml
deleted file mode 100644
index 13fd854..0000000
--- a/src/test/scripts/functions/binary/matrix_full_other/PPredMatrixTest.dml
+++ /dev/null
@@ -1,50 +0,0 @@
-#-------------------------------------------------------------
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#-------------------------------------------------------------
-
-A = read( $1 );
-B = read( $2 );
-
-if( $3 == 0 )
-{
-   C = ppred(A, B, ">")
-}
-else if( $3 == 1 )
-{
-   C = ppred(A, B, "<")
-}
-else if( $3 == 2 )
-{  
-   C = ppred(A, B, "==")
-}
-else if( $3 == 3 )
-{
-   C = ppred(A, B, "!=")
-}
-else if( $3 == 4 )
-{
-   C = ppred(A, B, ">=")
-}
-else if( $3 == 5 )
-{
-   C = ppred(A, B, "<=")
-}
-
-write(C, $4, format="text");
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/binary/matrix_full_other/PPredScalarLeftTest.R
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/binary/matrix_full_other/PPredScalarLeftTest.R b/src/test/scripts/functions/binary/matrix_full_other/PPredScalarLeftTest.R
deleted file mode 100644
index c54b185..0000000
--- a/src/test/scripts/functions/binary/matrix_full_other/PPredScalarLeftTest.R
+++ /dev/null
@@ -1,60 +0,0 @@
-#-------------------------------------------------------------
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#-------------------------------------------------------------
-
-
-args <- commandArgs(TRUE)
-options(digits=22)
-
-library("Matrix")
-
-A1 <- readMM(paste(args[1], "A.mtx", sep=""))
-A <- as.matrix(A1);
-
-type = as.integer(args[2])
-constant = as.double(args[3]);
-
-if( type == 0 )
-{
-   B = (constant > A)
-}
-if( type == 1 )
-{
-   B = (constant < A)
-}
-if( type == 2 )
-{
-   B = (constant == A)
-}
-if( type == 3 )
-{
-   B = (constant != A)
-}
-if( type == 4 )
-{
-   B = (constant >= A)
-}
-if( type == 5 )
-{
-   B = (constant <= A)
-}
-
-
-writeMM(as(B, "CsparseMatrix"), paste(args[4], "B", sep="")); 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/binary/matrix_full_other/PPredScalarLeftTest.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/binary/matrix_full_other/PPredScalarLeftTest.dml b/src/test/scripts/functions/binary/matrix_full_other/PPredScalarLeftTest.dml
deleted file mode 100644
index 2567e9b..0000000
--- a/src/test/scripts/functions/binary/matrix_full_other/PPredScalarLeftTest.dml
+++ /dev/null
@@ -1,49 +0,0 @@
-#-------------------------------------------------------------
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#-------------------------------------------------------------
-
-A = read( $1 );
-
-if( $2 == 0 )
-{
-   B = ppred($3, A, ">")
-}
-else if( $2 == 1 )
-{
-   B = ppred($3, A, "<")
-}
-else if( $2 == 2 )
-{
-   B = ppred($3, A, "==")
-}
-else if( $2 == 3 )
-{
-   B = ppred($3, A, "!=")
-}
-else if( $2 == 4 )
-{
-   B = ppred($3, A, ">=")
-}
-else if( $2 == 5 )
-{
-   B = ppred($3, A, "<=")
-}
-
-write(B, $4, format="text");
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/binary/matrix_full_other/PPredScalarRightTest.R
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/binary/matrix_full_other/PPredScalarRightTest.R b/src/test/scripts/functions/binary/matrix_full_other/PPredScalarRightTest.R
deleted file mode 100644
index cd39071..0000000
--- a/src/test/scripts/functions/binary/matrix_full_other/PPredScalarRightTest.R
+++ /dev/null
@@ -1,60 +0,0 @@
-#-------------------------------------------------------------
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#-------------------------------------------------------------
-
-
-args <- commandArgs(TRUE)
-options(digits=22)
-
-library("Matrix")
-
-A1 <- readMM(paste(args[1], "A.mtx", sep=""))
-A <- as.matrix(A1);
-
-type = as.integer(args[2])
-constant = as.double(args[3]);
-
-if( type == 0 )
-{
-   B = (A > constant)
-}
-if( type == 1 )
-{
-   B = (A < constant)
-}
-if( type == 2 )
-{
-   B = (A == constant)
-}
-if( type == 3 )
-{
-   B = (A != constant)
-}
-if( type == 4 )
-{
-   B = (A >= constant)
-}
-if( type == 5 )
-{
-   B = (A <= constant)
-}
-
-
-writeMM(as(B, "CsparseMatrix"), paste(args[4], "B", sep="")); 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/binary/matrix_full_other/PPredScalarRightTest.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/binary/matrix_full_other/PPredScalarRightTest.dml b/src/test/scripts/functions/binary/matrix_full_other/PPredScalarRightTest.dml
deleted file mode 100644
index e9c0579..0000000
--- a/src/test/scripts/functions/binary/matrix_full_other/PPredScalarRightTest.dml
+++ /dev/null
@@ -1,49 +0,0 @@
-#-------------------------------------------------------------
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#-------------------------------------------------------------
-
-A = read( $1 );
-
-if( $2 == 0 )
-{
-   B = ppred(A, $3, ">")
-}
-else if( $2 == 1 )
-{
-   B = ppred(A, $3, "<")
-}
-else if( $2 == 2 )
-{
-   B = ppred(A, $3, "==")
-}
-else if( $2 == 3 )
-{
-   B = ppred(A, $3, "!=")
-}
-else if( $2 == 4 )
-{
-   B = ppred(A, $3, ">=")
-}
-else if( $2 == 5 )
-{
-   B = ppred(A, $3, "<=")
-}
-
-write(B, $4, format="text");
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/misc/ValueTypePredLeftScalar.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/misc/ValueTypePredLeftScalar.dml b/src/test/scripts/functions/misc/ValueTypePredLeftScalar.dml
index cc09f42..106d2db 100644
--- a/src/test/scripts/functions/misc/ValueTypePredLeftScalar.dml
+++ b/src/test/scripts/functions/misc/ValueTypePredLeftScalar.dml
@@ -21,5 +21,5 @@
 
 
 X = rand(rows=10, cols=10);
-Y = ppred($1, X, ">");
+Y = ($1 > X);
 write(Y, $2);

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/misc/ValueTypePredRightScalar.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/misc/ValueTypePredRightScalar.dml b/src/test/scripts/functions/misc/ValueTypePredRightScalar.dml
index 19deb5e..27c6b29 100644
--- a/src/test/scripts/functions/misc/ValueTypePredRightScalar.dml
+++ b/src/test/scripts/functions/misc/ValueTypePredRightScalar.dml
@@ -21,5 +21,5 @@
 
 
 X = rand(rows=10, cols=10);
-Y = ppred(X, $1, "<");
+Y = (X < $1);
 write(Y, $2);

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/quaternary/WeightedDivMMMultMinusLeft.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/quaternary/WeightedDivMMMultMinusLeft.dml b/src/test/scripts/functions/quaternary/WeightedDivMMMultMinusLeft.dml
index 6c7b61e..50a1cf1 100644
--- a/src/test/scripts/functions/quaternary/WeightedDivMMMultMinusLeft.dml
+++ b/src/test/scripts/functions/quaternary/WeightedDivMMMultMinusLeft.dml
@@ -25,6 +25,6 @@ W = read($1);
 U = read($2);
 V = read($3);
 
-R = t(t(U) %*% (ppred(W,0,"!=")*(U%*%t(V)-W)));
+R = t(t(U) %*% ((W != 0)*(U%*%t(V)-W)));
 
 write(R, $4);

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/quaternary/WeightedDivMMMultMinusRight.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/quaternary/WeightedDivMMMultMinusRight.dml b/src/test/scripts/functions/quaternary/WeightedDivMMMultMinusRight.dml
index b9b86fc..2fe105c 100644
--- a/src/test/scripts/functions/quaternary/WeightedDivMMMultMinusRight.dml
+++ b/src/test/scripts/functions/quaternary/WeightedDivMMMultMinusRight.dml
@@ -25,6 +25,6 @@ W = read($1);
 U = read($2);
 V = read($3);
 
-R = (ppred(W,0,"!=")*(U%*%t(V)-W)) %*% V;
+R = ((W != 0)*(U%*%t(V)-W)) %*% V;
 
 write(R, $4);

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/quaternary/WeightedSquaredLossPostNz.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/quaternary/WeightedSquaredLossPostNz.dml b/src/test/scripts/functions/quaternary/WeightedSquaredLossPostNz.dml
index 5aa9c1c..b6c1f1a 100644
--- a/src/test/scripts/functions/quaternary/WeightedSquaredLossPostNz.dml
+++ b/src/test/scripts/functions/quaternary/WeightedSquaredLossPostNz.dml
@@ -25,7 +25,7 @@ X = read($1);
 U = read($2);
 V = read($3);
 
-sl = sum( ppred(X,0,"!=") * (X - U %*% t(V)) ^ 2 );
+sl = sum( (X != 0) * (X - U %*% t(V)) ^ 2 );
 R = as.matrix(sl);
 
 write(R, $5);

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/ternary/CTableRowHist.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/ternary/CTableRowHist.dml b/src/test/scripts/functions/ternary/CTableRowHist.dml
index ed34eef..e12fbef 100644
--- a/src/test/scripts/functions/ternary/CTableRowHist.dml
+++ b/src/test/scripts/functions/ternary/CTableRowHist.dml
@@ -22,7 +22,7 @@
 
 A = read($1, format="text");            
 
-IA = ppred (A, 0, "!=") * seq (1, nrow (A), 1);
+IA = (A != 0) * seq (1, nrow (A), 1);
 IA = matrix (IA, rows = (nrow (A) * ncol(A)), cols = 1, byrow = FALSE);
 VA = matrix ( A, rows = (nrow (A) * ncol(A)), cols = 1, byrow = FALSE);
 IA = removeEmpty (target = IA, margin = "rows");

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/unary/matrix/SelPos.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/unary/matrix/SelPos.dml b/src/test/scripts/functions/unary/matrix/SelPos.dml
index 655b685..8e14b47 100644
--- a/src/test/scripts/functions/unary/matrix/SelPos.dml
+++ b/src/test/scripts/functions/unary/matrix/SelPos.dml
@@ -22,5 +22,5 @@
 
 
 A = read($1);
-B = A*ppred(A,0,">");
+B = A*(A > 0);
 write(B, $2);

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/functions/unary/matrix/Sign2.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/unary/matrix/Sign2.dml b/src/test/scripts/functions/unary/matrix/Sign2.dml
index 40ed84c..43ccdbd 100644
--- a/src/test/scripts/functions/unary/matrix/Sign2.dml
+++ b/src/test/scripts/functions/unary/matrix/Sign2.dml
@@ -22,5 +22,5 @@
 
 
 A = read($1);
-B = ppred(A, 0, ">") - ppred(A, 0, "<");
+B = (A > 0) - (A < 0);
 write(B, $2);

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test_suites/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/ZPackageSuite.java
----------------------------------------------------------------------
diff --git a/src/test_suites/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/ZPackageSuite.java b/src/test_suites/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/ZPackageSuite.java
index 516dc7b..558c77a 100644
--- a/src/test_suites/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/ZPackageSuite.java
+++ b/src/test_suites/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/ZPackageSuite.java
@@ -34,9 +34,9 @@ import org.junit.runners.Suite;
 	FullMatrixMultiplicationUltraSparseTest.class,
 	FullMinMaxComparisonTest.class,
 	FullPowerTest.class,
-	FullPPredMatrixTest.class,
-	FullPPredScalarLeftTest.class,
-	FullPPredScalarRightTest.class,
+	FullLogicalMatrixTest.class,
+	FullLogicalScalarLeftTest.class,
+	FullLogicalScalarRightTest.class,
 	MatrixMultShortLhsTest.class,
 })
 


[2/4] systemml git commit: [SYSTEMML-1799] Remove ppred from test scripts

Posted by de...@apache.org.
http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/ctableStats/Binomial.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/ctableStats/Binomial.dml b/src/test/scripts/applications/ctableStats/Binomial.dml
index a3cbf5a..6403a66 100644
--- a/src/test/scripts/applications/ctableStats/Binomial.dml
+++ b/src/test/scripts/applications/ctableStats/Binomial.dml
@@ -41,7 +41,7 @@ binomQuantile =
     for (i in 1:27) {  #  Uses "division by half" method to solve equations
         p_new = (p_min + p_max) / 2.0;
         [alpha_p_new] = binomProb (n_vector, m_vector, p_new);
-        move_new_to_max = ppred (alpha_p_new, alpha_vector, "<");
+        move_new_to_max = (alpha_p_new < alpha_vector);
         p_max = (1 - move_new_to_max) * p_max + move_new_to_max * p_new;
         p_min = (1 - move_new_to_max) * p_new + move_new_to_max * p_min;
         alpha_p_max = (1 - move_new_to_max) * alpha_p_max + move_new_to_max * alpha_p_new;
@@ -64,15 +64,15 @@ binomProb =
     num_iterations = 100;
 
     mean_vector = p_vector * n_vector;
-    is_opposite = ppred (mean_vector, m_vector, "<");
+    is_opposite = (mean_vector < m_vector);
     l_vector = is_opposite * (n_vector - (m_vector + 1)) + (1 - is_opposite) * m_vector;
     q_vector = is_opposite * (1.0 - p_vector) + (1 - is_opposite) * p_vector;
     n_minus_l_vector = n_vector - l_vector;
     
-    is_result_zero1 = ppred (l_vector, - 0.0000000001, "<");
-    is_result_one1  = ppred (n_minus_l_vector, 0.0000000001, "<");
-    is_result_zero2 = ppred (q_vector, 0.9999999999, ">");
-    is_result_one2  = ppred (q_vector, 0.0000000001, "<");
+    is_result_zero1 = (l_vector < - 0.0000000001);
+    is_result_one1  = (n_minus_l_vector < 0.0000000001);
+    is_result_zero2 = (q_vector > 0.9999999999);
+    is_result_one2  = (q_vector < 0.0000000001);
     
     is_result_zero  = is_result_zero1 + (1 - is_result_zero1) * is_result_zero2 * (1 - is_result_one1);
     is_result_one   = (is_result_one1 + (1 - is_result_one1)  * is_result_one2) * (1 - is_result_zero);
@@ -116,8 +116,8 @@ binomProb =
         denom = denom_new;
         
         abs_denom = abs (denom);
-        denom_too_big = ppred (abs_denom, 10000000000.0, ">");
-        denom_too_small = ppred (abs_denom, 0.0000000001, "<");
+        denom_too_big = (abs_denom > 10000000000.0);
+        denom_too_small = (abs_denom < 0.0000000001);
         denom_normal = 1.0 - denom_too_big - denom_too_small;
         rescale_vector = denom_too_big * 0.0000000001 + denom_too_small * 10000000000.0 + denom_normal;
         numer_old = numer_old * rescale_vector;
@@ -127,7 +127,7 @@ binomProb =
         
         convergence_check_left  = abs (numer * denom_old - numer_old * denom);
         convergence_check_right = abs (numer * denom_old) * 0.000000001;
-        has_converged = ppred (convergence_check_left, convergence_check_right, "<=");
+        has_converged = (convergence_check_left <= convergence_check_right);
         has_converged = still_iterating * has_converged;
         still_iterating = still_iterating - has_converged;
         result = result + has_converged * numer / denom;

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/ctableStats/ctci_odds.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/ctableStats/ctci_odds.dml b/src/test/scripts/applications/ctableStats/ctci_odds.dml
index 3d97489..f13a25b 100644
--- a/src/test/scripts/applications/ctableStats/ctci_odds.dml
+++ b/src/test/scripts/applications/ctableStats/ctci_odds.dml
@@ -172,7 +172,7 @@ gaussian_probability = function (Matrix[double] vector_of_points)
                  + t_gp * ( 1.421413741 # U.S. Nat-l Bureau of Standards, 10th print (Dec 1972), Sec. 7.1.26, p. 299
                  + t_gp * (-1.453152027 
                  + t_gp *   1.061405429)))) * exp (- vector_of_points * vector_of_points / 2.0);
-    erf_gp = erf_gp * 2.0 * (ppred (vector_of_points, 0.0, ">") - 0.5);
+    erf_gp = erf_gp * 2.0 * ((vector_of_points > 0.0) - 0.5);
     vector_of_probabilities = 0.5 + 0.5 * erf_gp;
 }
 

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/ctableStats/zipftest.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/ctableStats/zipftest.dml b/src/test/scripts/applications/ctableStats/zipftest.dml
index 50ac5f4..5750c37 100644
--- a/src/test/scripts/applications/ctableStats/zipftest.dml
+++ b/src/test/scripts/applications/ctableStats/zipftest.dml
@@ -58,7 +58,7 @@ avg_density_records = rowSums (Probs);
 avg_density_features = colSums (Probs);
 
 Tosses = Rand (rows = num_records, cols = num_features, min = 0.0, max = 1.0);
-Data = ppred (Tosses, Probs, "<=");
+Data = (Tosses <= Probs);
 
 write (avg_density_records,  "Zipf.AvgDensity.Rows", format="text");
 write (avg_density_features, "Zipf.AvgDensity.Cols", format="text");

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/descriptivestats/Categorical.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/descriptivestats/Categorical.dml b/src/test/scripts/applications/descriptivestats/Categorical.dml
index e4aa9a5..e0e9959 100644
--- a/src/test/scripts/applications/descriptivestats/Categorical.dml
+++ b/src/test/scripts/applications/descriptivestats/Categorical.dml
@@ -40,11 +40,11 @@ s = sum(Nc)
 Pc = Nc / s
 
 # all categorical values of a categorical variable
-C = ppred(Nc, 0, ">")
+C = (Nc > 0)
 
 # mode
 mx = max(Nc)
-Mode =  ppred(Nc, mx, "==")
+Mode =  (Nc == mx)
 
 write(Nc, $3, format="text")
 write(R, $4)

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/descriptivestats/Scale.R
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/descriptivestats/Scale.R b/src/test/scripts/applications/descriptivestats/Scale.R
index 45c9efb..ef9715a 100644
--- a/src/test/scripts/applications/descriptivestats/Scale.R
+++ b/src/test/scripts/applications/descriptivestats/Scale.R
@@ -114,7 +114,6 @@ iqm = iqm/(n*0.5)
 
 #print(paste("IQM ", iqm));
 
-# outliers use ppred to describe it
 out_minus = t(as.numeric(V< mu-5*std_dev)*V) 
 out_plus = t(as.numeric(V> mu+5*std_dev)*V)
 

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/descriptivestats/Scale.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/descriptivestats/Scale.dml b/src/test/scripts/applications/descriptivestats/Scale.dml
index 2394270..eee643e 100644
--- a/src/test/scripts/applications/descriptivestats/Scale.dml
+++ b/src/test/scripts/applications/descriptivestats/Scale.dml
@@ -90,9 +90,8 @@ Q = quantile(V, P)
 # inter-quartile mean
 iqm = interQuartileMean(V)
 
-# outliers use ppred to describe it
-out_minus = ppred(V, mu-5*std_dev, "<")*V 
-out_plus = ppred(V, mu+5*std_dev, ">")*V
+out_minus = (V < (mu-5*std_dev))*V 
+out_plus = (V > (mu+5*std_dev))*V
 
 write(mu, $5);
 write(std_dev, $6);

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/descriptivestats/WeightedCategoricalTest.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/descriptivestats/WeightedCategoricalTest.dml b/src/test/scripts/applications/descriptivestats/WeightedCategoricalTest.dml
index b6ff41a..b9f8e3a 100644
--- a/src/test/scripts/applications/descriptivestats/WeightedCategoricalTest.dml
+++ b/src/test/scripts/applications/descriptivestats/WeightedCategoricalTest.dml
@@ -41,11 +41,11 @@ s = sum(Nc)
 Pc = Nc / s
 
 # all categorical values of a categorical variable
-C = ppred(Nc, 0, ">")
+C = (Nc > 0)
 
 # mode
 mx = max(Nc)
-Mode =  ppred(Nc, mx, "==")
+Mode =  (Nc == mx)
 
 write(Nc, $4, format="text")
 write(R, $5)

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/descriptivestats/WeightedScaleTest.R
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/descriptivestats/WeightedScaleTest.R b/src/test/scripts/applications/descriptivestats/WeightedScaleTest.R
index eba3d1c..f539889 100644
--- a/src/test/scripts/applications/descriptivestats/WeightedScaleTest.R
+++ b/src/test/scripts/applications/descriptivestats/WeightedScaleTest.R
@@ -130,7 +130,6 @@ iqm = iqm/(n*0.5)
 
 #print(paste("IQM ", iqm));
 
-# outliers use ppred to describe it
 out_minus = t(as.numeric(Temp < mu-5*std_dev)*Temp) 
 out_plus = t(as.numeric(Temp > mu+5*std_dev)*Temp)
 

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/descriptivestats/WeightedScaleTest.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/descriptivestats/WeightedScaleTest.dml b/src/test/scripts/applications/descriptivestats/WeightedScaleTest.dml
index d038328..bc4f9e4 100644
--- a/src/test/scripts/applications/descriptivestats/WeightedScaleTest.dml
+++ b/src/test/scripts/applications/descriptivestats/WeightedScaleTest.dml
@@ -92,9 +92,8 @@ g2 = (wt^2*(wt+1)*m4-3*m2^2*wt^2*(wt-1))/((wt-1)*(wt-2)*(wt-3)*std_dev^4)
 # Standard error of Kurtosis
 se_g2= sqrt( (4*(wt^2-1)*se_g1^2)/((wt+5)*(wt-3)) )
 
-# outliers use ppred to describe it
-out_minus = ppred(V, mu-5*std_dev, "<")*V 
-out_plus = ppred(V, mu+5*std_dev, ">")*V
+out_minus = (V < (mu-5*std_dev))*V 
+out_plus = (V > (mu+5*std_dev))*V
 
 # median
 md = median(V,W); #quantile(V, W, 0.5)

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/glm/GLM.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/glm/GLM.dml b/src/test/scripts/applications/glm/GLM.dml
index dd5163d..9dc311d 100644
--- a/src/test/scripts/applications/glm/GLM.dml
+++ b/src/test/scripts/applications/glm/GLM.dml
@@ -198,7 +198,7 @@ if (intercept_status == 2)  # scale-&-shift X columns to mean 0, variance 1
 {                           # Important assumption: X [, num_features] = ones_r
     avg_X_cols = t(colSums(X)) / num_records;
     var_X_cols = (t(colSums (X ^ 2)) - num_records * (avg_X_cols ^ 2)) / (num_records - 1);
-    is_unsafe = ppred (var_X_cols, 0.0, "<=");
+    is_unsafe = (var_X_cols <= 0.0);
     scale_X = 1.0 / sqrt (var_X_cols * (1 - is_unsafe) + is_unsafe);
     scale_X [num_features, 1] = 1;
     shift_X = - avg_X_cols * scale_X;
@@ -233,7 +233,7 @@ if (max_iteration_CG == 0) {
 
 if (distribution_type == 2 & ncol(Y) == 1)
 {
-    is_Y_negative = ppred (Y, bernoulli_No_label, "==");
+    is_Y_negative = (Y == bernoulli_No_label);
     Y = cbind (1 - is_Y_negative, is_Y_negative);
     count_Y_negative = sum (is_Y_negative);
     if (count_Y_negative == 0) {
@@ -574,14 +574,14 @@ return (Matrix[double] beta, double saturated_log_l, int isNaN)
     y_corr = Y [, 1];
     if (dist_type == 2) {
         n_corr = rowSums (Y);
-        is_n_zero = ppred (n_corr, 0.0, "==");
+        is_n_zero = (n_corr == 0.0);
         y_corr = Y [, 1] / (n_corr + is_n_zero) + (0.5 - Y [, 1]) * is_n_zero;    
     }
     linear_terms = y_corr;
     if (dist_type == 1 & link_type == 1) { # POWER DISTRIBUTION
         if          (link_power ==  0.0) {
-            if (sum (ppred (y_corr, 0.0, "<")) == 0) {
-                is_zero_y_corr = ppred (y_corr, 0.0, "==");
+            if (sum (y_corr < 0.0) == 0) {
+                is_zero_y_corr = (y_corr == 0.0);
                 linear_terms = log (y_corr + is_zero_y_corr) - is_zero_y_corr / (1.0 - is_zero_y_corr);
             } else { isNaN = 1; }
         } else { if (link_power ==  1.0) {
@@ -589,16 +589,16 @@ return (Matrix[double] beta, double saturated_log_l, int isNaN)
         } else { if (link_power == -1.0) {
             linear_terms = 1.0 / y_corr;
         } else { if (link_power ==  0.5) {
-            if (sum (ppred (y_corr, 0.0, "<")) == 0) {
+            if (sum (y_corr < 0.0) == 0) {
                 linear_terms = sqrt (y_corr);
             } else { isNaN = 1; }
         } else { if (link_power >   0.0) {
-            if (sum (ppred (y_corr, 0.0, "<")) == 0) {
-                is_zero_y_corr = ppred (y_corr, 0.0, "==");
+            if (sum (y_corr < 0.0) == 0) {
+                is_zero_y_corr = (y_corr == 0.0);
                 linear_terms = (y_corr + is_zero_y_corr) ^ link_power - is_zero_y_corr;
             } else { isNaN = 1; }
         } else {
-            if (sum (ppred (y_corr, 0.0, "<=")) == 0) {
+            if (sum (y_corr <= 0.0) == 0) {
                 linear_terms = y_corr ^ link_power;
             } else { isNaN = 1; }
         }}}}}
@@ -606,31 +606,31 @@ return (Matrix[double] beta, double saturated_log_l, int isNaN)
     if (dist_type == 2 & link_type >= 1 & link_type <= 5)
     { # BINOMIAL/BERNOULLI DISTRIBUTION
         if          (link_type == 1 & link_power == 0.0)  { # Binomial.log
-            if (sum (ppred (y_corr, 0.0, "<")) == 0) {
-                is_zero_y_corr = ppred (y_corr, 0.0, "==");
+            if (sum (y_corr < 0.0) == 0) {
+                is_zero_y_corr = (y_corr == 0.0);
                 linear_terms = log (y_corr + is_zero_y_corr) - is_zero_y_corr / (1.0 - is_zero_y_corr);
             } else { isNaN = 1; }
         } else { if (link_type == 1 & link_power >  0.0)  { # Binomial.power_nonlog pos
-            if (sum (ppred (y_corr, 0.0, "<")) == 0) {
-                is_zero_y_corr = ppred (y_corr, 0.0, "==");
+            if (sum (y_corr < 0.0) == 0) {
+                is_zero_y_corr = (y_corr == 0.0);
                 linear_terms = (y_corr + is_zero_y_corr) ^ link_power - is_zero_y_corr;
             } else { isNaN = 1; }
         } else { if (link_type == 1)                      { # Binomial.power_nonlog neg
-            if (sum (ppred (y_corr, 0.0, "<=")) == 0) {
+            if (sum (y_corr <= 0.0) == 0) {
                 linear_terms = y_corr ^ link_power;
             } else { isNaN = 1; }
         } else { 
-            is_zero_y_corr = ppred (y_corr, 0.0, "<=");
-            is_one_y_corr  = ppred (y_corr, 1.0, ">=");
+            is_zero_y_corr = (y_corr <= 0.0);
+            is_one_y_corr  = (y_corr >= 1.0);
             y_corr = y_corr * (1.0 - is_zero_y_corr) * (1.0 - is_one_y_corr) + 0.5 * (is_zero_y_corr + is_one_y_corr);
             if (link_type == 2)                           { # Binomial.logit
                 linear_terms = log (y_corr / (1.0 - y_corr)) 
                     + is_one_y_corr / (1.0 - is_one_y_corr) - is_zero_y_corr / (1.0 - is_zero_y_corr);
             } else { if (link_type == 3)                  { # Binomial.probit
-                y_below_half = y_corr + (1.0 - 2.0 * y_corr) * ppred (y_corr, 0.5, ">");
+                y_below_half = y_corr + (1.0 - 2.0 * y_corr) * (y_corr > 0.5);
                 t = sqrt (- 2.0 * log (y_below_half));
                 approx_inv_Gauss_CDF = - t + (2.515517 + t * (0.802853 + t * 0.010328)) / (1.0 + t * (1.432788 + t * (0.189269 + t * 0.001308)));
-                linear_terms = approx_inv_Gauss_CDF * (1.0 - 2.0 * ppred (y_corr, 0.5, ">"))
+                linear_terms = approx_inv_Gauss_CDF * (1.0 - 2.0 * (y_corr > 0.5))
                     + is_one_y_corr / (1.0 - is_one_y_corr) - is_zero_y_corr / (1.0 - is_zero_y_corr);
             } else { if (link_type == 4)                  { # Binomial.cloglog
                 linear_terms = log (- log (1.0 - y_corr))
@@ -739,19 +739,19 @@ glm_dist = function (Matrix[double] linear_terms, Matrix[double] Y,
                 vec1 = zeros_r;
                 if (link_power == 0.5)  {
                     vec1 = 1 / (1 - linear_terms ^ 2);
-                } else { if (sum (ppred (linear_terms, 0.0, "<")) == 0) {
+                } else { if (sum (linear_terms < 0.0) == 0) {
                     vec1 = linear_terms ^ (- 2 + 1 / link_power) / (1 - linear_terms ^ (1 / link_power));
                 } else {isNaN = 1;}}
                 # We want a "zero-protected" version of
                 #     vec2 = Y [, 1] / linear_terms;
-                is_y_0 = ppred (Y [, 1], 0.0, "==");
+                is_y_0 = ((Y [, 1]) == 0.0);
                 vec2 = (Y [, 1] + is_y_0) / (linear_terms * (1 - is_y_0) + is_y_0) - is_y_0;
                 g_Y =  (vec2 - Y [, 2] * vec1 * linear_terms) / link_power;
                 w   =  rowSums (Y) * vec1 / link_power ^ 2;
             }
         } else {
-            is_LT_pos_infinite = ppred (linear_terms,  1.0/0.0, "==");
-            is_LT_neg_infinite = ppred (linear_terms, -1.0/0.0, "==");
+            is_LT_pos_infinite = (linear_terms == (1.0/0.0));
+            is_LT_neg_infinite = (linear_terms == (-1.0/0.0));
             is_LT_infinite = is_LT_pos_infinite %*% one_zero + is_LT_neg_infinite %*% zero_one;
             finite_linear_terms = replace (target =        linear_terms, pattern =  1.0/0.0, replacement = 0);
             finite_linear_terms = replace (target = finite_linear_terms, pattern = -1.0/0.0, replacement = 0);
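
The hunk above also shows the zero-protected division idiom used throughout GLM: an equality mask flips problematic entries to a safe value before dividing, then subtracts the mask again so those entries come out as 0 instead of 0/0. A minimal DML sketch, with hypothetical vectors y and d standing in for Y[,1] and linear_terms:

    y = matrix("2 0 3", rows=3, cols=1);
    d = matrix("4 0 5", rows=3, cols=1);
    is_y_0 = (y == 0.0);
    # where y is 0, numerator and denominator both become 1, and the final "- is_y_0" maps the result to 0
    safe_ratio = (y + is_y_0) / (d * (1 - is_y_0) + is_y_0) - is_y_0;
    print("protected ratios sum: " + sum(safe_ratio));
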
@@ -762,7 +762,7 @@ glm_dist = function (Matrix[double] linear_terms, Matrix[double] Y,
                 g_Y = rowSums (Y * (Y_prob %*% flip_neg));           ### = y_residual;
                 w   = rowSums (Y * (Y_prob %*% flip_pos) * Y_prob);  ### = y_variance;
             } else { if (link_type == 3)                  { # Binomial.probit
-                is_lt_pos = ppred (linear_terms, 0.0, ">=");
+                is_lt_pos = (linear_terms >= 0.0);
                 t_gp = 1.0 / (1.0 + abs (finite_linear_terms) * 0.231641888);  # 0.231641888 = 0.3275911 / sqrt (2.0)
                 pt_gp = t_gp * ( 0.254829592 
                       + t_gp * (-0.284496736 # "Handbook of Mathematical Functions", ed. by M. Abramowitz and I.A. Stegun,
@@ -777,7 +777,7 @@ glm_dist = function (Matrix[double] linear_terms, Matrix[double] Y,
             } else { if (link_type == 4)                  { # Binomial.cloglog
                 the_exp = exp (linear_terms)
                 the_exp_exp = exp (- the_exp);
-                is_too_small = ppred (10000000 + the_exp, 10000000, "==");
+                is_too_small = ((10000000 + the_exp) == 10000000);
                 the_exp_ratio = (1 - is_too_small) * (1 - the_exp_exp) / (the_exp + is_too_small) + is_too_small * (1 - the_exp / 2);
                 g_Y =  (rowSums (Y) * the_exp_exp - Y [, 2]) / the_exp_ratio;
                 w   =  the_exp_exp * the_exp * rowSums (Y) / the_exp_ratio;
@@ -811,38 +811,38 @@ glm_log_likelihood_part = function (Matrix[double] linear_terms, Matrix[double]
         is_natural_parameter_log_zero = zeros_r;
         if          (var_power == 1.0 & link_power == 0.0)  { # Poisson.log
             b_cumulant = exp (linear_terms);
-            is_natural_parameter_log_zero = ppred (linear_terms, -1.0/0.0, "==");
+            is_natural_parameter_log_zero = (linear_terms == -1.0/0.0);
             natural_parameters = replace (target = linear_terms, pattern = -1.0/0.0, replacement = 0);
         } else { if (var_power == 1.0 & link_power == 1.0)  { # Poisson.id
-            if (sum (ppred (linear_terms, 0.0, "<")) == 0)  {
+            if (sum (linear_terms < 0.0) == 0)  {
                 b_cumulant = linear_terms;
-                is_natural_parameter_log_zero = ppred (linear_terms, 0.0, "==");
+                is_natural_parameter_log_zero = (linear_terms == 0.0);
                 natural_parameters = log (linear_terms + is_natural_parameter_log_zero);
             } else {isNaN = 1;}
         } else { if (var_power == 1.0 & link_power == 0.5)  { # Poisson.sqrt
-            if (sum (ppred (linear_terms, 0.0, "<")) == 0)  {
+            if (sum (linear_terms < 0.0) == 0)  {
                 b_cumulant = linear_terms ^ 2;
-                is_natural_parameter_log_zero = ppred (linear_terms, 0.0, "==");
+                is_natural_parameter_log_zero = (linear_terms == 0.0);
                 natural_parameters = 2.0 * log (linear_terms + is_natural_parameter_log_zero);
             } else {isNaN = 1;}
         } else { if (var_power == 1.0 & link_power  > 0.0)  { # Poisson.power_nonlog, pos
-            if (sum (ppred (linear_terms, 0.0, "<")) == 0)  {
-                is_natural_parameter_log_zero = ppred (linear_terms, 0.0, "==");
+            if (sum (linear_terms < 0.0) == 0)  {
+                is_natural_parameter_log_zero = (linear_terms == 0.0);
                 b_cumulant = (linear_terms + is_natural_parameter_log_zero) ^ (1.0 / link_power) - is_natural_parameter_log_zero;
                 natural_parameters = log (linear_terms + is_natural_parameter_log_zero) / link_power;
             } else {isNaN = 1;}
         } else { if (var_power == 1.0)                      { # Poisson.power_nonlog, neg
-            if (sum (ppred (linear_terms, 0.0, "<=")) == 0) {
+            if (sum (linear_terms <= 0.0) == 0) {
                 b_cumulant = linear_terms ^ (1.0 / link_power);
                 natural_parameters = log (linear_terms) / link_power;
             } else {isNaN = 1;}
         } else { if (var_power == 2.0 & link_power == -1.0) { # Gamma.inverse
-            if (sum (ppred (linear_terms, 0.0, "<=")) == 0) {
+            if (sum (linear_terms <= 0.0) == 0) {
                 b_cumulant = - log (linear_terms);
                 natural_parameters = - linear_terms;
             } else {isNaN = 1;}
         } else { if (var_power == 2.0 & link_power ==  1.0) { # Gamma.id
-            if (sum (ppred (linear_terms, 0.0, "<=")) == 0) {
+            if (sum (linear_terms <= 0.0) == 0) {
                 b_cumulant = log (linear_terms);
                 natural_parameters = - 1.0 / linear_terms;
             } else {isNaN = 1;}
@@ -850,7 +850,7 @@ glm_log_likelihood_part = function (Matrix[double] linear_terms, Matrix[double]
             b_cumulant = linear_terms;
             natural_parameters = - exp (- linear_terms);
         } else { if (var_power == 2.0)                      { # Gamma.power_nonlog
-            if (sum (ppred (linear_terms, 0.0, "<=")) == 0) {
+            if (sum (linear_terms <= 0.0) == 0) {
                 b_cumulant = log (linear_terms) / link_power;
                 natural_parameters = - linear_terms ^ (- 1.0 / link_power);
             } else {isNaN = 1;}
@@ -867,7 +867,7 @@ glm_log_likelihood_part = function (Matrix[double] linear_terms, Matrix[double]
             } else { if ( 2 * link_power == 1.0 - var_power) {
                 natural_parameters = linear_terms ^ 2 / (1.0 - var_power);
             } else {
-                if (sum (ppred (linear_terms, 0.0, "<=")) == 0) {
+                if (sum (linear_terms <= 0.0) == 0) {
                     power = (1.0 - var_power) / link_power;
                     natural_parameters = (linear_terms ^ power) / (1.0 - var_power);
                 } else {isNaN = 1;}
@@ -881,7 +881,7 @@ glm_log_likelihood_part = function (Matrix[double] linear_terms, Matrix[double]
             } else { if ( 2 * link_power == 2.0 - var_power) {
                 b_cumulant = linear_terms ^ 2 / (2.0 - var_power);
             } else {
-                if (sum (ppred (linear_terms, 0.0, "<=")) == 0) {
+                if (sum (linear_terms <= 0.0) == 0) {
                     power = (2.0 - var_power) / link_power;
                     b_cumulant = (linear_terms ^ power) / (2.0 - var_power);
                 } else {isNaN = 1;}
@@ -905,7 +905,7 @@ glm_log_likelihood_part = function (Matrix[double] linear_terms, Matrix[double]
         [Y_prob, isNaN] = binomial_probability_two_column (linear_terms, link_type, link_power);
         
         if (isNaN == 0) {            
-            does_prob_contradict = ppred (Y_prob, 0.0, "<=");
+            does_prob_contradict = (Y_prob <= 0.0);
             if (sum (does_prob_contradict * abs (Y)) == 0.0) {
                 log_l = sum (Y * log (Y_prob * (1 - does_prob_contradict) + does_prob_contradict));
                 if (log_l != log_l | (log_l == log_l + 1.0 & log_l == log_l * 2.0)) {
@@ -954,13 +954,13 @@ binomial_probability_two_column =
         } else { if (link_power == 0.5) { # Binomial.sqrt
             Y_prob = (linear_terms ^ 2) %*% p_one_m_one + ones_r %*% zero_one;    
         } else {                          # Binomial.power_nonlog
-            if (sum (ppred (linear_terms, 0.0, "<")) == 0) {
+            if (sum (linear_terms < 0.0) == 0) {
                 Y_prob = (linear_terms ^ (1.0 / link_power)) %*% p_one_m_one + ones_r %*% zero_one;    
             } else {isNaN = 1;}
         }}
     } else {              # Binomial.non_power
-        is_LT_pos_infinite = ppred (linear_terms,  1.0/0.0, "==");
-        is_LT_neg_infinite = ppred (linear_terms, -1.0/0.0, "==");
+        is_LT_pos_infinite = (linear_terms == (1.0/0.0));
+        is_LT_neg_infinite = (linear_terms == (-1.0/0.0));
         is_LT_infinite = is_LT_pos_infinite %*% one_zero + is_LT_neg_infinite %*% zero_one;
         finite_linear_terms = replace (target =        linear_terms, pattern =  1.0/0.0, replacement = 0);
         finite_linear_terms = replace (target = finite_linear_terms, pattern = -1.0/0.0, replacement = 0);
@@ -968,7 +968,7 @@ binomial_probability_two_column =
             Y_prob = exp (finite_linear_terms) %*% one_zero + ones_r %*% zero_one;
             Y_prob = Y_prob / (rowSums (Y_prob) %*% ones_2);
         } else { if (link_type == 3)    { # Binomial.probit
-            lt_pos_neg = ppred (finite_linear_terms, 0.0, ">=") %*% p_one_m_one + ones_r %*% zero_one;
+            lt_pos_neg = (finite_linear_terms >= 0.0) %*% p_one_m_one + ones_r %*% zero_one;
             t_gp = 1.0 / (1.0 + abs (finite_linear_terms) * 0.231641888);  # 0.231641888 = 0.3275911 / sqrt (2.0)
             pt_gp = t_gp * ( 0.254829592 
                   + t_gp * (-0.284496736 # "Handbook of Mathematical Functions", ed. by M. Abramowitz and I.A. Stegun,
@@ -980,7 +980,7 @@ binomial_probability_two_column =
         } else { if (link_type == 4)    { # Binomial.cloglog
             the_exp = exp (finite_linear_terms);
             the_exp_exp = exp (- the_exp);
-            is_too_small = ppred (10000000 + the_exp, 10000000, "==");
+            is_too_small = ((10000000 + the_exp) == 10000000);
             Y_prob [, 1] = (1 - is_too_small) * (1 - the_exp_exp) + is_too_small * the_exp * (1 - the_exp / 2);
             Y_prob [, 2] = the_exp_exp;
         } else { if (link_type == 5)    { # Binomial.cauchit

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/glm/GLM.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/glm/GLM.pydml b/src/test/scripts/applications/glm/GLM.pydml
index 5f8be13..1875f33 100644
--- a/src/test/scripts/applications/glm/GLM.pydml
+++ b/src/test/scripts/applications/glm/GLM.pydml
@@ -195,7 +195,7 @@ if (intercept_status == 2): # scale-&-shift X columns to mean 0, variance 1
     # Important assumption: X [, num_features] = ones_r
     avg_X_cols = transpose(colSums(X)) / num_records
     var_X_cols = (transpose(colSums (X ** 2)) - num_records * (avg_X_cols ** 2)) / (num_records - 1)
-    is_unsafe = ppred (var_X_cols, 0.0, "<=")
+    is_unsafe = (var_X_cols <= 0.0)
     scale_X = 1.0 / sqrt (var_X_cols * (1 - is_unsafe) + is_unsafe)
     scale_X [num_features-1, 0] = 1
     shift_X = - avg_X_cols * scale_X
@@ -227,7 +227,7 @@ if (max_iteration_CG == 0):
 # In Bernoulli case, convert one-column "Y" into two-column
 
 if (distribution_type == 2 & ncol(Y) == 1):
-    is_Y_negative = ppred (Y, bernoulli_No_label, "==")
+    is_Y_negative = (Y == bernoulli_No_label)
     Y = cbind (1 - is_Y_negative, is_Y_negative)
     count_Y_negative = sum (is_Y_negative)
     if (count_Y_negative == 0):
@@ -644,14 +644,14 @@ def glm_initialize(X: matrix[float], Y: matrix[float], dist_type: int, var_power
     y_corr = Y [, 0]
     if (dist_type == 2):
         n_corr = rowSums (Y)
-        is_n_zero = ppred (n_corr, 0.0, "==")
+        is_n_zero = (n_corr == 0.0)
         y_corr = Y [, 0] / (n_corr + is_n_zero) + (0.5 - Y [, 0]) * is_n_zero
     
     linear_terms = y_corr
     if (dist_type == 1 & link_type == 1): # POWER DISTRIBUTION
         if (link_power ==  0.0):
-            if (sum (ppred (y_corr, 0.0, "<")) == 0):
-                is_zero_y_corr = ppred (y_corr, 0.0, "==")
+            if (sum (y_corr < 0.0) == 0):
+                is_zero_y_corr = (y_corr == 0.0)
                 linear_terms = log (y_corr + is_zero_y_corr) - is_zero_y_corr / (1.0 - is_zero_y_corr)
             else:
                 isNaN = 1
@@ -664,21 +664,21 @@ def glm_initialize(X: matrix[float], Y: matrix[float], dist_type: int, var_power
                     linear_terms = 1.0 / y_corr
                 else:
                     if (link_power ==  0.5):
-                        if (sum (ppred (y_corr, 0.0, "<")) == 0):
+                        if (sum (y_corr < 0.0) == 0):
                             linear_terms = sqrt (y_corr)
                         else:
                             isNaN = 1
                         
                     else:
                         if (link_power >   0.0):
-                            if (sum (ppred (y_corr, 0.0, "<")) == 0):
-                                is_zero_y_corr = ppred (y_corr, 0.0, "==")
+                            if (sum (y_corr < 0.0) == 0):
+                                is_zero_y_corr = (y_corr == 0.0)
                                 linear_terms = (y_corr + is_zero_y_corr) ** link_power - is_zero_y_corr
                             else:
                                 isNaN = 1
                             
                         else:
-                            if (sum (ppred (y_corr, 0.0, "<=")) == 0):
+                            if (sum (y_corr <= 0.0) == 0):
                                 linear_terms = y_corr ** link_power
                             else:
                                 isNaN = 1
@@ -691,39 +691,39 @@ def glm_initialize(X: matrix[float], Y: matrix[float], dist_type: int, var_power
     
     if (dist_type == 2 & link_type >= 1 & link_type <= 5): # BINOMIAL/BERNOULLI DISTRIBUTION
         if (link_type == 1 & link_power == 0.0): # Binomial.log
-            if (sum (ppred (y_corr, 0.0, "<")) == 0):
-                is_zero_y_corr = ppred (y_corr, 0.0, "==")
+            if (sum (y_corr < 0.0) == 0):
+                is_zero_y_corr = (y_corr == 0.0)
                 linear_terms = log (y_corr + is_zero_y_corr) - is_zero_y_corr / (1.0 - is_zero_y_corr)
             else:
                 isNaN = 1
             
         else:
             if (link_type == 1 & link_power >  0.0): # Binomial.power_nonlog pos
-                if (sum (ppred (y_corr, 0.0, "<")) == 0):
-                    is_zero_y_corr = ppred (y_corr, 0.0, "==")
+                if (sum (y_corr < 0.0) == 0):
+                    is_zero_y_corr = (y_corr == 0.0)
                     linear_terms = (y_corr + is_zero_y_corr) ** link_power - is_zero_y_corr
                 else:
                     isNaN = 1
                 
             else:
                 if (link_type == 1): # Binomial.power_nonlog neg
-                    if (sum (ppred (y_corr, 0.0, "<=")) == 0):
+                    if (sum (y_corr <= 0.0) == 0):
                         linear_terms = y_corr ** link_power
                     else:
                         isNaN = 1
                     
                 else:
-                    is_zero_y_corr = ppred (y_corr, 0.0, "<=")
-                    is_one_y_corr  = ppred (y_corr, 1.0, ">=")
+                    is_zero_y_corr = (y_corr <= 0.0)
+                    is_one_y_corr  = (y_corr >= 1.0)
                     y_corr = y_corr * (1.0 - is_zero_y_corr) * (1.0 - is_one_y_corr) + 0.5 * (is_zero_y_corr + is_one_y_corr)
                     if (link_type == 2): # Binomial.logit
                         linear_terms = log (y_corr / (1.0 - y_corr)) + is_one_y_corr / (1.0 - is_one_y_corr) - is_zero_y_corr / (1.0 - is_zero_y_corr)
                     else:
                         if (link_type == 3): # Binomial.probit
-                            y_below_half = y_corr + (1.0 - 2.0 * y_corr) * ppred (y_corr, 0.5, ">")
+                            y_below_half = y_corr + (1.0 - 2.0 * y_corr) * (y_corr > 0.5)
                             t = sqrt (- 2.0 * log (y_below_half))
                             approx_inv_Gauss_CDF = - t + (2.515517 + t * (0.802853 + t * 0.010328)) / (1.0 + t * (1.432788 + t * (0.189269 + t * 0.001308)))
-                            linear_terms = approx_inv_Gauss_CDF * (1.0 - 2.0 * ppred (y_corr, 0.5, ">")) + is_one_y_corr / (1.0 - is_one_y_corr) - is_zero_y_corr / (1.0 - is_zero_y_corr)
+                            linear_terms = approx_inv_Gauss_CDF * (1.0 - 2.0 * (y_corr > 0.5)) + is_one_y_corr / (1.0 - is_one_y_corr) - is_zero_y_corr / (1.0 - is_zero_y_corr)
                         else:
                             if (link_type == 4): # Binomial.cloglog
                                 linear_terms = log (- log (1.0 - y_corr)) - log (- log (0.5)) * (is_zero_y_corr + is_one_y_corr) + is_one_y_corr / (1.0 - is_one_y_corr) - is_zero_y_corr / (1.0 - is_zero_y_corr)
@@ -838,7 +838,7 @@ def glm_dist(linear_terms: matrix[float], Y: matrix[float], dist_type: int, var_
                 if (link_power == 0.5):
                     vec1 = 1 / (1 - linear_terms ** 2)
                 else:
-                    if (sum (ppred (linear_terms, 0.0, "<")) == 0):
+                    if (sum (linear_terms < 0.0) == 0):
                         vec1 = linear_terms ** (- 2 + 1 / link_power) / (1 - linear_terms ** (1 / link_power))
                     else:
                         isNaN = 1
@@ -846,14 +846,14 @@ def glm_dist(linear_terms: matrix[float], Y: matrix[float], dist_type: int, var_
                 
                 # We want a "zero-protected" version of
                 # vec2 = Y [, 1] / linear_terms
-                is_y_0 = ppred (Y [, 0], 0.0, "==")
+                is_y_0 = (Y[, 0] == 0.0)
                 vec2 = (Y [, 0] + is_y_0) / (linear_terms * (1 - is_y_0) + is_y_0) - is_y_0
                 g_Y =  (vec2 - Y [, 1] * vec1 * linear_terms) / link_power
                 w   =  rowSums (Y) * vec1 / link_power ** 2
             
         else:
-            is_LT_pos_infinite = ppred (linear_terms,  1.0/0.0, "==")
-            is_LT_neg_infinite = ppred (linear_terms, -1.0/0.0, "==")
+            is_LT_pos_infinite = (linear_terms == (1.0/0.0))
+            is_LT_neg_infinite = (linear_terms == (-1.0/0.0))
             is_LT_infinite = dot(is_LT_pos_infinite, one_zero) + dot(is_LT_neg_infinite, zero_one)
             finite_linear_terms = replace (target =        linear_terms, pattern =  1.0/0.0, replacement = 0)
             finite_linear_terms = replace (target = finite_linear_terms, pattern = -1.0/0.0, replacement = 0)
@@ -865,7 +865,7 @@ def glm_dist(linear_terms: matrix[float], Y: matrix[float], dist_type: int, var_
                 w   = rowSums (Y * (dot(Y_prob, flip_pos)) * Y_prob)  # = y_variance
             else:
                 if (link_type == 3): # Binomial.probit
-                    is_lt_pos = ppred (linear_terms, 0.0, ">=")
+                    is_lt_pos = (linear_terms >= 0.0)
                     t_gp = 1.0 / (1.0 + abs (finite_linear_terms) * 0.231641888)  # 0.231641888 = 0.3275911 / sqrt (2.0)
                     pt_gp = t_gp * ( 0.254829592 
                         + t_gp * (-0.284496736 # "Handbook of Mathematical Functions", ed. by M. Abramowitz and I.A. Stegun,
@@ -881,7 +881,7 @@ def glm_dist(linear_terms: matrix[float], Y: matrix[float], dist_type: int, var_
                     if (link_type == 4): # Binomial.cloglog
                         the_exp = exp (linear_terms)
                         the_exp_exp = exp (- the_exp)
-                        is_too_small = ppred (10000000 + the_exp, 10000000, "==")
+                        is_too_small = ((10000000 + the_exp) == 10000000)
                         the_exp_ratio = (1 - is_too_small) * (1 - the_exp_exp) / (the_exp + is_too_small) + is_too_small * (1 - the_exp / 2)
                         g_Y =  (rowSums (Y) * the_exp_exp - Y [, 1]) / the_exp_ratio
                         w   =  the_exp_exp * the_exp * rowSums (Y) / the_exp_ratio
@@ -915,30 +915,30 @@ def glm_log_likelihood_part(linear_terms: matrix[float], Y: matrix[float],
         is_natural_parameter_log_zero = zeros_r
         if (var_power == 1.0 & link_power == 0.0): # Poisson.log
             b_cumulant = exp (linear_terms)
-            is_natural_parameter_log_zero = ppred (linear_terms, -1.0/0.0, "==")
+            is_natural_parameter_log_zero = (linear_terms == (-1.0/0.0))
             natural_parameters = replace (target = linear_terms, pattern = -1.0/0.0, replacement = 0)
         else:
             if (var_power == 1.0 & link_power == 1.0): # Poisson.id
-                if (sum (ppred (linear_terms, 0.0, "<")) == 0):
+                if (sum (linear_terms < 0.0) == 0):
                     b_cumulant = linear_terms
-                    is_natural_parameter_log_zero = ppred (linear_terms, 0.0, "==")
+                    is_natural_parameter_log_zero = (linear_terms == 0.0)
                     natural_parameters = log (linear_terms + is_natural_parameter_log_zero)
                 else:
                     isNaN = 1
                 
             else:
                 if (var_power == 1.0 & link_power == 0.5): # Poisson.sqrt
-                    if (sum (ppred (linear_terms, 0.0, "<")) == 0):
+                    if (sum (linear_terms < 0.0) == 0):
                         b_cumulant = linear_terms ** 2
-                        is_natural_parameter_log_zero = ppred (linear_terms, 0.0, "==")
+                        is_natural_parameter_log_zero = (linear_terms == 0.0)
                         natural_parameters = 2.0 * log (linear_terms + is_natural_parameter_log_zero)
                     else:
                         isNaN = 1
                     
                 else:
                     if (var_power == 1.0 & link_power  > 0.0): # Poisson.power_nonlog, pos
-                        if (sum (ppred (linear_terms, 0.0, "<")) == 0):
-                            is_natural_parameter_log_zero = ppred (linear_terms, 0.0, "==")
+                        if (sum (linear_terms < 0.0) == 0):
+                            is_natural_parameter_log_zero = (linear_terms == 0.0)
                             b_cumulant = (linear_terms + is_natural_parameter_log_zero) ** (1.0 / link_power) - is_natural_parameter_log_zero
                             natural_parameters = log (linear_terms + is_natural_parameter_log_zero) / link_power
                         else:
@@ -946,7 +946,7 @@ def glm_log_likelihood_part(linear_terms: matrix[float], Y: matrix[float],
                         
                     else:
                         if (var_power == 1.0): # Poisson.power_nonlog, neg
-                            if (sum (ppred (linear_terms, 0.0, "<=")) == 0):
+                            if (sum (linear_terms <= 0.0) == 0):
                                 b_cumulant = linear_terms ** (1.0 / link_power)
                                 natural_parameters = log (linear_terms) / link_power
                             else:
@@ -954,7 +954,7 @@ def glm_log_likelihood_part(linear_terms: matrix[float], Y: matrix[float],
                             
                         else:
                             if (var_power == 2.0 & link_power == -1.0): # Gamma.inverse
-                                if (sum (ppred (linear_terms, 0.0, "<=")) == 0):
+                                if (sum (linear_terms <= 0.0) == 0):
                                     b_cumulant = - log (linear_terms)
                                     natural_parameters = - linear_terms
                                 else:
@@ -962,7 +962,7 @@ def glm_log_likelihood_part(linear_terms: matrix[float], Y: matrix[float],
                                 
                             else:
                                 if (var_power == 2.0 & link_power ==  1.0): # Gamma.id
-                                    if (sum (ppred (linear_terms, 0.0, "<=")) == 0):
+                                    if (sum (linear_terms <= 0.0) == 0):
                                         b_cumulant = log (linear_terms)
                                         natural_parameters = - 1.0 / linear_terms
                                     else:
@@ -974,7 +974,7 @@ def glm_log_likelihood_part(linear_terms: matrix[float], Y: matrix[float],
                                         natural_parameters = - exp (- linear_terms)
                                     else:
                                         if (var_power == 2.0): # Gamma.power_nonlog
-                                            if (sum (ppred (linear_terms, 0.0, "<=")) == 0):
+                                            if (sum(linear_terms <= 0.0) == 0):
                                                 b_cumulant = log (linear_terms) / link_power
                                                 natural_parameters = - linear_terms ** (- 1.0 / link_power)
                                             else:
@@ -997,7 +997,7 @@ def glm_log_likelihood_part(linear_terms: matrix[float], Y: matrix[float],
                                                             if ( 2 * link_power == 1.0 - var_power):
                                                                 natural_parameters = linear_terms ** 2 / (1.0 - var_power)
                                                             else:
-                                                                if (sum (ppred (linear_terms, 0.0, "<=")) == 0):
+                                                                if (sum (linear_terms <= 0.0) == 0):
                                                                     power = (1.0 - var_power) / link_power
                                                                     natural_parameters = (linear_terms ** power) / (1.0 - var_power)
                                                                 else:
@@ -1019,7 +1019,7 @@ def glm_log_likelihood_part(linear_terms: matrix[float], Y: matrix[float],
                                                             if ( 2 * link_power == 2.0 - var_power):
                                                                 b_cumulant = linear_terms ** 2 / (2.0 - var_power)
                                                             else:
-                                                                if (sum (ppred (linear_terms, 0.0, "<=")) == 0):
+                                                                if (sum (linear_terms <= 0.0) == 0):
                                                                     power = (2.0 - var_power) / link_power
                                                                     b_cumulant = (linear_terms ** power) / (2.0 - var_power)
                                                                 else:
@@ -1053,7 +1053,7 @@ def glm_log_likelihood_part(linear_terms: matrix[float], Y: matrix[float],
         [Y_prob, isNaN] = binomial_probability_two_column (linear_terms, link_type, link_power)
         
         if (isNaN == 0):
-            does_prob_contradict = ppred (Y_prob, 0.0, "<=")
+            does_prob_contradict = (Y_prob <= 0.0)
             if (sum (does_prob_contradict * abs (Y)) == 0.0):
                 log_l = sum (Y * log (Y_prob * (1 - does_prob_contradict) + does_prob_contradict))
                 if (log_l != log_l | (log_l == log_l + 1.0 & log_l == log_l * 2.0)):
@@ -1095,13 +1095,13 @@ def binomial_probability_two_column(linear_terms: matrix[float], link_type: int,
             if (link_power == 0.5): # Binomial.sqrt
                 Y_prob = dot((linear_terms ** 2), p_one_m_one) + dot(ones_r, zero_one)
             else: # Binomial.power_nonlog
-                if (sum (ppred (linear_terms, 0.0, "<")) == 0):
+                if (sum(linear_terms < 0.0) == 0):
                     Y_prob = dot((linear_terms ** (1.0 / link_power)), p_one_m_one) + dot(ones_r, zero_one)
                 else:
                     isNaN = 1
     else: # Binomial.non_power
-        is_LT_pos_infinite = ppred (linear_terms,  1.0/0.0, "==")
-        is_LT_neg_infinite = ppred (linear_terms, -1.0/0.0, "==")
+        is_LT_pos_infinite = (linear_terms == (1.0/0.0))
+        is_LT_neg_infinite = (linear_terms == (-1.0/0.0))
         is_LT_infinite = dot(is_LT_pos_infinite, one_zero) + dot(is_LT_neg_infinite, zero_one)
         finite_linear_terms = replace (target =        linear_terms, pattern =  1.0/0.0, replacement = 0)
         finite_linear_terms = replace (target = finite_linear_terms, pattern = -1.0/0.0, replacement = 0)
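
The same +/-infinity handling recurs here: equality masks against 1.0/0.0 and -1.0/0.0 record where the linear terms are infinite, and replace() then zeroes those entries so the subsequent arithmetic stays finite. A standalone sketch of the idiom, written in DML for brevity (the input vector is hypothetical; one +Inf entry is injected by hand):

    lt = matrix("1.5 -2.0 0.0", rows=3, cols=1);
    lt[3,1] = 1.0/0.0;                                   # inject a +Inf entry
    is_pos_inf = (lt ==  1.0/0.0);
    is_neg_inf = (lt == -1.0/0.0);
    finite_lt = replace(target=lt, pattern= 1.0/0.0, replacement=0);
    finite_lt = replace(target=finite_lt, pattern=-1.0/0.0, replacement=0);
    print("+Inf entries: " + sum(is_pos_inf) + ", -Inf entries: " + sum(is_neg_inf));
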
@@ -1110,7 +1110,7 @@ def binomial_probability_two_column(linear_terms: matrix[float], link_type: int,
             Y_prob = Y_prob / (dot(rowSums (Y_prob), ones_2))
         else:
             if (link_type == 3): # Binomial.probit
-                lt_pos_neg = dot(ppred (finite_linear_terms, 0.0, ">="), p_one_m_one) + dot(ones_r, zero_one)
+                lt_pos_neg = dot((finite_linear_terms >= 0.0), p_one_m_one) + dot(ones_r, zero_one)
                 t_gp = 1.0 / (1.0 + abs (finite_linear_terms) * 0.231641888)  # 0.231641888 = 0.3275911 / sqrt (2.0)
                 pt_gp = t_gp * ( 0.254829592 
                     + t_gp * (-0.284496736 # "Handbook of Mathematical Functions", ed. by M. Abramowitz and I.A. Stegun,
@@ -1123,7 +1123,7 @@ def binomial_probability_two_column(linear_terms: matrix[float], link_type: int,
                 if (link_type == 4): # Binomial.cloglog
                     the_exp = exp (finite_linear_terms)
                     the_exp_exp = exp (- the_exp)
-                    is_too_small = ppred (10000000 + the_exp, 10000000, "==")
+                    is_too_small = ((10000000 + the_exp) == 10000000)
                     Y_prob [, 0] = (1 - is_too_small) * (1 - the_exp_exp) + is_too_small * the_exp * (1 - the_exp / 2)
                     Y_prob [, 1] = the_exp_exp
                 else:

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/id3/id3.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/id3/id3.dml b/src/test/scripts/applications/id3/id3.dml
index a127fc8..69b59e1 100644
--- a/src/test/scripts/applications/id3/id3.dml
+++ b/src/test/scripts/applications/id3/id3.dml
@@ -100,7 +100,7 @@ id3_learn = function(Matrix[Double] X, Matrix[Double] y, Matrix[Double] X_subset
 	#with non-zero samples
 	#and to pull out the most popular label
 	
-	num_non_zero_labels = sum(ppred(hist_labels, 0, ">"));
+	num_non_zero_labels = sum(hist_labels > 0);
 	most_popular_label = rowIndexMax(t(hist_labels));
 	num_remaining_attrs = sum(attributes)
 	
@@ -126,7 +126,7 @@ id3_learn = function(Matrix[Double] X, Matrix[Double] y, Matrix[Double] X_subset
 		hist_labels2 = aggregate(target=X_subset, groups=y, fn="sum")
 		num_samples2 = sum(X_subset)
 		print(num_samples2+" #samples")
-		zero_entries_in_hist1 = ppred(hist_labels2, 0, "==")
+		zero_entries_in_hist1 = (hist_labels2 == 0)
 		pi1 = hist_labels2/num_samples2
 		log_term1 = zero_entries_in_hist1*1 + (1-zero_entries_in_hist1)*pi1
 		entropy_vector1 = -pi1*log(log_term1)
@@ -144,12 +144,12 @@ id3_learn = function(Matrix[Double] X, Matrix[Double] y, Matrix[Double] X_subset
         for(j in 1:nrow(attr_domain), check=0){
 					if(as.scalar(attr_domain[j,1]) != 0){
 						val = j
-						Tj = X_subset * ppred(X[,i], val, "==")
+						Tj = X_subset * (X[,i] == val)
 						
 						#entropy = compute_entropy(Tj, y)
 						hist_labels1 = aggregate(target=Tj, groups=y, fn="sum")
 						num_samples1 = sum(Tj)
-						zero_entries_in_hist = ppred(hist_labels1, 0, "==")
+						zero_entries_in_hist = (hist_labels1 == 0)
 						pi = hist_labels1/num_samples1
 						log_term = zero_entries_in_hist*1 + (1-zero_entries_in_hist)*pi
 						entropy_vector = -pi*log(log_term)
@@ -203,7 +203,7 @@ id3_learn = function(Matrix[Double] X, Matrix[Double] y, Matrix[Double] X_subset
 		
     for(i1 in 1:nrow(attr_domain), check=0){
 			
-			Ti = X_subset * ppred(X[,best_attr], i1, "==")
+			Ti = X_subset * (X[,best_attr] == i1)
 			num_nodes_Ti = sum(Ti)
 			
 			if(num_nodes_Ti > 0){
@@ -234,7 +234,7 @@ id3_learn = function(Matrix[Double] X, Matrix[Double] y, Matrix[Double] X_subset
 		
 		#edges from root to children
 		if(1==1){
-			sz = sum(ppred(numSubtreeNodes, 0, ">")) + num_edges_in_subtrees
+			sz = sum(numSubtreeNodes > 0) + num_edges_in_subtrees
 		}
 		edges = matrix(1, rows=sz, cols=3)
 		numEdges = 0
@@ -295,7 +295,7 @@ y = y + labelCorrection + 0
 [nodes, edges] = id3_learn(X, y, X_subset, attributes, minsplit)
 
 # decoding outputs
-nodes[,2] = nodes[,2] - labelCorrection * ppred(nodes[,1], -1, "==")
+nodes[,2] = nodes[,2] - labelCorrection * (nodes[,1] == -1)
 for(i3 in 1:nrow(edges)){
 #parfor(i3 in 1:nrow(edges)){
 	e_parent = as.scalar(edges[i3,1])
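
In the id3 scripts the comparison masks feed a zero-protected entropy computation: zero counts are mapped to 1 inside the log so that 0*log(1) contributes nothing. A short standalone DML sketch (hist below is a hypothetical label histogram at one node):

    hist = matrix("4 0 6", rows=3, cols=1);          # label counts
    n = sum(hist);
    zero_entries = (hist == 0);
    pi = hist / n;
    log_term = zero_entries * 1 + (1 - zero_entries) * pi;
    entropy = -sum(pi * log(log_term));
    print("entropy: " + entropy);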

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/id3/id3.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/id3/id3.pydml b/src/test/scripts/applications/id3/id3.pydml
index 02b80b5..9bfc70a 100644
--- a/src/test/scripts/applications/id3/id3.pydml
+++ b/src/test/scripts/applications/id3/id3.pydml
@@ -100,7 +100,7 @@ def id3_learn(X:matrix[float], y:matrix[float], X_subset:matrix[float], attribut
     #with non-zero samples
     #and to pull out the most popular label
     
-    num_non_zero_labels = sum(ppred(hist_labels, 0, ">"))
+    num_non_zero_labels = sum(hist_labels > 0)
     most_popular_label = rowIndexMax(t(hist_labels))
     num_remaining_attrs = sum(attributes)
     
@@ -126,7 +126,7 @@ def id3_learn(X:matrix[float], y:matrix[float], X_subset:matrix[float], attribut
         hist_labels2 = aggregate(target=X_subset, groups=y, fn="sum")
         num_samples2 = sum(X_subset)
         print(num_samples2+" #samples")
-        zero_entries_in_hist1 = ppred(hist_labels2, 0, "==")
+        zero_entries_in_hist1 = (hist_labels2 == 0)
         pi1 = hist_labels2/num_samples2
         log_term1 = zero_entries_in_hist1*1 + (1-zero_entries_in_hist1)*pi1
         entropy_vector1 = -pi1*log(log_term1)
@@ -144,12 +144,12 @@ def id3_learn(X:matrix[float], y:matrix[float], X_subset:matrix[float], attribut
                 for(j in 1:nrow(attr_domain), check=0):
                     if(scalar(attr_domain[j-1,0]) != 0):
                         val = j
-                        Tj = X_subset * ppred(X[,i-1], val, "==")
+                        Tj = X_subset * (X[,i-1] == val)
                         
                         #entropy = compute_entropy(Tj, y)
                         hist_labels1 = aggregate(target=Tj, groups=y, fn="sum")
                         num_samples1 = sum(Tj)
-                        zero_entries_in_hist = ppred(hist_labels1, 0, "==")
+                        zero_entries_in_hist = (hist_labels1 == 0)
                         pi = hist_labels1/num_samples1
                         log_term = zero_entries_in_hist*1 + (1-zero_entries_in_hist)*pi
                         entropy_vector = -pi*log(log_term)
@@ -194,7 +194,7 @@ def id3_learn(X:matrix[float], y:matrix[float], X_subset:matrix[float], attribut
         
         for(i1 in 1:nrow(attr_domain), check=0):
             
-            Ti = X_subset * ppred(X[,best_attr-1], i1, "==")
+            Ti = X_subset * (X[,best_attr-1] == i1)
             num_nodes_Ti = sum(Ti)
             
             if(num_nodes_Ti > 0):
@@ -222,7 +222,7 @@ def id3_learn(X:matrix[float], y:matrix[float], X_subset:matrix[float], attribut
         
         #edges from root to children
         if(1==1):
-            sz = sum(ppred(numSubtreeNodes, 0, ">")) + num_edges_in_subtrees
+            sz = sum(numSubtreeNodes > 0) + num_edges_in_subtrees
         
         edges = full(1, rows=sz, cols=3)
         numEdges = 0
@@ -276,7 +276,7 @@ y = y + labelCorrection + 0
 [nodes, edges] = id3_learn(X, y, X_subset, attributes, minsplit)
 
 # decoding outputs
-nodes[,1] = nodes[,1] - labelCorrection * ppred(nodes[,0], -1, "==")
+nodes[,1] = nodes[,1] - labelCorrection * (nodes[,0] == -1)
 for(i3 in 1:nrow(edges)):
 #parfor(i3 in 1:nrow(edges)):
     e_parent = scalar(edges[i3-1,0])

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/impute/tmp.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/impute/tmp.dml b/src/test/scripts/applications/impute/tmp.dml
index c852cce..605ab3e 100644
--- a/src/test/scripts/applications/impute/tmp.dml
+++ b/src/test/scripts/applications/impute/tmp.dml
@@ -108,8 +108,8 @@ atan_temporary =
     function (Matrix [double] Args) return (Matrix [double] AtanArgs)
 {
     AbsArgs = abs (Args);
-    Eks = AbsArgs + ppred (AbsArgs, 0.0, "==") * 0.000000000001;
-    Eks = ppred (AbsArgs, 1.0, "<=") * Eks + ppred (AbsArgs, 1.0, ">") / Eks;
+    Eks = AbsArgs + (AbsArgs == 0.0) * 0.000000000001;
+    Eks = (AbsArgs <= 1.0) * Eks + (AbsArgs > 1.0) / Eks;
     EksSq = Eks * Eks;
     AtanEks = 
         Eks   * ( 1.0000000000 + 
@@ -122,7 +122,7 @@ atan_temporary =
         EksSq * (-0.0161657367 + 
         EksSq *   0.0028662257 ))))))));
     pi_over_two = 1.5707963267948966192313216916398;
-    AtanAbsArgs = ppred (AbsArgs, 1.0, "<=") * AtanEks + ppred (AbsArgs, 1.0, ">") * (pi_over_two - AtanEks);
-    AtanArgs    = (ppred (Args, 0.0, ">=") - ppred (Args, 0.0, "<")) * AtanAbsArgs;
+    AtanAbsArgs = (AbsArgs <= 1.0) * AtanEks + (AbsArgs > 1.0) * (pi_over_two - AtanEks);
+    AtanArgs    = ((Args >= 0.0) - (Args < 0.0)) * AtanAbsArgs;
 }
 */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/kmeans/Kmeans.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/kmeans/Kmeans.dml b/src/test/scripts/applications/kmeans/Kmeans.dml
index 368b98d..dd73dec 100644
--- a/src/test/scripts/applications/kmeans/Kmeans.dml
+++ b/src/test/scripts/applications/kmeans/Kmeans.dml
@@ -51,7 +51,7 @@ if (num_records > sample_size)
 {
    # Sample approximately 1000 records (Bernoulli sampling) 
    P = Rand( rows = num_records, cols = 1, min = 0.0, max = 1.0 );
-   P = ppred( P * num_records, sample_size, "<=" );
+   P = ((P * num_records) <= sample_size);
    X_sample = X * (P %*% t( one_per_feature ));
    X_sample = removeEmpty( target = X_sample, margin = "rows" );
 }
@@ -91,7 +91,7 @@ while (centroid_change > eps)
         D = one_per_record %*% t(rowSums (Y * Y)) - 2.0 * X %*% t(Y);
     }
     # Find the closest centroid for each record
-    P = ppred (D, rowMins (D) %*% t(one_per_centroid), "<=");
+    P = (D <= (rowMins (D) %*% t(one_per_centroid)));
     # If some records belong to multiple centroids, share them equally
     P = P / (rowSums (P) %*% t(one_per_centroid));
     # Normalize the columns of P to compute record weights for new centroids
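
The k-means assignment step now compares each distance row against its row minimum directly. A compact standalone DML sketch of that step (random data and 3 centroids, purely illustrative):

    X = rand(rows=20, cols=2, min=0, max=1, pdf="uniform");
    Y = rand(rows=3,  cols=2, min=0, max=1, pdf="uniform");
    one_per_record   = matrix(1, rows=nrow(X), cols=1);
    one_per_centroid = matrix(1, rows=nrow(Y), cols=1);
    D = one_per_record %*% t(rowSums(Y * Y)) - 2.0 * X %*% t(Y);   # distances up to a per-record constant
    P = (D <= (rowMins(D) %*% t(one_per_centroid)));               # closest centroid(s) per record
    P = P / (rowSums(P) %*% t(one_per_centroid));                  # split ties equally
    print("total assignment mass (should equal #records): " + sum(P));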

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/l2svm/L2SVM.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/l2svm/L2SVM.dml b/src/test/scripts/applications/l2svm/L2SVM.dml
index d546d9a..3794827 100644
--- a/src/test/scripts/applications/l2svm/L2SVM.dml
+++ b/src/test/scripts/applications/l2svm/L2SVM.dml
@@ -40,8 +40,8 @@ Y = read($Y)
 
 check_min = min(Y)
 check_max = max(Y)
-num_min = sum(ppred(Y, check_min, "=="))
-num_max = sum(ppred(Y, check_max, "=="))
+num_min = sum(Y == check_min)
+num_max = sum(Y == check_max)
 if(num_min + num_max != nrow(Y)) print("please check Y, it should contain only 2 labels")
 else{
 	if(check_min != -1 | check_max != +1) 
@@ -84,7 +84,7 @@ while(continue == 1 & iter < maxiterations)  {
 	while(continue1 == 1){
 		tmp_Xw = Xw + step_sz*Xd
 		out = 1 - Y * (tmp_Xw)
-		sv = ppred(out, 0, ">")
+		sv = (out > 0)
 		out = out * sv
 		g = wd + step_sz*dd - sum(out * Y * Xd)
 		h = dd + sum(Xd * sv * Xd)
@@ -99,7 +99,7 @@ while(continue == 1 & iter < maxiterations)  {
 	Xw = Xw + step_sz*Xd
 	
 	out = 1 - Y * Xw
-	sv = ppred(out, 0, ">")
+	sv = (out > 0)
 	out = sv * out
 	obj = 0.5 * sum(out * out) + lambda/2 * sum(w * w)
 	g_new = t(X) %*% (out * Y) - lambda * w
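
In L2SVM the mask (out > 0) selects the rows that violate the margin, i.e. the only terms that contribute to the squared hinge loss. A minimal standalone DML sketch of one objective evaluation (random data and a zero weight vector, purely illustrative):

    X = rand(rows=50, cols=3, min=-1, max=1, pdf="uniform");
    Y = 2 * (rand(rows=50, cols=1, min=0, max=1, pdf="uniform") > 0.5) - 1;  # random +/-1 labels
    w = matrix(0, rows=3, cols=1);
    lambda = 1.0;
    out = 1 - Y * (X %*% w);
    sv = (out > 0);                 # margin-violating rows
    out = out * sv;
    obj = 0.5 * sum(out * out) + lambda/2 * sum(w * w);
    print("objective at w=0: " + obj);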

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/l2svm/L2SVM.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/l2svm/L2SVM.pydml b/src/test/scripts/applications/l2svm/L2SVM.pydml
index b28e584..19f074f 100644
--- a/src/test/scripts/applications/l2svm/L2SVM.pydml
+++ b/src/test/scripts/applications/l2svm/L2SVM.pydml
@@ -40,8 +40,8 @@ Y = load($Y)
 
 check_min = min(Y)
 check_max = max(Y)
-num_min = sum(ppred(Y, check_min, "=="))
-num_max = sum(ppred(Y, check_max, "=="))
+num_min = sum(Y == check_min)
+num_max = sum(Y == check_max)
 if(num_min + num_max != nrow(Y)):
     print("please check Y, it should contain only 2 labels")
 else:
@@ -82,7 +82,7 @@ while(continue == 1 & iter < maxiterations):
     while(continue1 == 1):
         tmp_Xw = Xw + step_sz*Xd
         out = 1 - Y * (tmp_Xw)
-        sv = ppred(out, 0, ">")
+        sv = (out > 0)
         out = out * sv
         g = wd + step_sz*dd - sum(out * Y * Xd)
         h = dd + sum(Xd * sv * Xd)
@@ -95,7 +95,7 @@ while(continue == 1 & iter < maxiterations):
     Xw = Xw + step_sz*Xd
     
     out = 1 - Y * Xw
-    sv = ppred(out, 0, ">")
+    sv = (out > 0)
     out = sv * out
     obj = 0.5 * sum(out * out) + lambda/2 * sum(w * w)
     g_new = dot(transpose(X), (out * Y)) - lambda * w

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/linearLogReg/LinearLogReg.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/linearLogReg/LinearLogReg.dml b/src/test/scripts/applications/linearLogReg/LinearLogReg.dml
index bf47bd1..9bd98e8 100644
--- a/src/test/scripts/applications/linearLogReg/LinearLogReg.dml
+++ b/src/test/scripts/applications/linearLogReg/LinearLogReg.dml
@@ -215,7 +215,7 @@ while(!converge) {
 	
 	ot = Xt %*% w
 	ot2 = yt * ot
-	correct = sum(ppred(ot2, 0, ">"))
+	correct = sum(ot2 > 0)
 	accuracy = correct*100.0/Nt 
 	iter = iter + 1
 	converge = (norm_grad < (tol * norm_grad_initial)) | (iter > maxiter)

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/linearLogReg/LinearLogReg.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/linearLogReg/LinearLogReg.pydml b/src/test/scripts/applications/linearLogReg/LinearLogReg.pydml
index 16160a3..1a94534 100644
--- a/src/test/scripts/applications/linearLogReg/LinearLogReg.pydml
+++ b/src/test/scripts/applications/linearLogReg/LinearLogReg.pydml
@@ -198,7 +198,7 @@ while(!converge):
     
     ot = dot(Xt, w)
     ot2 = yt * ot
-    correct = sum(ppred(ot2, 0, ">"))
+    correct = sum(ot2 > 0)
     accuracy = correct*100.0/Nt 
     iter = iter + 1
     converge = (norm_grad < (tol * norm_grad_initial)) | (iter > maxiter)

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/m-svm/m-svm.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/m-svm/m-svm.dml b/src/test/scripts/applications/m-svm/m-svm.dml
index 439002c..400627d 100644
--- a/src/test/scripts/applications/m-svm/m-svm.dml
+++ b/src/test/scripts/applications/m-svm/m-svm.dml
@@ -67,7 +67,7 @@ if(check_X == 0){
 
 	debug_mat = matrix(-1, rows=max_iterations, cols=num_classes)
 	parfor(iter_class in 1:num_classes){		  
-		Y_local = 2 * ppred(Y, iter_class, "==") - 1
+		Y_local = 2 * (Y == iter_class) - 1
 		w_class = matrix(0, rows=num_features, cols=1)
 		if (intercept == 1) {
 			zero_matrix = matrix(0, rows=1, cols=1);
@@ -90,7 +90,7 @@ if(check_X == 0){
   			while(continue1 == 1){
    				tmp_Xw = Xw + step_sz*Xd
    				out = 1 - Y_local * (tmp_Xw)
-   				sv = ppred(out, 0, ">")
+   				sv = (out > 0)
    				out = out * sv
    				g = wd + step_sz*dd - sum(out * Y_local * Xd)
    				h = dd + sum(Xd * sv * Xd)
@@ -105,14 +105,14 @@ if(check_X == 0){
  			Xw = Xw + step_sz*Xd
  
   			out = 1 - Y_local * Xw
-  			sv = ppred(out, 0, ">")
+  			sv = (out > 0)
   			out = sv * out
   			obj = 0.5 * sum(out * out) + lambda/2 * sum(w_class * w_class)
   			g_new = t(X) %*% (out * Y_local) - lambda * w_class
 
   			tmp = sum(s * g_old)
   
-  			train_acc = sum(ppred(Y_local*(X%*%w_class), 0, ">="))/num_samples*100
+  			train_acc = sum((Y_local*(X%*%w_class)) >= 0)/num_samples*100
   			print("For class " + iter_class + " iteration " + iter + " training accuracy: " + train_acc)
   			debug_mat[iter+1,iter_class] = obj	   
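
The one-vs-rest encoding in m-svm is now a single comparison: labels equal to the current class map to +1, everything else to -1. A tiny standalone DML sketch (the label vector and class index are hypothetical):

    Y = matrix("1 2 3 2 1 3", rows=6, cols=1);
    iter_class = 2;
    Y_local = 2 * (Y == iter_class) - 1;     # +1 for class 2, -1 for the rest
    print("positives for class " + iter_class + ": " + sum(Y_local == 1));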
    

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/m-svm/m-svm.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/m-svm/m-svm.pydml b/src/test/scripts/applications/m-svm/m-svm.pydml
index 8c01806..1543f0c 100644
--- a/src/test/scripts/applications/m-svm/m-svm.pydml
+++ b/src/test/scripts/applications/m-svm/m-svm.pydml
@@ -65,7 +65,7 @@ else:
     
     debug_mat = full(-1, rows=max_iterations, cols=num_classes)
     parfor(iter_class in 1:num_classes):
-        Y_local = 2 * ppred(Y, iter_class, "==") - 1
+        Y_local = 2 * (Y == iter_class) - 1
         w_class = full(0, rows=num_features, cols=1)
         if (intercept == 1):
             zero_matrix = full(0, rows=1, cols=1)
@@ -86,7 +86,7 @@ else:
             while(continue1 == 1):
                 tmp_Xw = Xw + step_sz*Xd
                 out = 1 - Y_local * (tmp_Xw)
-                sv = ppred(out, 0, ">")
+                sv = (out > 0)
                 out = out * sv
                 g = wd + step_sz*dd - sum(out * Y_local * Xd)
                 h = dd + sum(Xd * sv * Xd)
@@ -99,14 +99,14 @@ else:
             Xw = Xw + step_sz*Xd
             
             out = 1 - Y_local * Xw
-            sv = ppred(out, 0, ">")
+            sv = (out > 0)
             out = sv * out
             obj = 0.5 * sum(out * out) + lambda/2 * sum(w_class * w_class)
             g_new = dot(transpose(X), (out * Y_local)) - lambda * w_class
             
             tmp = sum(s * g_old)
             
-            train_acc = sum(ppred(Y_local*(dot(X, w_class)), 0, ">="))/num_samples*100
+            train_acc = sum(Y_local*(dot(X, w_class)) >= 0)/num_samples*100
             print("For class " + iter_class + " iteration " + iter + " training accuracy: " + train_acc)
             debug_mat[iter,iter_class-1] = obj
             

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/mdabivar/MDABivariateStats.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/mdabivar/MDABivariateStats.dml b/src/test/scripts/applications/mdabivar/MDABivariateStats.dml
index 5bb980c..1a47440 100644
--- a/src/test/scripts/applications/mdabivar/MDABivariateStats.dml
+++ b/src/test/scripts/applications/mdabivar/MDABivariateStats.dml
@@ -222,7 +222,7 @@ bivar_cc = function(Matrix[Double] A, Matrix[Double] B) return (Double pval, Mat
     r = rowSums(F)
     c = colSums(F)
     E = (r %*% c)/W
-    E = ppred(E, 0, "==")*0.0001 + E
+    E = (E == 0)*0.0001 + E
     T = (F-E)^2/E
     chi_squared = sum(T)
 
@@ -250,7 +250,7 @@ bivar_sc = function(Matrix[Double] Y, Matrix[Double] A) return (Double pVal, Mat
 
     # category-wise (frequencies, means, variances)
     CFreqs1 = aggregate(target=Y, groups=A, fn="count")
-    present_domain_vals_mat = removeEmpty(target=diag(1-ppred(CFreqs1, 0, "==")), margin="rows")
+    present_domain_vals_mat = removeEmpty(target=diag(1-(CFreqs1 == 0)), margin="rows")
     CFreqs = present_domain_vals_mat %*% CFreqs1
 
     CMeans = present_domain_vals_mat %*% aggregate(target=Y, groups=A, fn="mean")
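
The bivariate-stats change guards the chi-squared computation: expected counts of exactly 0 are bumped to 0.0001 before dividing. A short standalone DML sketch (the observed table F is hypothetical; its empty second column exercises the guard):

    F = matrix("10 0 5 0", rows=2, cols=2);    # observed contingency table
    W = sum(F);
    r = rowSums(F);
    c = colSums(F);
    E = (r %*% c) / W;
    E = (E == 0) * 0.0001 + E;                 # avoid division by a zero expected count
    chi_squared = sum((F - E)^2 / E);
    print("chi-squared: " + chi_squared);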

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/mdabivar/MDABivariateStats.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/mdabivar/MDABivariateStats.pydml b/src/test/scripts/applications/mdabivar/MDABivariateStats.pydml
index fd3cda6..2fbc506 100644
--- a/src/test/scripts/applications/mdabivar/MDABivariateStats.pydml
+++ b/src/test/scripts/applications/mdabivar/MDABivariateStats.pydml
@@ -203,7 +203,7 @@ def bivar_cc(A:matrix[float], B:matrix[float]) -> (pval:float, contingencyTable:
     r = rowSums(F)
     c = colSums(F)
     E = (dot(r, c))/W
-    E = ppred(E, 0, "==")*0.0001 + E
+    E = (E == 0)*0.0001 + E
     T = (F-E)**2/E
     chi_squared = sum(T)
     # compute p-value
@@ -228,7 +228,7 @@ def bivar_sc(Y:matrix[float], A:matrix[float]) -> (pVal:float, CFreqs:matrix[flo
     
     # category-wise (frequencies, means, variances)
     CFreqs1 = aggregate(target=Y, groups=A, fn="count")
-    present_domain_vals_mat = removeEmpty(target=diag(1-ppred(CFreqs1, 0, "==")), axis=0)
+    present_domain_vals_mat = removeEmpty(target=diag(1-(CFreqs1 == 0)), axis=0)
     CFreqs = dot(present_domain_vals_mat, CFreqs1)
     
     CMeans = dot(present_domain_vals_mat, aggregate(target=Y, groups=A, fn="mean"))

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/naive-bayes-parfor/naive-bayes.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/naive-bayes-parfor/naive-bayes.dml b/src/test/scripts/applications/naive-bayes-parfor/naive-bayes.dml
index 83ddbf7..60f481b 100644
--- a/src/test/scripts/applications/naive-bayes-parfor/naive-bayes.dml
+++ b/src/test/scripts/applications/naive-bayes-parfor/naive-bayes.dml
@@ -67,7 +67,7 @@ D_w_ones = cbind(D, ones)
 model = cbind(class_conditionals, class_prior)
 log_probs = D_w_ones %*% t(log(model))
 pred = rowIndexMax(log_probs)
-acc = sum(ppred(pred, C, "==")) / numRows * 100
+acc = sum(pred == C) / numRows * 100
 
 acc_str = "Training Accuracy (%): " + acc
 print(acc_str)
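
Training accuracy in the naive-Bayes scripts is now a direct equality count between predictions and labels. A minimal standalone DML sketch (pred and C below are hypothetical label vectors):

    pred = matrix("1 2 2 3", rows=4, cols=1);   # predicted labels
    C    = matrix("1 2 3 3", rows=4, cols=1);   # true labels
    acc = sum(pred == C) / nrow(C) * 100;
    print("Training Accuracy (%): " + acc);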

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/naive-bayes-parfor/naive-bayes.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/naive-bayes-parfor/naive-bayes.pydml b/src/test/scripts/applications/naive-bayes-parfor/naive-bayes.pydml
index 58a23c6..09fa8ec 100644
--- a/src/test/scripts/applications/naive-bayes-parfor/naive-bayes.pydml
+++ b/src/test/scripts/applications/naive-bayes-parfor/naive-bayes.pydml
@@ -68,7 +68,7 @@ log_model = log(model)
 transpose_log_model = log_model.transpose()
 log_probs = dot(D_w_ones, transpose_log_model)
 pred = rowIndexMax(log_probs)
-acc = sum(ppred(pred, C, "==")) / numRows * 100
+acc = sum(pred == C) / numRows * 100
 
 acc_str = "Training Accuracy (%): " + acc
 print(acc_str)

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/parfor/parfor_cv_multiclasssvm0.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/parfor/parfor_cv_multiclasssvm0.dml b/src/test/scripts/applications/parfor/parfor_cv_multiclasssvm0.dml
index 9c0ca9c..547176c 100644
--- a/src/test/scripts/applications/parfor/parfor_cv_multiclasssvm0.dml
+++ b/src/test/scripts/applications/parfor/parfor_cv_multiclasssvm0.dml
@@ -47,9 +47,9 @@ stats = matrix(0, rows=k, cols=1); #k-folds x 1-stats
 for( i in 1:k )
 {
    #prepare train/test fold projections
-   vPxi = ppred( P, i, "==" );   #  Select 1/k fraction of the rows
+   vPxi = (P == i);   #  Select 1/k fraction of the rows
    mPxi = (vPxi %*% ones);       #  for the i-th fold TEST set
-   #nvPxi = ppred( P, i, "!=" );
+   #nvPxi = (P != i);
    #nmPxi = (nvPxi %*% ones);  #note: inefficient for sparse data  
 
    #create train/test folds
@@ -113,7 +113,7 @@ scoreMultiClassSVM = function( Matrix[double] X, Matrix[double] y, Matrix[double
    
    predicted_y = rowIndexMax( scores);
    
-   correct_percentage = sum( ppred( predicted_y - y, 0, "==")) / Nt * 100;
+   correct_percentage = sum((predicted_y - y) == 0) / Nt * 100;
 
    out_correct_pct = correct_percentage;
 
@@ -142,7 +142,7 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
       
       iter_class = 1
       
-      Y_local = 2 * ppred( Y, iter_class, "==") - 1
+      Y_local = 2 * (Y == iter_class) - 1
       w_class = matrix( 0, rows=num_features, cols=1 )
    
       if (intercept == 1) {
@@ -165,7 +165,7 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
         while(continue1 == 1){
          tmp_w = w_class + step_sz*s
          out = 1 - Y_local * (X %*% tmp_w)
-         sv = ppred(out, 0, ">")
+         sv = (out > 0)
          out = out * sv
          g = wd + step_sz*dd - sum(out * Y_local * Xd)
          h = dd + sum(Xd * sv * Xd)
@@ -179,14 +179,14 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
         w_class = w_class + step_sz*s
        
         out = 1 - Y_local * (X %*% w_class)
-        sv = ppred(out, 0, ">")
+        sv = (out > 0)
         out = sv * out
         obj = 0.5 * sum(out * out) + lambda/2 * sum(w_class * w_class)
         g_new = t(X) %*% (out * Y_local) - lambda * w_class
       
         tmp = sum(s * g_old)
         
-        train_acc = sum(ppred(Y_local*(X%*%w_class), 0, ">="))/num_samples*100
+        train_acc = sum((Y_local*(X%*%w_class)) >= 0)/num_samples*100
         #print("For class " + iter_class + " iteration " + iter + " training accuracy: " + train_acc)
          
         if((step_sz*tmp < epsilon*obj) | (iter >= max_iterations-1)){
@@ -206,7 +206,7 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
       iter_class = iter_class + 1
       
       while(iter_class <= num_classes){
-       Y_local = 2 * ppred(Y, iter_class, "==") - 1
+       Y_local = 2 * (Y == iter_class) - 1
        w_class = matrix(0, rows=ncol(X), cols=1)
        if (intercept == 1) {
        	zero_matrix = matrix(0, rows=1, cols=1);
@@ -228,7 +228,7 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
         while(continue1 == 1){
          tmp_w = w_class + step_sz*s
          out = 1 - Y_local * (X %*% tmp_w)
-         sv = ppred(out, 0, ">")
+         sv = (out > 0)
          out = out * sv
          g = wd + step_sz*dd - sum(out * Y_local * Xd)
          h = dd + sum(Xd * sv * Xd)
@@ -242,14 +242,14 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
         w_class = w_class + step_sz*s
        
         out = 1 - Y_local * (X %*% w_class)
-        sv = ppred(out, 0, ">")
+        sv = (out > 0)
         out = sv * out
         obj = 0.5 * sum(out * out) + lambda/2 * sum(w_class * w_class)
         g_new = t(X) %*% (out * Y_local) - lambda * w_class
       
         tmp = sum(s * g_old)
         
-        train_acc = sum(ppred(Y_local*(X%*%w_class), 0, ">="))/num_samples*100
+        train_acc = sum((Y_local*(X%*%w_class)) >= 0)/num_samples*100
         #print("For class " + iter_class + " iteration " + iter + " training accuracy: " + train_acc)
          
         if((step_sz*tmp < epsilon*obj) | (iter >= max_iterations-1)){

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/parfor/parfor_cv_multiclasssvm1.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/parfor/parfor_cv_multiclasssvm1.dml b/src/test/scripts/applications/parfor/parfor_cv_multiclasssvm1.dml
index 1dc2a34..0ef4bf4 100644
--- a/src/test/scripts/applications/parfor/parfor_cv_multiclasssvm1.dml
+++ b/src/test/scripts/applications/parfor/parfor_cv_multiclasssvm1.dml
@@ -47,9 +47,9 @@ stats = matrix(0, rows=k, cols=1); #k-folds x 1-stats
 parfor( i in 1:k, par=4, mode=LOCAL, opt=NONE )
 {
    #prepare train/test fold projections
-   vPxi = ppred( P, i, "==" );   #  Select 1/k fraction of the rows
+   vPxi = (P == i);   #  Select 1/k fraction of the rows
    mPxi = (vPxi %*% ones);       #  for the i-th fold TEST set
-   #nvPxi = ppred( P, i, "!=" );
+   #nvPxi = (P != i);
    #nmPxi = (nvPxi %*% ones);  #note: inefficient for sparse data  
 
    #create train/test folds
@@ -113,7 +113,7 @@ scoreMultiClassSVM = function( Matrix[double] X, Matrix[double] y, Matrix[double
    
    predicted_y = rowIndexMax( scores);
    
-   correct_percentage = sum( ppred( predicted_y - y, 0, "==")) / Nt * 100;
+   correct_percentage = sum((predicted_y - y) == 0) / Nt * 100;
 
    out_correct_pct = correct_percentage;
 
@@ -142,7 +142,7 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
       
       iter_class = 1
       
-      Y_local = 2 * ppred( Y, iter_class, "==") - 1
+      Y_local = 2 * (Y == iter_class) - 1
       w_class = matrix( 0, rows=num_features, cols=1 )
    
       if (intercept == 1) {
@@ -165,7 +165,7 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
         while(continue1 == 1){
          tmp_w = w_class + step_sz*s
          out = 1 - Y_local * (X %*% tmp_w)
-         sv = ppred(out, 0, ">")
+         sv = (out > 0)
          out = out * sv
          g = wd + step_sz*dd - sum(out * Y_local * Xd)
          h = dd + sum(Xd * sv * Xd)
@@ -179,14 +179,14 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
         w_class = w_class + step_sz*s
        
         out = 1 - Y_local * (X %*% w_class)
-        sv = ppred(out, 0, ">")
+        sv = (out > 0)
         out = sv * out
         obj = 0.5 * sum(out * out) + lambda/2 * sum(w_class * w_class)
         g_new = t(X) %*% (out * Y_local) - lambda * w_class
       
         tmp = sum(s * g_old)
         
-        train_acc = sum(ppred(Y_local*(X%*%w_class), 0, ">="))/num_samples*100
+        train_acc = sum((Y_local*(X%*%w_class)) >= 0)/num_samples*100
         #print("For class " + iter_class + " iteration " + iter + " training accuracy: " + train_acc)
          
         if((step_sz*tmp < epsilon*obj) | (iter >= max_iterations-1)){
@@ -206,7 +206,7 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
       iter_class = iter_class + 1
       
       while(iter_class <= num_classes){
-       Y_local = 2 * ppred(Y, iter_class, "==") - 1
+       Y_local = 2 * (Y == iter_class) - 1
        w_class = matrix(0, rows=ncol(X), cols=1)
        if (intercept == 1) {
        	zero_matrix = matrix(0, rows=1, cols=1);
@@ -228,7 +228,7 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
         while(continue1 == 1){
          tmp_w = w_class + step_sz*s
          out = 1 - Y_local * (X %*% tmp_w)
-         sv = ppred(out, 0, ">")
+         sv = (out > 0)
          out = out * sv
          g = wd + step_sz*dd - sum(out * Y_local * Xd)
          h = dd + sum(Xd * sv * Xd)
@@ -242,14 +242,14 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
         w_class = w_class + step_sz*s
        
         out = 1 - Y_local * (X %*% w_class)
-        sv = ppred(out, 0, ">")
+        sv = (out > 0)
         out = sv * out
         obj = 0.5 * sum(out * out) + lambda/2 * sum(w_class * w_class)
         g_new = t(X) %*% (out * Y_local) - lambda * w_class
       
         tmp = sum(s * g_old)
         
-        train_acc = sum(ppred(Y_local*(X%*%w_class), 0, ">="))/num_samples*100
+        train_acc = sum((Y_local*(X%*%w_class)) >= 0)/num_samples*100
         #print("For class " + iter_class + " iteration " + iter + " training accuracy: " + train_acc)
          
         if((step_sz*tmp < epsilon*obj) | (iter >= max_iterations-1)){

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/parfor/parfor_cv_multiclasssvm4.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/parfor/parfor_cv_multiclasssvm4.dml b/src/test/scripts/applications/parfor/parfor_cv_multiclasssvm4.dml
index b94f168..06bd9d5 100644
--- a/src/test/scripts/applications/parfor/parfor_cv_multiclasssvm4.dml
+++ b/src/test/scripts/applications/parfor/parfor_cv_multiclasssvm4.dml
@@ -47,9 +47,9 @@ stats = matrix(0, rows=k, cols=1); #k-folds x 1-stats
 parfor( i in 1:k )
 {
    #prepare train/test fold projections
-   vPxi = ppred( P, i, "==" );   #  Select 1/k fraction of the rows
+   vPxi = (P == i);   #  Select 1/k fraction of the rows
    mPxi = (vPxi %*% ones);       #  for the i-th fold TEST set
-   #nvPxi = ppred( P, i, "!=" );
+   #nvPxi = (P != i);
    #nmPxi = (nvPxi %*% ones);  #note: inefficient for sparse data  
 
    #create train/test folds
@@ -113,7 +113,7 @@ scoreMultiClassSVM = function( Matrix[double] X, Matrix[double] y, Matrix[double
    
    predicted_y = rowIndexMax( scores);
    
-   correct_percentage = sum( ppred( predicted_y - y, 0, "==")) / Nt * 100;
+   correct_percentage = sum((predicted_y - y) == 0) / Nt * 100;
 
    out_correct_pct = correct_percentage;
 
@@ -142,7 +142,7 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
       
       iter_class = 1
       
-      Y_local = 2 * ppred( Y, iter_class, "==") - 1
+      Y_local = 2 * (Y == iter_class) - 1
       w_class = matrix( 0, rows=num_features, cols=1 )
    
       if (intercept == 1) {
@@ -165,7 +165,7 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
         while(continue1 == 1){
          tmp_w = w_class + step_sz*s
          out = 1 - Y_local * (X %*% tmp_w)
-         sv = ppred(out, 0, ">")
+         sv = (out > 0)
          out = out * sv
          g = wd + step_sz*dd - sum(out * Y_local * Xd)
          h = dd + sum(Xd * sv * Xd)
@@ -179,14 +179,14 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
         w_class = w_class + step_sz*s
        
         out = 1 - Y_local * (X %*% w_class)
-        sv = ppred(out, 0, ">")
+        sv = (out > 0)
         out = sv * out
         obj = 0.5 * sum(out * out) + lambda/2 * sum(w_class * w_class)
         g_new = t(X) %*% (out * Y_local) - lambda * w_class
       
         tmp = sum(s * g_old)
         
-        train_acc = sum(ppred(Y_local*(X%*%w_class), 0, ">="))/num_samples*100
+        train_acc = sum((Y_local*(X%*%w_class)) >= 0)/num_samples*100
         #print("For class " + iter_class + " iteration " + iter + " training accuracy: " + train_acc)
          
         if((step_sz*tmp < epsilon*obj) | (iter >= max_iterations-1)){
@@ -206,7 +206,7 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
       iter_class = iter_class + 1
       
       while(iter_class <= num_classes){
-       Y_local = 2 * ppred(Y, iter_class, "==") - 1
+       Y_local = 2 * (Y == iter_class) - 1
        w_class = matrix(0, rows=ncol(X), cols=1)
        if (intercept == 1) {
        	zero_matrix = matrix(0, rows=1, cols=1);
@@ -228,7 +228,7 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
         while(continue1 == 1){
          tmp_w = w_class + step_sz*s
          out = 1 - Y_local * (X %*% tmp_w)
-         sv = ppred(out, 0, ">")
+         sv = (out > 0)
          out = out * sv
          g = wd + step_sz*dd - sum(out * Y_local * Xd)
          h = dd + sum(Xd * sv * Xd)
@@ -242,14 +242,14 @@ multiClassSVM = function (Matrix[double] X, Matrix[double] Y, Integer intercept,
         w_class = w_class + step_sz*s
        
         out = 1 - Y_local * (X %*% w_class)
-        sv = ppred(out, 0, ">")
+        sv = (out > 0)
         out = sv * out
         obj = 0.5 * sum(out * out) + lambda/2 * sum(w_class * w_class)
         g_new = t(X) %*% (out * Y_local) - lambda * w_class
       
         tmp = sum(s * g_old)
         
-        train_acc = sum(ppred(Y_local*(X%*%w_class), 0, ">="))/num_samples*100
+        train_acc = sum((Y_local*(X%*%w_class)) >= 0)/num_samples*100
         #print("For class " + iter_class + " iteration " + iter + " training accuracy: " + train_acc)
          
         if((step_sz*tmp < epsilon*obj) | (iter >= max_iterations-1)){

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/parfor/parfor_sample.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/parfor/parfor_sample.dml b/src/test/scripts/applications/parfor/parfor_sample.dml
index 98ac395..386e9b0 100644
--- a/src/test/scripts/applications/parfor/parfor_sample.dml
+++ b/src/test/scripts/applications/parfor/parfor_sample.dml
@@ -60,8 +60,8 @@ svUpBnd = cumsum(sv);
 # Construct sampling matrix SM, and apply to create samples
 parfor ( i in 1:nrow(sv))
 {
-  T1 = ppred(R, as.scalar(svUpBnd[i,1]), "<=");
-  T2 = ppred(R, as.scalar(svLowBnd[i,1]), ">");
+  T1 = (R <= as.scalar(svUpBnd[i,1]));
+  T2 = (R > as.scalar(svLowBnd[i,1]));
   SM = T1 * T2; 
   P = removeEmpty(target=diag(SM), margin="rows");
   iX = P %*% X;

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/parfor/parfor_univariate0.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/parfor/parfor_univariate0.dml b/src/test/scripts/applications/parfor/parfor_univariate0.dml
index 78397cb..448c1c3 100644
--- a/src/test/scripts/applications/parfor/parfor_univariate0.dml
+++ b/src/test/scripts/applications/parfor/parfor_univariate0.dml
@@ -142,7 +142,7 @@ else {
 
 					mode = rowIndexMax(t(cat_counts));
 					mx = max(cat_counts)
-					modeArr =  ppred(cat_counts, mx, "==")
+					modeArr =  (cat_counts == mx)
 					numModes = sum(modeArr);
 
 					# place the computed statistics in output matrices

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/parfor/parfor_univariate1.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/parfor/parfor_univariate1.dml b/src/test/scripts/applications/parfor/parfor_univariate1.dml
index 64ee633..ad1e78f 100644
--- a/src/test/scripts/applications/parfor/parfor_univariate1.dml
+++ b/src/test/scripts/applications/parfor/parfor_univariate1.dml
@@ -142,7 +142,7 @@ else {
 
 					mode = rowIndexMax(t(cat_counts));
 					mx = max(cat_counts)
-					modeArr =  ppred(cat_counts, mx, "==")
+					modeArr =  (cat_counts == mx)
 					numModes = sum(modeArr);
 
 					# place the computed statistics in output matrices

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/parfor/parfor_univariate4.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/parfor/parfor_univariate4.dml b/src/test/scripts/applications/parfor/parfor_univariate4.dml
index 465cb8f..62646ef 100644
--- a/src/test/scripts/applications/parfor/parfor_univariate4.dml
+++ b/src/test/scripts/applications/parfor/parfor_univariate4.dml
@@ -142,7 +142,7 @@ else {
 
 					mode = rowIndexMax(t(cat_counts));
 					mx = max(cat_counts)
-					modeArr =  ppred(cat_counts, mx, "==")
+					modeArr =  (cat_counts == mx)
 					numModes = sum(modeArr);
 
 					# place the computed statistics in output matrices

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/validation/CV_LogisticRegression.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/validation/CV_LogisticRegression.dml b/src/test/scripts/applications/validation/CV_LogisticRegression.dml
index 191e6d1..6e765e3 100644
--- a/src/test/scripts/applications/validation/CV_LogisticRegression.dml
+++ b/src/test/scripts/applications/validation/CV_LogisticRegression.dml
@@ -60,9 +60,9 @@ stats = matrix(0, rows=k, cols=40); #k-folds x 40-stats
 parfor( i in 1:k )
 {
    #prepare train/test fold projections
-   vPxi = ppred( P, i, "==" );
+   vPxi = (P == i);
    mPxi = (vPxi %*% ones);   
-   #nvPxi = ppred( P, i, "!=" );
+   #nvPxi = (P != i);
    #nmPxi = (nvPxi %*% ones);  #note: inefficient for sparse data  
 
    #create train/test folds
@@ -292,7 +292,7 @@ logisticRegression = function (Matrix[double] X, Matrix[double] y, Integer in_in
    } 
    
    o2 = y * o
-   correct = sum(ppred(o2, 0, ">"))
+   correct = sum(o2 > 0)
    accuracy = correct*100.0/N 
    iter = iter + 1
    #converge = (norm_grad < (tol * norm_grad_initial)) | (iter > maxiter)
@@ -352,7 +352,7 @@ scoreLogRegModel = function (Matrix[double] X_train, Matrix[double] y_train, Mat
     prob_train = 1.0 / (1.0 + exp (- linear_train));
     est_value_POS_train = value_TP * prob_train - cost_FP * (1.0 - prob_train);
     est_value_NEG_train = value_TN * (1.0 - prob_train) - cost_FN * prob_train;
-    y_train_pred = 2 * ppred (est_value_POS_train, est_value_NEG_train, ">") - 1;
+    y_train_pred = 2 * (est_value_POS_train > est_value_NEG_train) - 1;
 
 # Compute the estimated number of true/false positives/negatives
 
@@ -411,7 +411,7 @@ scoreLogRegModel = function (Matrix[double] X_train, Matrix[double] y_train, Mat
     prob_test = 1.0 / (1.0 + exp (- linear_test));
     est_value_POS_test = value_TP * prob_test - cost_FP * (1.0 - prob_test);
     est_value_NEG_test = value_TN * (1.0 - prob_test) - cost_FN * prob_test;
-    y_test_pred = 2 * ppred (est_value_POS_test, est_value_NEG_test, ">") - 1;
+    y_test_pred = 2 * (est_value_POS_test > est_value_NEG_test) - 1;
 
 # Compute the estimated number of true/false positives/negatives
 


[3/4] systemml git commit: [SYSTEMML-1799] Remove ppred from test scripts

Posted by de...@apache.org.
[SYSTEMML-1799] Remove ppred from test scripts

Replace ppred() calls with relational operators in the test scripts, since
ppred() is deprecated (a minimal sketch of the substitution follows below).
Rename the ppred test classes and test scripts accordingly.
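
For illustration only, a minimal DML sketch of the substitution (X, s, Y, and c
are placeholder names, not identifiers from the scripts in this commit):

  # before (deprecated built-in): 0/1 matrix with 1 where X > s
  M = ppred(X, s, ">");
  # after (relational operator): identical 0/1 result
  M = (X > s);

  # the same rewrite applied to the recurring +1/-1 label encoding
  Y_local = 2 * ppred(Y, c, "==") - 1;   # before
  Y_local = 2 * (Y == c) - 1;            # after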


Project: http://git-wip-us.apache.org/repos/asf/systemml/repo
Commit: http://git-wip-us.apache.org/repos/asf/systemml/commit/d30e1888
Tree: http://git-wip-us.apache.org/repos/asf/systemml/tree/d30e1888
Diff: http://git-wip-us.apache.org/repos/asf/systemml/diff/d30e1888

Branch: refs/heads/master
Commit: d30e188807af767df7ca46c2980c0f7110ad912d
Parents: 11b689d
Author: Deron Eriksson <de...@apache.org>
Authored: Mon Jul 24 11:34:31 2017 -0700
Committer: Deron Eriksson <de...@apache.org>
Committed: Mon Jul 24 15:07:55 2017 -0700

----------------------------------------------------------------------
 .../FullLogicalMatrixTest.java                  | 590 +++++++++++++++++++
 .../FullLogicalScalarLeftTest.java              | 437 ++++++++++++++
 .../FullLogicalScalarRightTest.java             | 436 ++++++++++++++
 .../matrix_full_other/FullPPredMatrixTest.java  | 590 -------------------
 .../FullPPredScalarLeftTest.java                | 437 --------------
 .../FullPPredScalarRightTest.java               | 436 --------------
 .../ternary/CTableMatrixIgnoreZerosTest.java    |   2 +-
 .../apply-transform/apply-transform.dml         |   4 +-
 .../apply-transform/apply-transform.pydml       |   4 +-
 .../applications/arima_box-jenkins/arima.dml    |   2 +-
 .../applications/arima_box-jenkins/arima.pydml  |   2 +-
 .../scripts/applications/cspline/CsplineCG.dml  |   2 +-
 .../applications/cspline/CsplineCG.pydml        |   2 +-
 .../scripts/applications/cspline/CsplineDS.dml  |   2 +-
 .../applications/cspline/CsplineDS.pydml        |   2 +-
 .../applications/ctableStats/Binomial.dml       |  18 +-
 .../applications/ctableStats/ctci_odds.dml      |   2 +-
 .../applications/ctableStats/zipftest.dml       |   2 +-
 .../descriptivestats/Categorical.dml            |   4 +-
 .../applications/descriptivestats/Scale.R       |   1 -
 .../applications/descriptivestats/Scale.dml     |   5 +-
 .../WeightedCategoricalTest.dml                 |   4 +-
 .../descriptivestats/WeightedScaleTest.R        |   1 -
 .../descriptivestats/WeightedScaleTest.dml      |   5 +-
 src/test/scripts/applications/glm/GLM.dml       |  86 +--
 src/test/scripts/applications/glm/GLM.pydml     |  86 +--
 src/test/scripts/applications/id3/id3.dml       |  14 +-
 src/test/scripts/applications/id3/id3.pydml     |  14 +-
 src/test/scripts/applications/impute/tmp.dml    |   8 +-
 src/test/scripts/applications/kmeans/Kmeans.dml |   4 +-
 src/test/scripts/applications/l2svm/L2SVM.dml   |   8 +-
 src/test/scripts/applications/l2svm/L2SVM.pydml |   8 +-
 .../applications/linearLogReg/LinearLogReg.dml  |   2 +-
 .../linearLogReg/LinearLogReg.pydml             |   2 +-
 src/test/scripts/applications/m-svm/m-svm.dml   |   8 +-
 src/test/scripts/applications/m-svm/m-svm.pydml |   8 +-
 .../applications/mdabivar/MDABivariateStats.dml |   4 +-
 .../mdabivar/MDABivariateStats.pydml            |   4 +-
 .../naive-bayes-parfor/naive-bayes.dml          |   2 +-
 .../naive-bayes-parfor/naive-bayes.pydml        |   2 +-
 .../parfor/parfor_cv_multiclasssvm0.dml         |  22 +-
 .../parfor/parfor_cv_multiclasssvm1.dml         |  22 +-
 .../parfor/parfor_cv_multiclasssvm4.dml         |  22 +-
 .../applications/parfor/parfor_sample.dml       |   4 +-
 .../applications/parfor/parfor_univariate0.dml  |   2 +-
 .../applications/parfor/parfor_univariate1.dml  |   2 +-
 .../applications/parfor/parfor_univariate4.dml  |   2 +-
 .../validation/CV_LogisticRegression.dml        |  10 +-
 .../validation/CV_MultiClassSVM.dml             |  22 +-
 .../validation/CV_MultiClassSVM.sasha.dml       |  24 +-
 .../validation/LinearLogisticRegression.dml     |   2 +-
 .../applications/validation/MultiClassSVM.dml   |  16 +-
 .../validation/MultiClassSVMScore.dml           |   2 +-
 .../genRandData4LogisticRegression.dml          |   2 +-
 .../validation/genRandData4MultiClassSVM.dml    |   2 +-
 .../matrix/UltraSparseMatrixMultiplication.dml  |   2 +-
 .../matrix/UltraSparseMatrixMultiplication2.dml |   2 +-
 .../matrix_full_other/LogicalMatrixTest.R       |  59 ++
 .../matrix_full_other/LogicalMatrixTest.dml     |  50 ++
 .../matrix_full_other/LogicalScalarLeftTest.R   |  60 ++
 .../matrix_full_other/LogicalScalarLeftTest.dml |  49 ++
 .../matrix_full_other/LogicalScalarRightTest.R  |  60 ++
 .../LogicalScalarRightTest.dml                  |  49 ++
 .../binary/matrix_full_other/PPredMatrixTest.R  |  59 --
 .../matrix_full_other/PPredMatrixTest.dml       |  50 --
 .../matrix_full_other/PPredScalarLeftTest.R     |  60 --
 .../matrix_full_other/PPredScalarLeftTest.dml   |  49 --
 .../matrix_full_other/PPredScalarRightTest.R    |  60 --
 .../matrix_full_other/PPredScalarRightTest.dml  |  49 --
 .../functions/misc/ValueTypePredLeftScalar.dml  |   2 +-
 .../functions/misc/ValueTypePredRightScalar.dml |   2 +-
 .../quaternary/WeightedDivMMMultMinusLeft.dml   |   2 +-
 .../quaternary/WeightedDivMMMultMinusRight.dml  |   2 +-
 .../quaternary/WeightedSquaredLossPostNz.dml    |   2 +-
 .../scripts/functions/ternary/CTableRowHist.dml |   2 +-
 .../scripts/functions/unary/matrix/SelPos.dml   |   2 +-
 .../scripts/functions/unary/matrix/Sign2.dml    |   2 +-
 .../binary/matrix_full_other/ZPackageSuite.java |   6 +-
 78 files changed, 2041 insertions(+), 2045 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalMatrixTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalMatrixTest.java b/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalMatrixTest.java
new file mode 100644
index 0000000..9563283
--- /dev/null
+++ b/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalMatrixTest.java
@@ -0,0 +1,590 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.sysml.test.integration.functions.binary.matrix_full_other;
+
+import java.util.HashMap;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.sysml.api.DMLScript;
+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;
+import org.apache.sysml.lops.LopProperties.ExecType;
+import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;
+import org.apache.sysml.test.integration.AutomatedTestBase;
+import org.apache.sysml.test.integration.TestConfiguration;
+import org.apache.sysml.test.utils.TestUtils;
+
+/**
+ * The main purpose of this test is to verify various input combinations for
+ * matrix-matrix ppred operations that internally translate to binary operations.
+ * 
+ */
+public class FullLogicalMatrixTest extends AutomatedTestBase 
+{
+	
+	private final static String TEST_NAME1 = "LogicalMatrixTest";
+	private final static String TEST_DIR = "functions/binary/matrix_full_other/";
+	private final static String TEST_CLASS_DIR = TEST_DIR + FullLogicalMatrixTest.class.getSimpleName() + "/";
+	private final static double eps = 1e-10;
+	
+	private final static int rows1 = 1383;
+	private final static int cols1 = 1432;
+	
+	private final static double sparsity1 = 0.7;
+	private final static double sparsity2 = 0.01;
+	
+	public enum Type{
+		GREATER,
+		LESS,
+		EQUALS,
+		NOT_EQUALS,
+		GREATER_EQUALS,
+		LESS_EQUALS,
+	}
+		
+	@Override
+	public void setUp() 
+	{
+		addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { "C" }) ); 
+		TestUtils.clearAssertionInformation();
+		if (TEST_CACHE_ENABLED) {
+			setOutAndExpectedDeletionDisabled(true);
+		}
+	}
+
+	@BeforeClass
+	public static void init()
+	{
+		TestUtils.clearDirectory(TEST_DATA_DIR + TEST_CLASS_DIR);
+	}
+
+	@AfterClass
+	public static void cleanUp()
+	{
+		if (TEST_CACHE_ENABLED) {
+			TestUtils.clearDirectory(TEST_DATA_DIR + TEST_CLASS_DIR);
+		}
+	}
+	
+	@Test
+	public void testPPredGreaterDenseDenseCP() 
+	{
+		runPPredTest(Type.GREATER, false, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredGreaterDenseSparseCP() 
+	{
+		runPPredTest(Type.GREATER, false, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredGreaterSparseDenseCP() 
+	{
+		runPPredTest(Type.GREATER, true, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredGreaterSparseSparseCP() 
+	{
+		runPPredTest(Type.GREATER, true, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsDenseDenseCP() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsDenseSparseCP() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsSparseDenseCP() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsSparseSparseCP() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredEqualsDenseDenseCP() 
+	{
+		runPPredTest(Type.EQUALS, false, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredEqualsDenseSparseCP() 
+	{
+		runPPredTest(Type.EQUALS, false, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredEqualsSparseDenseCP() 
+	{
+		runPPredTest(Type.EQUALS, true, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredEqualsSparseSparseCP() 
+	{
+		runPPredTest(Type.EQUALS, true, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredNotEqualsDenseDenseCP() 
+	{
+		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredNotEqualsDenseSparseCP() 
+	{
+		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredNotEqualsSparseDenseCP() 
+	{
+		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredNotEqualsSparseSparseCP() 
+	{
+		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessDenseDenseCP() 
+	{
+		runPPredTest(Type.LESS, false, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessDenseSparseCP() 
+	{
+		runPPredTest(Type.LESS, false, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessSparseDenseCP() 
+	{
+		runPPredTest(Type.LESS, true, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessSparseSparseCP() 
+	{
+		runPPredTest(Type.LESS, true, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessEqualsDenseDenseCP() 
+	{
+		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessEqualsDenseSparseCP() 
+	{
+		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessEqualsSparseDenseCP() 
+	{
+		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessEqualsSparseSparseCP() 
+	{
+		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.CP);
+	}
+	
+	
+	// ------------------------
+	@Test
+	public void testPPredGreaterDenseDenseSP() 
+	{
+		runPPredTest(Type.GREATER, false, false, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredGreaterDenseSparseSP() 
+	{
+		runPPredTest(Type.GREATER, false, true, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredGreaterSparseDenseSP() 
+	{
+		runPPredTest(Type.GREATER, true, false, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredGreaterSparseSparseSP() 
+	{
+		runPPredTest(Type.GREATER, true, true, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsDenseDenseSP() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsDenseSparseSP() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsSparseDenseSP() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsSparseSparseSP() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredEqualsDenseDenseSP() 
+	{
+		runPPredTest(Type.EQUALS, false, false, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredEqualsDenseSparseSP() 
+	{
+		runPPredTest(Type.EQUALS, false, true, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredEqualsSparseDenseSP() 
+	{
+		runPPredTest(Type.EQUALS, true, false, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredEqualsSparseSparseSP() 
+	{
+		runPPredTest(Type.EQUALS, true, true, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredNotEqualsDenseDenseSP() 
+	{
+		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredNotEqualsDenseSparseSP() 
+	{
+		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredNotEqualsSparseDenseSP() 
+	{
+		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredNotEqualsSparseSparseSP() 
+	{
+		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredLessDenseDenseSP() 
+	{
+		runPPredTest(Type.LESS, false, false, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredLessDenseSparseSP() 
+	{
+		runPPredTest(Type.LESS, false, true, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredLessSparseDenseSP() 
+	{
+		runPPredTest(Type.LESS, true, false, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredLessSparseSparseSP() 
+	{
+		runPPredTest(Type.LESS, true, true, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredLessEqualsDenseDenseSP() 
+	{
+		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredLessEqualsDenseSparseSP() 
+	{
+		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredLessEqualsSparseDenseSP() 
+	{
+		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.SPARK);
+	}
+	
+	@Test
+	public void testPPredLessEqualsSparseSparseSP() 
+	{
+		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.SPARK);
+	}
+	// ----------------------
+	
+	@Test
+	public void testPPredGreaterDenseDenseMR() 
+	{
+		runPPredTest(Type.GREATER, false, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredGreaterDenseSparseMR() 
+	{
+		runPPredTest(Type.GREATER, false, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredGreaterSparseDenseMR() 
+	{
+		runPPredTest(Type.GREATER, true, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredGreaterSparseSparseMR() 
+	{
+		runPPredTest(Type.GREATER, true, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsDenseDenseMR() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsDenseSparseMR() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsSparseDenseMR() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsSparseSparseMR() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredEqualsDenseDenseMR() 
+	{
+		runPPredTest(Type.EQUALS, false, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredEqualsDenseSparseMR() 
+	{
+		runPPredTest(Type.EQUALS, false, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredEqualsSparseDenseMR() 
+	{
+		runPPredTest(Type.EQUALS, true, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredEqualsSparseSparseMR() 
+	{
+		runPPredTest(Type.EQUALS, true, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredNotEqualsDenseDenseMR() 
+	{
+		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredNotEqualsDenseSparseMR() 
+	{
+		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredNotEqualsSparseDenseMR() 
+	{
+		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredNotEqualsSparseSparseMR() 
+	{
+		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessDenseDenseMR() 
+	{
+		runPPredTest(Type.LESS, false, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessDenseSparseMR() 
+	{
+		runPPredTest(Type.LESS, false, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessSparseDenseMR() 
+	{
+		runPPredTest(Type.LESS, true, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessSparseSparseMR() 
+	{
+		runPPredTest(Type.LESS, true, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessEqualsDenseDenseMR() 
+	{
+		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessEqualsDenseSparseMR() 
+	{
+		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessEqualsSparseDenseMR() 
+	{
+		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessEqualsSparseSparseMR() 
+	{
+		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.MR);
+	}
+	
+	
+	/**
+	 * 
+	 * @param type
+	 * @param instType
+	 * @param sparse
+	 */
+	private void runPPredTest( Type type, boolean sp1, boolean sp2, ExecType et )
+	{
+		String TEST_NAME = TEST_NAME1;
+		int rows = rows1;
+		int cols = cols1;
+		    
+	    RUNTIME_PLATFORM platformOld = rtplatform;
+		switch( et ){
+			case MR: rtplatform = RUNTIME_PLATFORM.HADOOP; break;
+			case SPARK: rtplatform = RUNTIME_PLATFORM.SPARK; break;
+			default: rtplatform = RUNTIME_PLATFORM.HYBRID; break;
+		}
+		
+		boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;
+	    if( rtplatform == RUNTIME_PLATFORM.SPARK )
+			DMLScript.USE_LOCAL_SPARK_CONFIG = true;
+	
+		double sparsityLeft = sp1 ? sparsity2 : sparsity1;
+		double sparsityRight = sp2 ? sparsity2 : sparsity1;
+		
+		String TEST_CACHE_DIR = "";
+		if (TEST_CACHE_ENABLED) {
+			TEST_CACHE_DIR = type.ordinal() + "_" + rows + "_" + cols + "_" + sparsityLeft + "_" + sparsityRight + "/";
+		}
+		
+		try
+		{
+			TestConfiguration config = getTestConfiguration(TEST_NAME);
+			loadTestConfiguration(config, TEST_CACHE_DIR);
+			
+			/* This is for running the junit test the new way, i.e., construct the arguments directly */
+			String HOME = SCRIPT_DIR + TEST_DIR;
+			fullDMLScriptName = HOME + TEST_NAME + ".dml";
+			programArgs = new String[]{"-args", input("A"), input("B"), 
+				Integer.toString(type.ordinal()), output("C") };
+			
+			fullRScriptName = HOME + TEST_NAME + ".R";
+			rCmd = "Rscript" + " " + fullRScriptName + " " + inputDir() + " " + type.ordinal() + " " + expectedDir();
+	
+			//generate actual dataset
+			double[][] A = getRandomMatrix(rows, cols, -10, 10, sparsityLeft, 7); 
+			writeInputMatrixWithMTD("A", A, true);
+			double[][] B = getRandomMatrix(rows, cols, -15, 15, sparsityRight, 3); 
+			writeInputMatrixWithMTD("B", B, true);
+			
+			//run tests
+			runTest(true, false, null, -1); 
+			runRScript(true); 
+			
+			//compare matrices 
+			HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS("C");
+			HashMap<CellIndex, Double> rfile  = readRMatrixFromFS("C");
+			TestUtils.compareMatrices(dmlfile, rfile, eps, "Stat-DML", "Stat-R");
+		}
+		finally
+		{
+			rtplatform = platformOld;
+			DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;
+		}
+	}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalScalarLeftTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalScalarLeftTest.java b/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalScalarLeftTest.java
new file mode 100644
index 0000000..c23943b
--- /dev/null
+++ b/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalScalarLeftTest.java
@@ -0,0 +1,437 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.sysml.test.integration.functions.binary.matrix_full_other;
+
+import java.util.HashMap;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;
+import org.apache.sysml.lops.LopProperties.ExecType;
+import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;
+import org.apache.sysml.test.integration.AutomatedTestBase;
+import org.apache.sysml.test.integration.TestConfiguration;
+import org.apache.sysml.test.utils.TestUtils;
+
+/**
+ * The main purpose of this test is to verify the internal optimization regarding
+ * sparse-safeness of ppred for various input combinations. (ppred is not sparse-safe 
+ * in general, but for certain instances involving a 0 scalar it is).
+ * 
+ * Furthermore, it is used to test all combinations of matrix-scalar, scalar-matrix
+ * ppred operations in all execution types.
+ * 
+ */
+public class FullLogicalScalarLeftTest extends AutomatedTestBase 
+{
+	
+	private final static String TEST_NAME1 = "LogicalScalarLeftTest";
+	private final static String TEST_DIR = "functions/binary/matrix_full_other/";
+	private final static String TEST_CLASS_DIR = TEST_DIR + FullLogicalScalarLeftTest.class.getSimpleName() + "/";
+	private final static double eps = 1e-10;
+	
+	private final static int rows1 = 1072;
+	private final static int cols1 = 1009;
+	
+	private final static double sparsity1 = 0.7;
+	private final static double sparsity2 = 0.1;
+	
+	public enum Type{
+		GREATER,
+		LESS,
+		EQUALS,
+		NOT_EQUALS,
+		GREATER_EQUALS,
+		LESS_EQUALS,
+	}
+	
+	@BeforeClass
+	public static void init()
+	{
+		TestUtils.clearDirectory(TEST_DATA_DIR + TEST_CLASS_DIR);
+	}
+
+	@AfterClass
+	public static void cleanUp()
+	{
+		if (TEST_CACHE_ENABLED) {
+			TestUtils.clearDirectory(TEST_DATA_DIR + TEST_CLASS_DIR);
+		}
+	}
+
+	@Override
+	public void setUp() 
+	{
+		addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { "B" })   );
+		if (TEST_CACHE_ENABLED) {
+			setOutAndExpectedDeletionDisabled(true);
+		}
+	}
+
+	
+	@Test
+	public void testPPredGreaterZeroDenseCP() 
+	{
+		runPPredTest(Type.GREATER, true, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessZeroDenseCP() 
+	{
+		runPPredTest(Type.LESS, true, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredEqualsZeroDenseCP() 
+	{
+		runPPredTest(Type.EQUALS, true, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredNotEqualsZeroDenseCP() 
+	{
+		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsZeroDenseCP() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessEqualsZeroDenseCP() 
+	{
+		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.CP);
+	}
+
+	@Test
+	public void testPPredGreaterNonZeroDenseCP() 
+	{
+		runPPredTest(Type.GREATER, false, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessNonZeroDenseCP() 
+	{
+		runPPredTest(Type.LESS, false, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredEqualsNonZeroDenseCP() 
+	{
+		runPPredTest(Type.EQUALS, false, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredNotEqualsNonZeroDenseCP() 
+	{
+		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsNonZeroDenseCP() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessEqualsNonZeroDenseCP() 
+	{
+		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredGreaterZeroSparseCP() 
+	{
+		runPPredTest(Type.GREATER, true, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessZeroSparseCP() 
+	{
+		runPPredTest(Type.LESS, true, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredEqualsZeroSparseCP() 
+	{
+		runPPredTest(Type.EQUALS, true, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredNotEqualsZeroSparseCP() 
+	{
+		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsZeroSparseCP() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessEqualsZeroSparseCP() 
+	{
+		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.CP);
+	}
+
+	@Test
+	public void testPPredGreaterNonZeroSparseCP() 
+	{
+		runPPredTest(Type.GREATER, false, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessNonZeroSparseCP() 
+	{
+		runPPredTest(Type.LESS, false, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredEqualsNonZeroSparseCP() 
+	{
+		runPPredTest(Type.EQUALS, false, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredNotEqualsNonZeroSparseCP() 
+	{
+		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsNonZeroSparseCP() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessEqualsNonZeroSparseCP() 
+	{
+		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.CP);
+	}
+
+	@Test
+	public void testPPredGreaterZeroDenseMR() 
+	{
+		runPPredTest(Type.GREATER, true, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessZeroDenseMR() 
+	{
+		runPPredTest(Type.LESS, true, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredEqualsZeroDenseMR() 
+	{
+		runPPredTest(Type.EQUALS, true, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredNotEqualsZeroDenseMR() 
+	{
+		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsZeroDenseMR() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessEqualsZeroDenseMR() 
+	{
+		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.MR);
+	}
+
+	@Test
+	public void testPPredGreaterNonZeroDenseMR() 
+	{
+		runPPredTest(Type.GREATER, false, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessNonZeroDenseMR() 
+	{
+		runPPredTest(Type.LESS, false, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredEqualsNonZeroDenseMR() 
+	{
+		runPPredTest(Type.EQUALS, false, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredNotEqualsNonZeroDenseMR() 
+	{
+		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsNonZeroDenseMR() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessEqualsNonZeroDenseMR() 
+	{
+		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredGreaterZeroSparseMR() 
+	{
+		runPPredTest(Type.GREATER, true, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessZeroSparseMR() 
+	{
+		runPPredTest(Type.LESS, true, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredEqualsZeroSparseMR() 
+	{
+		runPPredTest(Type.EQUALS, true, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredNotEqualsZeroSparseMR() 
+	{
+		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsZeroSparseMR() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessEqualsZeroSparseMR() 
+	{
+		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.MR);
+	}
+
+	@Test
+	public void testPPredGreaterNonZeroSparseMR() 
+	{
+		runPPredTest(Type.GREATER, false, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessNonZeroSparseMR() 
+	{
+		runPPredTest(Type.LESS, false, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredEqualsNonZeroSparseMR() 
+	{
+		runPPredTest(Type.EQUALS, false, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredNotEqualsNonZeroSparseMR() 
+	{
+		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsNonZeroSparseMR() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessEqualsNonZeroSparseMR() 
+	{
+		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.MR);
+	}
+	
+	
+	/**
+	 * 
+	 * @param type
+	 * @param instType
+	 * @param sparse
+	 */
+	private void runPPredTest( Type type, boolean zero, boolean sparse, ExecType et )
+	{
+		String TEST_NAME = TEST_NAME1;
+		int rows = rows1;
+		int cols = cols1;
+		double sparsity = sparse ? sparsity2 : sparsity1;
+		double constant = zero ? 0 : 0.5;
+		
+		String TEST_CACHE_DIR = "";
+		if (TEST_CACHE_ENABLED) {
+			TEST_CACHE_DIR = type.ordinal() + "_" + constant + "_" + sparsity + "/";
+		}
+		
+		//rtplatform for MR
+		RUNTIME_PLATFORM platformOld = rtplatform;
+		rtplatform = (et==ExecType.MR) ? RUNTIME_PLATFORM.HADOOP : RUNTIME_PLATFORM.HYBRID;
+	
+		try
+		{
+			TestConfiguration config = getTestConfiguration(TEST_NAME);
+			
+			loadTestConfiguration(config, TEST_CACHE_DIR);
+			
+			/* This is for running the junit test the new way, i.e., construct the arguments directly */
+			String HOME = SCRIPT_DIR + TEST_DIR;
+			fullDMLScriptName = HOME + TEST_NAME + ".dml";
+			programArgs = new String[]{"-explain","-args", input("A"), 
+				Integer.toString(type.ordinal()), Double.toString(constant), output("B") };
+			
+			fullRScriptName = HOME + TEST_NAME + ".R";
+			rCmd = "Rscript" + " " + fullRScriptName + " " +  inputDir() + " " + 
+				type.ordinal() + " " + constant + " " + expectedDir();
+	
+			//generate actual dataset
+			double[][] A = getRandomMatrix(rows, cols, -1, 1, sparsity, 7); 
+			writeInputMatrixWithMTD("A", A, true);
+	
+			//run tests
+			runTest(true, false, null, -1); 
+			runRScript(true); 
+			
+			//compare matrices 
+			HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS("B");
+			HashMap<CellIndex, Double> rfile  = readRMatrixFromFS("B");
+			TestUtils.compareMatrices(dmlfile, rfile, eps, "Stat-DML", "Stat-R");
+		}
+		finally
+		{
+			rtplatform = platformOld;
+		}
+	}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalScalarRightTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalScalarRightTest.java b/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalScalarRightTest.java
new file mode 100644
index 0000000..30fcdac
--- /dev/null
+++ b/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalScalarRightTest.java
@@ -0,0 +1,436 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.sysml.test.integration.functions.binary.matrix_full_other;
+
+import java.util.HashMap;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;
+import org.apache.sysml.lops.LopProperties.ExecType;
+import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;
+import org.apache.sysml.test.integration.AutomatedTestBase;
+import org.apache.sysml.test.integration.TestConfiguration;
+import org.apache.sysml.test.utils.TestUtils;
+
+/**
+ * The main purpose of this test is to verify the internal optimization regarding
+ * sparse-safeness of ppred for various input combinations. (ppred is not sparse-safe 
+ * in general, but for certain instances involving a 0 scalar it is).
+ * 
+ * Furthermore, it is used to test all combinations of matrix-scalar, scalar-matrix
+ * ppred operations in all execution types.
+ * 
+ */
+public class FullLogicalScalarRightTest extends AutomatedTestBase 
+{
+	
+	private final static String TEST_NAME1 = "LogicalScalarRightTest";
+	private final static String TEST_DIR = "functions/binary/matrix_full_other/";
+	private final static String TEST_CLASS_DIR = TEST_DIR + FullLogicalScalarRightTest.class.getSimpleName() + "/";
+	private final static double eps = 1e-10;
+	
+	private final static int rows1 = 1072;
+	private final static int cols1 = 1009;
+	
+	private final static double sparsity1 = 0.7;
+	private final static double sparsity2 = 0.1;
+	
+	public enum Type{
+		GREATER,
+		LESS,
+		EQUALS,
+		NOT_EQUALS,
+		GREATER_EQUALS,
+		LESS_EQUALS,
+	}
+	
+	
+	@Override
+	public void setUp() 
+	{
+		addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { "B" })   );
+		if (TEST_CACHE_ENABLED) {
+			setOutAndExpectedDeletionDisabled(true);
+		}
+	}
+
+	@BeforeClass
+	public static void init()
+	{
+		TestUtils.clearDirectory(TEST_DATA_DIR + TEST_CLASS_DIR);
+	}
+
+	@AfterClass
+	public static void cleanUp()
+	{
+		if (TEST_CACHE_ENABLED) {
+			TestUtils.clearDirectory(TEST_DATA_DIR + TEST_CLASS_DIR);
+		}
+	}
+	
+	@Test
+	public void testPPredGreaterZeroDenseCP() 
+	{
+		runPPredTest(Type.GREATER, true, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessZeroDenseCP() 
+	{
+		runPPredTest(Type.LESS, true, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredEqualsZeroDenseCP() 
+	{
+		runPPredTest(Type.EQUALS, true, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredNotEqualsZeroDenseCP() 
+	{
+		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsZeroDenseCP() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessEqualsZeroDenseCP() 
+	{
+		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.CP);
+	}
+
+	@Test
+	public void testPPredGreaterNonZeroDenseCP() 
+	{
+		runPPredTest(Type.GREATER, false, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessNonZeroDenseCP() 
+	{
+		runPPredTest(Type.LESS, false, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredEqualsNonZeroDenseCP() 
+	{
+		runPPredTest(Type.EQUALS, false, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredNotEqualsNonZeroDenseCP() 
+	{
+		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsNonZeroDenseCP() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessEqualsNonZeroDenseCP() 
+	{
+		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredGreaterZeroSparseCP() 
+	{
+		runPPredTest(Type.GREATER, true, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessZeroSparseCP() 
+	{
+		runPPredTest(Type.LESS, true, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredEqualsZeroSparseCP() 
+	{
+		runPPredTest(Type.EQUALS, true, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredNotEqualsZeroSparseCP() 
+	{
+		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsZeroSparseCP() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessEqualsZeroSparseCP() 
+	{
+		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.CP);
+	}
+
+	@Test
+	public void testPPredGreaterNonZeroSparseCP() 
+	{
+		runPPredTest(Type.GREATER, false, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessNonZeroSparseCP() 
+	{
+		runPPredTest(Type.LESS, false, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredEqualsNonZeroSparseCP() 
+	{
+		runPPredTest(Type.EQUALS, false, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredNotEqualsNonZeroSparseCP() 
+	{
+		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsNonZeroSparseCP() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.CP);
+	}
+	
+	@Test
+	public void testPPredLessEqualsNonZeroSparseCP() 
+	{
+		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.CP);
+	}
+
+	@Test
+	public void testPPredGreaterZeroDenseMR() 
+	{
+		runPPredTest(Type.GREATER, true, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessZeroDenseMR() 
+	{
+		runPPredTest(Type.LESS, true, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredEqualsZeroDenseMR() 
+	{
+		runPPredTest(Type.EQUALS, true, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredNotEqualsZeroDenseMR() 
+	{
+		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsZeroDenseMR() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessEqualsZeroDenseMR() 
+	{
+		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.MR);
+	}
+
+	@Test
+	public void testPPredGreaterNonZeroDenseMR() 
+	{
+		runPPredTest(Type.GREATER, false, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessNonZeroDenseMR() 
+	{
+		runPPredTest(Type.LESS, false, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredEqualsNonZeroDenseMR() 
+	{
+		runPPredTest(Type.EQUALS, false, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredNotEqualsNonZeroDenseMR() 
+	{
+		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsNonZeroDenseMR() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessEqualsNonZeroDenseMR() 
+	{
+		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredGreaterZeroSparseMR() 
+	{
+		runPPredTest(Type.GREATER, true, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessZeroSparseMR() 
+	{
+		runPPredTest(Type.LESS, true, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredEqualsZeroSparseMR() 
+	{
+		runPPredTest(Type.EQUALS, true, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredNotEqualsZeroSparseMR() 
+	{
+		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsZeroSparseMR() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessEqualsZeroSparseMR() 
+	{
+		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.MR);
+	}
+
+	@Test
+	public void testPPredGreaterNonZeroSparseMR() 
+	{
+		runPPredTest(Type.GREATER, false, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessNonZeroSparseMR() 
+	{
+		runPPredTest(Type.LESS, false, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredEqualsNonZeroSparseMR() 
+	{
+		runPPredTest(Type.EQUALS, false, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredNotEqualsNonZeroSparseMR() 
+	{
+		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredGreaterEqualsNonZeroSparseMR() 
+	{
+		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.MR);
+	}
+	
+	@Test
+	public void testPPredLessEqualsNonZeroSparseMR() 
+	{
+		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.MR);
+	}
+	
+	
+	/**
+	 * 
+	 * @param type
+	 * @param zero
+	 * @param sparse
+	 * @param et
+	 */
+	private void runPPredTest( Type type, boolean zero, boolean sparse, ExecType et )
+	{
+		String TEST_NAME = TEST_NAME1;
+		int rows = rows1;
+		int cols = cols1;
+		double sparsity = sparse ? sparsity2 : sparsity1;
+		double constant = zero ? 0 : 0.5;
+		
+		String TEST_CACHE_DIR = "";
+		if (TEST_CACHE_ENABLED) {
+			TEST_CACHE_DIR = type.ordinal() + "_" + constant + "_" + sparsity + "/";
+		}
+		
+		//rtplatform for MR
+		RUNTIME_PLATFORM platformOld = rtplatform;
+		rtplatform = (et==ExecType.MR) ? RUNTIME_PLATFORM.HADOOP : RUNTIME_PLATFORM.HYBRID;
+	
+		try
+		{
+			TestConfiguration config = getTestConfiguration(TEST_NAME);
+			loadTestConfiguration(config, TEST_CACHE_DIR);
+			
+			/* This is for running the junit test the new way, i.e., construct the arguments directly */
+			String HOME = SCRIPT_DIR + TEST_DIR;
+			fullDMLScriptName = HOME + TEST_NAME + ".dml";
+			programArgs = new String[]{"-args", input("A"), 
+				Integer.toString(type.ordinal()), Double.toString(constant), output("B") };
+			
+			fullRScriptName = HOME + TEST_NAME + ".R";
+			rCmd = "Rscript" + " " + fullRScriptName + " " + inputDir() + " " + 
+				type.ordinal() + " " + constant + " " + expectedDir();
+	
+			//generate actual dataset
+			double[][] A = getRandomMatrix(rows, cols, -1, 1, sparsity, 7); 
+			writeInputMatrixWithMTD("A", A, true);
+			
+			//run tests
+			runTest(true, false, null, -1); 
+			runRScript(true); 
+			
+			//compare matrices 
+			HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS("B");
+			HashMap<CellIndex, Double> rfile  = readRMatrixFromFS("B");
+			TestUtils.compareMatrices(dmlfile, rfile, eps, "Stat-DML", "Stat-R");
+		}
+		finally
+		{
+			rtplatform = platformOld;
+		}
+	}
+}
\ No newline at end of file
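
The sparse-safeness optimization described in the class comment above can be
illustrated with a minimal DML sketch (S is a hypothetical input, not taken from
the test scripts): comparisons whose result is 0 for zero cells can be evaluated
over the non-zero cells only, whereas comparisons that map zero cells to 1 force
a dense output.

  S = rand(rows=1000, cols=1000, min=-1, max=1, sparsity=0.1)
  # sparse-safe: zero cells of S yield 0, so only non-zero cells need evaluation
  A = (S > 0)
  B = (S != 0)
  # not sparse-safe: zero cells of S yield 1, so the result densifies
  C = (S >= 0)
  D = (S == 0)
  print("nnz(A)=" + sum(A) + ", nnz(C)=" + sum(C))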

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullPPredMatrixTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullPPredMatrixTest.java b/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullPPredMatrixTest.java
deleted file mode 100644
index 5f2161e..0000000
--- a/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullPPredMatrixTest.java
+++ /dev/null
@@ -1,590 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.sysml.test.integration.functions.binary.matrix_full_other;
-
-import java.util.HashMap;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import org.apache.sysml.api.DMLScript;
-import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;
-import org.apache.sysml.lops.LopProperties.ExecType;
-import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;
-import org.apache.sysml.test.integration.AutomatedTestBase;
-import org.apache.sysml.test.integration.TestConfiguration;
-import org.apache.sysml.test.utils.TestUtils;
-
-/**
- * The main purpose of this test is to verify various input combinations for
- * matrix-matrix ppred operations that internally translate to binary operations.
- * 
- */
-public class FullPPredMatrixTest extends AutomatedTestBase 
-{
-	
-	private final static String TEST_NAME1 = "PPredMatrixTest";
-	private final static String TEST_DIR = "functions/binary/matrix_full_other/";
-	private final static String TEST_CLASS_DIR = TEST_DIR + FullPPredMatrixTest.class.getSimpleName() + "/";
-	private final static double eps = 1e-10;
-	
-	private final static int rows1 = 1383;
-	private final static int cols1 = 1432;
-	
-	private final static double sparsity1 = 0.7;
-	private final static double sparsity2 = 0.01;
-	
-	public enum Type{
-		GREATER,
-		LESS,
-		EQUALS,
-		NOT_EQUALS,
-		GREATER_EQUALS,
-		LESS_EQUALS,
-	}
-		
-	@Override
-	public void setUp() 
-	{
-		addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { "C" }) ); 
-		TestUtils.clearAssertionInformation();
-		if (TEST_CACHE_ENABLED) {
-			setOutAndExpectedDeletionDisabled(true);
-		}
-	}
-
-	@BeforeClass
-	public static void init()
-	{
-		TestUtils.clearDirectory(TEST_DATA_DIR + TEST_CLASS_DIR);
-	}
-
-	@AfterClass
-	public static void cleanUp()
-	{
-		if (TEST_CACHE_ENABLED) {
-			TestUtils.clearDirectory(TEST_DATA_DIR + TEST_CLASS_DIR);
-		}
-	}
-	
-	@Test
-	public void testPPredGreaterDenseDenseCP() 
-	{
-		runPPredTest(Type.GREATER, false, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredGreaterDenseSparseCP() 
-	{
-		runPPredTest(Type.GREATER, false, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredGreaterSparseDenseCP() 
-	{
-		runPPredTest(Type.GREATER, true, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredGreaterSparseSparseCP() 
-	{
-		runPPredTest(Type.GREATER, true, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsDenseDenseCP() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsDenseSparseCP() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsSparseDenseCP() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsSparseSparseCP() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredEqualsDenseDenseCP() 
-	{
-		runPPredTest(Type.EQUALS, false, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredEqualsDenseSparseCP() 
-	{
-		runPPredTest(Type.EQUALS, false, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredEqualsSparseDenseCP() 
-	{
-		runPPredTest(Type.EQUALS, true, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredEqualsSparseSparseCP() 
-	{
-		runPPredTest(Type.EQUALS, true, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredNotEqualsDenseDenseCP() 
-	{
-		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredNotEqualsDenseSparseCP() 
-	{
-		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredNotEqualsSparseDenseCP() 
-	{
-		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredNotEqualsSparseSparseCP() 
-	{
-		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessDenseDenseCP() 
-	{
-		runPPredTest(Type.LESS, false, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessDenseSparseCP() 
-	{
-		runPPredTest(Type.LESS, false, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessSparseDenseCP() 
-	{
-		runPPredTest(Type.LESS, true, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessSparseSparseCP() 
-	{
-		runPPredTest(Type.LESS, true, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessEqualsDenseDenseCP() 
-	{
-		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessEqualsDenseSparseCP() 
-	{
-		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessEqualsSparseDenseCP() 
-	{
-		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessEqualsSparseSparseCP() 
-	{
-		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.CP);
-	}
-	
-	
-	// ------------------------
-	@Test
-	public void testPPredGreaterDenseDenseSP() 
-	{
-		runPPredTest(Type.GREATER, false, false, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredGreaterDenseSparseSP() 
-	{
-		runPPredTest(Type.GREATER, false, true, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredGreaterSparseDenseSP() 
-	{
-		runPPredTest(Type.GREATER, true, false, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredGreaterSparseSparseSP() 
-	{
-		runPPredTest(Type.GREATER, true, true, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsDenseDenseSP() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsDenseSparseSP() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsSparseDenseSP() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsSparseSparseSP() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredEqualsDenseDenseSP() 
-	{
-		runPPredTest(Type.EQUALS, false, false, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredEqualsDenseSparseSP() 
-	{
-		runPPredTest(Type.EQUALS, false, true, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredEqualsSparseDenseSP() 
-	{
-		runPPredTest(Type.EQUALS, true, false, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredEqualsSparseSparseSP() 
-	{
-		runPPredTest(Type.EQUALS, true, true, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredNotEqualsDenseDenseSP() 
-	{
-		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredNotEqualsDenseSparseSP() 
-	{
-		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredNotEqualsSparseDenseSP() 
-	{
-		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredNotEqualsSparseSparseSP() 
-	{
-		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredLessDenseDenseSP() 
-	{
-		runPPredTest(Type.LESS, false, false, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredLessDenseSparseSP() 
-	{
-		runPPredTest(Type.LESS, false, true, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredLessSparseDenseSP() 
-	{
-		runPPredTest(Type.LESS, true, false, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredLessSparseSparseSP() 
-	{
-		runPPredTest(Type.LESS, true, true, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredLessEqualsDenseDenseSP() 
-	{
-		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredLessEqualsDenseSparseSP() 
-	{
-		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredLessEqualsSparseDenseSP() 
-	{
-		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.SPARK);
-	}
-	
-	@Test
-	public void testPPredLessEqualsSparseSparseSP() 
-	{
-		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.SPARK);
-	}
-	// ----------------------
-	
-	@Test
-	public void testPPredGreaterDenseDenseMR() 
-	{
-		runPPredTest(Type.GREATER, false, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredGreaterDenseSparseMR() 
-	{
-		runPPredTest(Type.GREATER, false, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredGreaterSparseDenseMR() 
-	{
-		runPPredTest(Type.GREATER, true, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredGreaterSparseSparseMR() 
-	{
-		runPPredTest(Type.GREATER, true, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsDenseDenseMR() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsDenseSparseMR() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsSparseDenseMR() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsSparseSparseMR() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredEqualsDenseDenseMR() 
-	{
-		runPPredTest(Type.EQUALS, false, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredEqualsDenseSparseMR() 
-	{
-		runPPredTest(Type.EQUALS, false, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredEqualsSparseDenseMR() 
-	{
-		runPPredTest(Type.EQUALS, true, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredEqualsSparseSparseMR() 
-	{
-		runPPredTest(Type.EQUALS, true, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredNotEqualsDenseDenseMR() 
-	{
-		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredNotEqualsDenseSparseMR() 
-	{
-		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredNotEqualsSparseDenseMR() 
-	{
-		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredNotEqualsSparseSparseMR() 
-	{
-		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessDenseDenseMR() 
-	{
-		runPPredTest(Type.LESS, false, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessDenseSparseMR() 
-	{
-		runPPredTest(Type.LESS, false, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessSparseDenseMR() 
-	{
-		runPPredTest(Type.LESS, true, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessSparseSparseMR() 
-	{
-		runPPredTest(Type.LESS, true, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessEqualsDenseDenseMR() 
-	{
-		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessEqualsDenseSparseMR() 
-	{
-		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessEqualsSparseDenseMR() 
-	{
-		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessEqualsSparseSparseMR() 
-	{
-		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.MR);
-	}
-	
-	
-	/**
-	 * 
-	 * @param type
-	 * @param instType
-	 * @param sparse
-	 */
-	private void runPPredTest( Type type, boolean sp1, boolean sp2, ExecType et )
-	{
-		String TEST_NAME = TEST_NAME1;
-		int rows = rows1;
-		int cols = cols1;
-		    
-	    RUNTIME_PLATFORM platformOld = rtplatform;
-		switch( et ){
-			case MR: rtplatform = RUNTIME_PLATFORM.HADOOP; break;
-			case SPARK: rtplatform = RUNTIME_PLATFORM.SPARK; break;
-			default: rtplatform = RUNTIME_PLATFORM.HYBRID; break;
-		}
-		
-		boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;
-	    if( rtplatform == RUNTIME_PLATFORM.SPARK )
-			DMLScript.USE_LOCAL_SPARK_CONFIG = true;
-	
-		double sparsityLeft = sp1 ? sparsity2 : sparsity1;
-		double sparsityRight = sp2 ? sparsity2 : sparsity1;
-		
-		String TEST_CACHE_DIR = "";
-		if (TEST_CACHE_ENABLED) {
-			TEST_CACHE_DIR = type.ordinal() + "_" + rows + "_" + cols + "_" + sparsityLeft + "_" + sparsityRight + "/";
-		}
-		
-		try
-		{
-			TestConfiguration config = getTestConfiguration(TEST_NAME);
-			loadTestConfiguration(config, TEST_CACHE_DIR);
-			
-			/* This is for running the junit test the new way, i.e., construct the arguments directly */
-			String HOME = SCRIPT_DIR + TEST_DIR;
-			fullDMLScriptName = HOME + TEST_NAME + ".dml";
-			programArgs = new String[]{"-args", input("A"), input("B"), 
-				Integer.toString(type.ordinal()), output("C") };
-			
-			fullRScriptName = HOME + TEST_NAME + ".R";
-			rCmd = "Rscript" + " " + fullRScriptName + " " + inputDir() + " " + type.ordinal() + " " + expectedDir();
-	
-			//generate actual dataset
-			double[][] A = getRandomMatrix(rows, cols, -10, 10, sparsityLeft, 7); 
-			writeInputMatrixWithMTD("A", A, true);
-			double[][] B = getRandomMatrix(rows, cols, -15, 15, sparsityRight, 3); 
-			writeInputMatrixWithMTD("B", B, true);
-			
-			//run tests
-			runTest(true, false, null, -1); 
-			runRScript(true); 
-			
-			//compare matrices 
-			HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS("C");
-			HashMap<CellIndex, Double> rfile  = readRMatrixFromFS("C");
-			TestUtils.compareMatrices(dmlfile, rfile, eps, "Stat-DML", "Stat-R");
-		}
-		finally
-		{
-			rtplatform = platformOld;
-			DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;
-		}
-	}
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullPPredScalarLeftTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullPPredScalarLeftTest.java b/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullPPredScalarLeftTest.java
deleted file mode 100644
index 1ddf995..0000000
--- a/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullPPredScalarLeftTest.java
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.sysml.test.integration.functions.binary.matrix_full_other;
-
-import java.util.HashMap;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;
-import org.apache.sysml.lops.LopProperties.ExecType;
-import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;
-import org.apache.sysml.test.integration.AutomatedTestBase;
-import org.apache.sysml.test.integration.TestConfiguration;
-import org.apache.sysml.test.utils.TestUtils;
-
-/**
- * The main purpose of this test is to verify the internal optimization regarding
- * sparse-safeness of ppred for various input combinations. (ppred is not sparse-safe 
- * in general, but for certain instance involving 0 scalar it is).
- * 
- * Furthermore, it is used to test all combinations of matrix-scalar, scalar-matrix
- * ppred operations in all execution types.
- * 
- */
-public class FullPPredScalarLeftTest extends AutomatedTestBase 
-{
-	
-	private final static String TEST_NAME1 = "PPredScalarLeftTest";
-	private final static String TEST_DIR = "functions/binary/matrix_full_other/";
-	private final static String TEST_CLASS_DIR = TEST_DIR + FullPPredScalarLeftTest.class.getSimpleName() + "/";
-	private final static double eps = 1e-10;
-	
-	private final static int rows1 = 1072;
-	private final static int cols1 = 1009;
-	
-	private final static double sparsity1 = 0.7;
-	private final static double sparsity2 = 0.1;
-	
-	public enum Type{
-		GREATER,
-		LESS,
-		EQUALS,
-		NOT_EQUALS,
-		GREATER_EQUALS,
-		LESS_EQUALS,
-	}
-	
-	@BeforeClass
-	public static void init()
-	{
-		TestUtils.clearDirectory(TEST_DATA_DIR + TEST_CLASS_DIR);
-	}
-
-	@AfterClass
-	public static void cleanUp()
-	{
-		if (TEST_CACHE_ENABLED) {
-			TestUtils.clearDirectory(TEST_DATA_DIR + TEST_CLASS_DIR);
-		}
-	}
-
-	@Override
-	public void setUp() 
-	{
-		addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { "B" })   );
-		if (TEST_CACHE_ENABLED) {
-			setOutAndExpectedDeletionDisabled(true);
-		}
-	}
-
-	
-	@Test
-	public void testPPredGreaterZeroDenseCP() 
-	{
-		runPPredTest(Type.GREATER, true, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessZeroDenseCP() 
-	{
-		runPPredTest(Type.LESS, true, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredEqualsZeroDenseCP() 
-	{
-		runPPredTest(Type.EQUALS, true, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredNotEqualsZeroDenseCP() 
-	{
-		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsZeroDenseCP() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessEqualsZeroDenseCP() 
-	{
-		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.CP);
-	}
-
-	@Test
-	public void testPPredGreaterNonZeroDenseCP() 
-	{
-		runPPredTest(Type.GREATER, false, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessNonZeroDenseCP() 
-	{
-		runPPredTest(Type.LESS, false, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredEqualsNonZeroDenseCP() 
-	{
-		runPPredTest(Type.EQUALS, false, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredNotEqualsNonZeroDenseCP() 
-	{
-		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsNonZeroDenseCP() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessEqualsNonZeroDenseCP() 
-	{
-		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredGreaterZeroSparseCP() 
-	{
-		runPPredTest(Type.GREATER, true, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessZeroSparseCP() 
-	{
-		runPPredTest(Type.LESS, true, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredEqualsZeroSparseCP() 
-	{
-		runPPredTest(Type.EQUALS, true, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredNotEqualsZeroSparseCP() 
-	{
-		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsZeroSparseCP() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessEqualsZeroSparseCP() 
-	{
-		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.CP);
-	}
-
-	@Test
-	public void testPPredGreaterNonZeroSparseCP() 
-	{
-		runPPredTest(Type.GREATER, false, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessNonZeroSparseCP() 
-	{
-		runPPredTest(Type.LESS, false, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredEqualsNonZeroSparseCP() 
-	{
-		runPPredTest(Type.EQUALS, false, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredNotEqualsNonZeroSparseCP() 
-	{
-		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsNonZeroSparseCP() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessEqualsNonZeroSparseCP() 
-	{
-		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.CP);
-	}
-
-	@Test
-	public void testPPredGreaterZeroDenseMR() 
-	{
-		runPPredTest(Type.GREATER, true, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessZeroDenseMR() 
-	{
-		runPPredTest(Type.LESS, true, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredEqualsZeroDenseMR() 
-	{
-		runPPredTest(Type.EQUALS, true, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredNotEqualsZeroDenseMR() 
-	{
-		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsZeroDenseMR() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessEqualsZeroDenseMR() 
-	{
-		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.MR);
-	}
-
-	@Test
-	public void testPPredGreaterNonZeroDenseMR() 
-	{
-		runPPredTest(Type.GREATER, false, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessNonZeroDenseMR() 
-	{
-		runPPredTest(Type.LESS, false, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredEqualsNonZeroDenseMR() 
-	{
-		runPPredTest(Type.EQUALS, false, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredNotEqualsNonZeroDenseMR() 
-	{
-		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsNonZeroDenseMR() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessEqualsNonZeroDenseMR() 
-	{
-		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredGreaterZeroSparseMR() 
-	{
-		runPPredTest(Type.GREATER, true, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessZeroSparseMR() 
-	{
-		runPPredTest(Type.LESS, true, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredEqualsZeroSparseMR() 
-	{
-		runPPredTest(Type.EQUALS, true, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredNotEqualsZeroSparseMR() 
-	{
-		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsZeroSparseMR() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessEqualsZeroSparseMR() 
-	{
-		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.MR);
-	}
-
-	@Test
-	public void testPPredGreaterNonZeroSparseMR() 
-	{
-		runPPredTest(Type.GREATER, false, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessNonZeroSparseMR() 
-	{
-		runPPredTest(Type.LESS, false, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredEqualsNonZeroSparseMR() 
-	{
-		runPPredTest(Type.EQUALS, false, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredNotEqualsNonZeroSparseMR() 
-	{
-		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsNonZeroSparseMR() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessEqualsNonZeroSparseMR() 
-	{
-		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.MR);
-	}
-	
-	
-	/**
-	 * 
-	 * @param type
-	 * @param instType
-	 * @param sparse
-	 */
-	private void runPPredTest( Type type, boolean zero, boolean sparse, ExecType et )
-	{
-		String TEST_NAME = TEST_NAME1;
-		int rows = rows1;
-		int cols = cols1;
-		double sparsity = sparse ? sparsity2 : sparsity1;
-		double constant = zero ? 0 : 0.5;
-		
-		String TEST_CACHE_DIR = "";
-		if (TEST_CACHE_ENABLED) {
-			TEST_CACHE_DIR = type.ordinal() + "_" + constant + "_" + sparsity + "/";
-		}
-		
-		//rtplatform for MR
-		RUNTIME_PLATFORM platformOld = rtplatform;
-		rtplatform = (et==ExecType.MR) ? RUNTIME_PLATFORM.HADOOP : RUNTIME_PLATFORM.HYBRID;
-	
-		try
-		{
-			TestConfiguration config = getTestConfiguration(TEST_NAME);
-			
-			loadTestConfiguration(config, TEST_CACHE_DIR);
-			
-			/* This is for running the junit test the new way, i.e., construct the arguments directly */
-			String HOME = SCRIPT_DIR + TEST_DIR;
-			fullDMLScriptName = HOME + TEST_NAME + ".dml";
-			programArgs = new String[]{"-explain","-args", input("A"), 
-				Integer.toString(type.ordinal()), Double.toString(constant), output("B") };
-			
-			fullRScriptName = HOME + TEST_NAME + ".R";
-			rCmd = "Rscript" + " " + fullRScriptName + " " +  inputDir() + " " + 
-				type.ordinal() + " " + constant + " " + expectedDir();
-	
-			//generate actual dataset
-			double[][] A = getRandomMatrix(rows, cols, -1, 1, sparsity, 7); 
-			writeInputMatrixWithMTD("A", A, true);
-	
-			//run tests
-			runTest(true, false, null, -1); 
-			runRScript(true); 
-			
-			//compare matrices 
-			HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS("B");
-			HashMap<CellIndex, Double> rfile  = readRMatrixFromFS("B");
-			TestUtils.compareMatrices(dmlfile, rfile, eps, "Stat-DML", "Stat-R");
-		}
-		finally
-		{
-			rtplatform = platformOld;
-		}
-	}
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullPPredScalarRightTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullPPredScalarRightTest.java b/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullPPredScalarRightTest.java
deleted file mode 100644
index 40139e0..0000000
--- a/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullPPredScalarRightTest.java
+++ /dev/null
@@ -1,436 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.sysml.test.integration.functions.binary.matrix_full_other;
-
-import java.util.HashMap;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;
-import org.apache.sysml.lops.LopProperties.ExecType;
-import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;
-import org.apache.sysml.test.integration.AutomatedTestBase;
-import org.apache.sysml.test.integration.TestConfiguration;
-import org.apache.sysml.test.utils.TestUtils;
-
-/**
- * The main purpose of this test is to verify the internal optimization regarding
- * sparse-safeness of ppred for various input combinations. (ppred is not sparse-safe 
- * in general, but for certain instance involving 0 scalar it is).
- * 
- * Furthermore, it is used to test all combinations of matrix-scalar, scalar-matrix
- * ppred operations in all execution types.
- * 
- */
-public class FullPPredScalarRightTest extends AutomatedTestBase 
-{
-	
-	private final static String TEST_NAME1 = "PPredScalarRightTest";
-	private final static String TEST_DIR = "functions/binary/matrix_full_other/";
-	private final static String TEST_CLASS_DIR = TEST_DIR + FullPPredScalarRightTest.class.getSimpleName() + "/";
-	private final static double eps = 1e-10;
-	
-	private final static int rows1 = 1072;
-	private final static int cols1 = 1009;
-	
-	private final static double sparsity1 = 0.7;
-	private final static double sparsity2 = 0.1;
-	
-	public enum Type{
-		GREATER,
-		LESS,
-		EQUALS,
-		NOT_EQUALS,
-		GREATER_EQUALS,
-		LESS_EQUALS,
-	}
-	
-	
-	@Override
-	public void setUp() 
-	{
-		addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { "B" })   );
-		if (TEST_CACHE_ENABLED) {
-			setOutAndExpectedDeletionDisabled(true);
-		}
-	}
-
-	@BeforeClass
-	public static void init()
-	{
-		TestUtils.clearDirectory(TEST_DATA_DIR + TEST_CLASS_DIR);
-	}
-
-	@AfterClass
-	public static void cleanUp()
-	{
-		if (TEST_CACHE_ENABLED) {
-			TestUtils.clearDirectory(TEST_DATA_DIR + TEST_CLASS_DIR);
-		}
-	}
-	
-	@Test
-	public void testPPredGreaterZeroDenseCP() 
-	{
-		runPPredTest(Type.GREATER, true, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessZeroDenseCP() 
-	{
-		runPPredTest(Type.LESS, true, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredEqualsZeroDenseCP() 
-	{
-		runPPredTest(Type.EQUALS, true, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredNotEqualsZeroDenseCP() 
-	{
-		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsZeroDenseCP() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessEqualsZeroDenseCP() 
-	{
-		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.CP);
-	}
-
-	@Test
-	public void testPPredGreaterNonZeroDenseCP() 
-	{
-		runPPredTest(Type.GREATER, false, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessNonZeroDenseCP() 
-	{
-		runPPredTest(Type.LESS, false, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredEqualsNonZeroDenseCP() 
-	{
-		runPPredTest(Type.EQUALS, false, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredNotEqualsNonZeroDenseCP() 
-	{
-		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsNonZeroDenseCP() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessEqualsNonZeroDenseCP() 
-	{
-		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredGreaterZeroSparseCP() 
-	{
-		runPPredTest(Type.GREATER, true, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessZeroSparseCP() 
-	{
-		runPPredTest(Type.LESS, true, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredEqualsZeroSparseCP() 
-	{
-		runPPredTest(Type.EQUALS, true, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredNotEqualsZeroSparseCP() 
-	{
-		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsZeroSparseCP() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessEqualsZeroSparseCP() 
-	{
-		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.CP);
-	}
-
-	@Test
-	public void testPPredGreaterNonZeroSparseCP() 
-	{
-		runPPredTest(Type.GREATER, false, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessNonZeroSparseCP() 
-	{
-		runPPredTest(Type.LESS, false, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredEqualsNonZeroSparseCP() 
-	{
-		runPPredTest(Type.EQUALS, false, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredNotEqualsNonZeroSparseCP() 
-	{
-		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsNonZeroSparseCP() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.CP);
-	}
-	
-	@Test
-	public void testPPredLessEqualsNonZeroSparseCP() 
-	{
-		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.CP);
-	}
-
-	@Test
-	public void testPPredGreaterZeroDenseMR() 
-	{
-		runPPredTest(Type.GREATER, true, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessZeroDenseMR() 
-	{
-		runPPredTest(Type.LESS, true, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredEqualsZeroDenseMR() 
-	{
-		runPPredTest(Type.EQUALS, true, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredNotEqualsZeroDenseMR() 
-	{
-		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsZeroDenseMR() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessEqualsZeroDenseMR() 
-	{
-		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.MR);
-	}
-
-	@Test
-	public void testPPredGreaterNonZeroDenseMR() 
-	{
-		runPPredTest(Type.GREATER, false, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessNonZeroDenseMR() 
-	{
-		runPPredTest(Type.LESS, false, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredEqualsNonZeroDenseMR() 
-	{
-		runPPredTest(Type.EQUALS, false, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredNotEqualsNonZeroDenseMR() 
-	{
-		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsNonZeroDenseMR() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessEqualsNonZeroDenseMR() 
-	{
-		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredGreaterZeroSparseMR() 
-	{
-		runPPredTest(Type.GREATER, true, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessZeroSparseMR() 
-	{
-		runPPredTest(Type.LESS, true, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredEqualsZeroSparseMR() 
-	{
-		runPPredTest(Type.EQUALS, true, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredNotEqualsZeroSparseMR() 
-	{
-		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsZeroSparseMR() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessEqualsZeroSparseMR() 
-	{
-		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.MR);
-	}
-
-	@Test
-	public void testPPredGreaterNonZeroSparseMR() 
-	{
-		runPPredTest(Type.GREATER, false, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessNonZeroSparseMR() 
-	{
-		runPPredTest(Type.LESS, false, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredEqualsNonZeroSparseMR() 
-	{
-		runPPredTest(Type.EQUALS, false, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredNotEqualsNonZeroSparseMR() 
-	{
-		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredGreaterEqualsNonZeroSparseMR() 
-	{
-		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.MR);
-	}
-	
-	@Test
-	public void testPPredLessEqualsNonZeroSparseMR() 
-	{
-		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.MR);
-	}
-	
-	
-	/**
-	 * 
-	 * @param type
-	 * @param instType
-	 * @param sparse
-	 */
-	private void runPPredTest( Type type, boolean zero, boolean sparse, ExecType et )
-	{
-		String TEST_NAME = TEST_NAME1;
-		int rows = rows1;
-		int cols = cols1;
-		double sparsity = sparse ? sparsity2 : sparsity1;
-		double constant = zero ? 0 : 0.5;
-		
-		String TEST_CACHE_DIR = "";
-		if (TEST_CACHE_ENABLED) {
-			TEST_CACHE_DIR = type.ordinal() + "_" + constant + "_" + sparsity + "/";
-		}
-		
-		//rtplatform for MR
-		RUNTIME_PLATFORM platformOld = rtplatform;
-		rtplatform = (et==ExecType.MR) ? RUNTIME_PLATFORM.HADOOP : RUNTIME_PLATFORM.HYBRID;
-	
-		try
-		{
-			TestConfiguration config = getTestConfiguration(TEST_NAME);
-			loadTestConfiguration(config, TEST_CACHE_DIR);
-			
-			/* This is for running the junit test the new way, i.e., construct the arguments directly */
-			String HOME = SCRIPT_DIR + TEST_DIR;
-			fullDMLScriptName = HOME + TEST_NAME + ".dml";
-			programArgs = new String[]{"-args", input("A"), 
-				Integer.toString(type.ordinal()), Double.toString(constant), output("B") };
-			
-			fullRScriptName = HOME + TEST_NAME + ".R";
-			rCmd = "Rscript" + " " + fullRScriptName + " " + inputDir() + " " + 
-				type.ordinal() + " " + constant + " " + expectedDir();
-	
-			//generate actual dataset
-			double[][] A = getRandomMatrix(rows, cols, -1, 1, sparsity, 7); 
-			writeInputMatrixWithMTD("A", A, true);
-			
-			//run tests
-			runTest(true, false, null, -1); 
-			runRScript(true); 
-			
-			//compare matrices 
-			HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS("B");
-			HashMap<CellIndex, Double> rfile  = readRMatrixFromFS("B");
-			TestUtils.compareMatrices(dmlfile, rfile, eps, "Stat-DML", "Stat-R");
-		}
-		finally
-		{
-			rtplatform = platformOld;
-		}
-	}
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/java/org/apache/sysml/test/integration/functions/ternary/CTableMatrixIgnoreZerosTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/sysml/test/integration/functions/ternary/CTableMatrixIgnoreZerosTest.java b/src/test/java/org/apache/sysml/test/integration/functions/ternary/CTableMatrixIgnoreZerosTest.java
index 75c5330..14464bb 100644
--- a/src/test/java/org/apache/sysml/test/integration/functions/ternary/CTableMatrixIgnoreZerosTest.java
+++ b/src/test/java/org/apache/sysml/test/integration/functions/ternary/CTableMatrixIgnoreZerosTest.java
@@ -35,7 +35,7 @@ import org.apache.sysml.test.utils.TestUtils;
 /**
  * This test investigates the specific Hop-Lop rewrite for the following pattern:
  * 
- * IA = ppred (A, 0, "!=") * seq (1, nrow (A), 1);
+ * IA = (A != 0) * seq (1, nrow (A), 1);
  * IA = matrix (IA, rows = (nrow (A) * ncol(A)), cols = 1, byrow = FALSE);
  * VA = matrix ( A, rows = (nrow (A) * ncol(A)), cols = 1, byrow = FALSE);
  * IA = removeEmpty (target = IA, margin = "rows");

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/apply-transform/apply-transform.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/apply-transform/apply-transform.dml b/src/test/scripts/applications/apply-transform/apply-transform.dml
index 5110cb0..2c9d0ca 100644
--- a/src/test/scripts/applications/apply-transform/apply-transform.dml
+++ b/src/test/scripts/applications/apply-transform/apply-transform.dml
@@ -119,8 +119,8 @@ parfor(i in 1:ncol(X), check=0){
 	
 		# note that max_val entries will get assigned num_bins+1
 		col = round((col - min_val)/bin_width - 0.5) + 1
-		less_than_lb = ppred(col, 1, "<")
-		more_than_ub = ppred(col, num_bins, ">")
+		less_than_lb = (col < 1)
+		more_than_ub = (col > num_bins)
 		
 		col = (1 - less_than_lb - more_than_ub)*col + more_than_ub*num_bins + less_than_lb
 	}

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/apply-transform/apply-transform.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/apply-transform/apply-transform.pydml b/src/test/scripts/applications/apply-transform/apply-transform.pydml
index d528098..84d8779 100644
--- a/src/test/scripts/applications/apply-transform/apply-transform.pydml
+++ b/src/test/scripts/applications/apply-transform/apply-transform.pydml
@@ -116,8 +116,8 @@ parfor(i in 1:ncol(X), check=0):
 
         # note that max_val entries will get assigned num_bins+1
         col = round((col - min_val)/bin_width - 0.5) + 1
-        less_than_lb = ppred(col, 1, "<")
-        more_than_ub = ppred(col, num_bins, ">")
+        less_than_lb = (col < 1)
+        more_than_ub = (col > num_bins)
 
         col = (1 - less_than_lb - more_than_ub)*col + more_than_ub*num_bins + less_than_lb
 

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/arima_box-jenkins/arima.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/arima_box-jenkins/arima.dml b/src/test/scripts/applications/arima_box-jenkins/arima.dml
index e21b75e..188aaa2 100644
--- a/src/test/scripts/applications/arima_box-jenkins/arima.dml
+++ b/src/test/scripts/applications/arima_box-jenkins/arima.dml
@@ -65,7 +65,7 @@ arima_css = function(Matrix[Double] w, Matrix[Double] X, Integer pIn, Integer P,
   	iter = 0
   	
 	if(useJacobi == 1){
-		check = sum(ppred(rowSums(abs(R)), 1, ">="))
+		check = sum(rowSums(abs(R)) >= 1)
 		if(check > 0){
 			print("R is not diagonal dominant. Suggest switching to an exact solver.")
 		}

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/arima_box-jenkins/arima.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/arima_box-jenkins/arima.pydml b/src/test/scripts/applications/arima_box-jenkins/arima.pydml
index d1299c9..2546ec4 100644
--- a/src/test/scripts/applications/arima_box-jenkins/arima.pydml
+++ b/src/test/scripts/applications/arima_box-jenkins/arima.pydml
@@ -62,7 +62,7 @@ def arima_css(w:matrix[float], X:matrix[float], pIn: int, P: int, qIn: int, Q:in
     iter = 0
     
     if(useJacobi == 1):
-        check = sum(ppred(rowSums(abs(R)), 1, ">="))
+        check = sum(rowSums(abs(R)) >= 1)
         if(check > 0):
             print("R is not diagonal dominant. Suggest switching to an exact solver.")
         diff = tol+1.0

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/cspline/CsplineCG.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/cspline/CsplineCG.dml b/src/test/scripts/applications/cspline/CsplineCG.dml
index 2281270..8193561 100644
--- a/src/test/scripts/applications/cspline/CsplineCG.dml
+++ b/src/test/scripts/applications/cspline/CsplineCG.dml
@@ -167,7 +167,7 @@ interpSpline = function(
 ) return (double q) {
 
   #first find the right knots for interpolation
-  i = as.integer(nrow(X) - sum(ppred(X, x, ">=")) + 1)
+  i = as.integer(nrow(X) - sum(X >= x) + 1)
 
   #calc the y as per the algo docs
   t = (x - X[i-1,1]) / ( X[i,1] - X[i-1,1])

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/cspline/CsplineCG.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/cspline/CsplineCG.pydml b/src/test/scripts/applications/cspline/CsplineCG.pydml
index 8e84f14..9451fb9 100644
--- a/src/test/scripts/applications/cspline/CsplineCG.pydml
+++ b/src/test/scripts/applications/cspline/CsplineCG.pydml
@@ -145,7 +145,7 @@ def calcKnotsDerivKs(X: matrix[float], Y: matrix[float], max_iteration: int, tol
 def interpSpline(x: float, X: matrix[float], Y: matrix[float], K: matrix[float]) -> (q: float):
     
     #first find the right knots for interpolation
-    i = nrow(X) - sum(ppred(X, x, ">=")) + 1
+    i = nrow(X) - sum(X >= x) + 1
     
     #calc the y as per the algo docs
     t = (x - X[i-2,0]) / ( X[i-1,0] - X[i-2,0])

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/cspline/CsplineDS.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/cspline/CsplineDS.dml b/src/test/scripts/applications/cspline/CsplineDS.dml
index ff34b5e..d81c215 100644
--- a/src/test/scripts/applications/cspline/CsplineDS.dml
+++ b/src/test/scripts/applications/cspline/CsplineDS.dml
@@ -142,7 +142,7 @@ interpSpline = function(
 ) return (double q) {
 
   #first find the right knots for interpolation
-  i = as.integer(nrow(X) - sum(ppred(X, x, ">=")) + 1)
+  i = as.integer(nrow(X) - sum(X >= x) + 1)
 
   #calc the y as per the algo docs
   t = (x - X[i-1,1]) / ( X[i,1] - X[i-1,1])

http://git-wip-us.apache.org/repos/asf/systemml/blob/d30e1888/src/test/scripts/applications/cspline/CsplineDS.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/cspline/CsplineDS.pydml b/src/test/scripts/applications/cspline/CsplineDS.pydml
index dc16661..0efc681 100644
--- a/src/test/scripts/applications/cspline/CsplineDS.pydml
+++ b/src/test/scripts/applications/cspline/CsplineDS.pydml
@@ -124,7 +124,7 @@ def calcKnotsDerivKs(X: matrix[float], Y: matrix[float]) -> (K: matrix[float]):
 def interpSpline(x: float, X: matrix[float], Y: matrix[float], K: matrix[float]) -> (q: float):
     
     #first find the right knots for interpolation
-    i = nrow(X) - sum(ppred(X, x, ">=")) + 1
+    i = nrow(X) - sum(X >= x) + 1
     
     #calc the y as per the algo docs
     t = (x - X[i-2,0]) / ( X[i-1,0] - X[i-2,0])


[4/4] systemml git commit: [SYSTEMML-1799] Update ppred test classes to logical

Posted by de...@apache.org.
[SYSTEMML-1799] Update ppred test classes to logical

Update content of test classes from ppred to logical. This is
done as a second commit to preserve the commit history for these
files.

Closes #591.
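
For reference, the rewrite exercised by these test classes maps ppred calls to
the equivalent relational operators. A minimal DML sketch of the mapping
(X and thresh are illustrative names, not taken from the modified files):

  X = rand(rows=10, cols=10, min=-1, max=1, sparsity=0.5)
  thresh = 0.5
  # formerly: M1 = ppred(X, thresh, ">")
  M1 = (X > thresh)     # matrix-scalar comparison
  M2 = (thresh <= X)    # scalar-matrix comparison
  M3 = (X != 0)         # sparse-safe special case with a 0 scalar
  print("cells above thresh: " + sum(M1))

The operator form is intended to behave the same as the former ppred builtin,
which is why these commits change only the test names and syntax, not the
expected results.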


Project: http://git-wip-us.apache.org/repos/asf/systemml/repo
Commit: http://git-wip-us.apache.org/repos/asf/systemml/commit/1a3d85f9
Tree: http://git-wip-us.apache.org/repos/asf/systemml/tree/1a3d85f9
Diff: http://git-wip-us.apache.org/repos/asf/systemml/diff/1a3d85f9

Branch: refs/heads/master
Commit: 1a3d85f91378541fefc5a5c81c48be57549b267f
Parents: d30e188
Author: Deron Eriksson <de...@apache.org>
Authored: Mon Jul 24 15:08:28 2017 -0700
Committer: Deron Eriksson <de...@apache.org>
Committed: Mon Jul 24 15:08:28 2017 -0700

----------------------------------------------------------------------
 .../FullLogicalMatrixTest.java                  | 501 +++++++++----------
 .../FullLogicalScalarLeftTest.java              | 352 +++++++------
 .../FullLogicalScalarRightTest.java             | 352 +++++++------
 3 files changed, 593 insertions(+), 612 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/systemml/blob/1a3d85f9/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalMatrixTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalMatrixTest.java b/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalMatrixTest.java
index 9563283..cb7ec9d 100644
--- a/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalMatrixTest.java
+++ b/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalMatrixTest.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * 
+ *
  *   http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing,
  * software distributed under the License is distributed on an
  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -35,23 +35,23 @@ import org.apache.sysml.test.utils.TestUtils;
 
 /**
  * The main purpose of this test is to verify various input combinations for
- * matrix-matrix ppred operations that internally translate to binary operations.
- * 
+ * matrix-matrix logical operations that internally translate to binary operations.
+ *
  */
-public class FullLogicalMatrixTest extends AutomatedTestBase 
+public class FullLogicalMatrixTest extends AutomatedTestBase
 {
-	
+
 	private final static String TEST_NAME1 = "LogicalMatrixTest";
 	private final static String TEST_DIR = "functions/binary/matrix_full_other/";
 	private final static String TEST_CLASS_DIR = TEST_DIR + FullLogicalMatrixTest.class.getSimpleName() + "/";
 	private final static double eps = 1e-10;
-	
+
 	private final static int rows1 = 1383;
 	private final static int cols1 = 1432;
-	
+
 	private final static double sparsity1 = 0.7;
 	private final static double sparsity2 = 0.01;
-	
+
 	public enum Type{
 		GREATER,
 		LESS,
@@ -60,11 +60,11 @@ public class FullLogicalMatrixTest extends AutomatedTestBase
 		GREATER_EQUALS,
 		LESS_EQUALS,
 	}
-		
+
 	@Override
-	public void setUp() 
+	public void setUp()
 	{
-		addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { "C" }) ); 
+		addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { "C" }) );
 		TestUtils.clearAssertionInformation();
 		if (TEST_CACHE_ENABLED) {
 			setOutAndExpectedDeletionDisabled(true);
@@ -84,499 +84,492 @@ public class FullLogicalMatrixTest extends AutomatedTestBase
 			TestUtils.clearDirectory(TEST_DATA_DIR + TEST_CLASS_DIR);
 		}
 	}
-	
+
 	@Test
-	public void testPPredGreaterDenseDenseCP() 
+	public void testLogicalGreaterDenseDenseCP()
 	{
-		runPPredTest(Type.GREATER, false, false, ExecType.CP);
+		runLogicalTest(Type.GREATER, false, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredGreaterDenseSparseCP() 
+	public void testLogicalGreaterDenseSparseCP()
 	{
-		runPPredTest(Type.GREATER, false, true, ExecType.CP);
+		runLogicalTest(Type.GREATER, false, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredGreaterSparseDenseCP() 
+	public void testLogicalGreaterSparseDenseCP()
 	{
-		runPPredTest(Type.GREATER, true, false, ExecType.CP);
+		runLogicalTest(Type.GREATER, true, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredGreaterSparseSparseCP() 
+	public void testLogicalGreaterSparseSparseCP()
 	{
-		runPPredTest(Type.GREATER, true, true, ExecType.CP);
+		runLogicalTest(Type.GREATER, true, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsDenseDenseCP() 
+	public void testLogicalGreaterEqualsDenseDenseCP()
 	{
-		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.CP);
+		runLogicalTest(Type.GREATER_EQUALS, false, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsDenseSparseCP() 
+	public void testLogicalGreaterEqualsDenseSparseCP()
 	{
-		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.CP);
+		runLogicalTest(Type.GREATER_EQUALS, false, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsSparseDenseCP() 
+	public void testLogicalGreaterEqualsSparseDenseCP()
 	{
-		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.CP);
+		runLogicalTest(Type.GREATER_EQUALS, true, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsSparseSparseCP() 
+	public void testLogicalGreaterEqualsSparseSparseCP()
 	{
-		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.CP);
+		runLogicalTest(Type.GREATER_EQUALS, true, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredEqualsDenseDenseCP() 
+	public void testLogicalEqualsDenseDenseCP()
 	{
-		runPPredTest(Type.EQUALS, false, false, ExecType.CP);
+		runLogicalTest(Type.EQUALS, false, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredEqualsDenseSparseCP() 
+	public void testLogicalEqualsDenseSparseCP()
 	{
-		runPPredTest(Type.EQUALS, false, true, ExecType.CP);
+		runLogicalTest(Type.EQUALS, false, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredEqualsSparseDenseCP() 
+	public void testLogicalEqualsSparseDenseCP()
 	{
-		runPPredTest(Type.EQUALS, true, false, ExecType.CP);
+		runLogicalTest(Type.EQUALS, true, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredEqualsSparseSparseCP() 
+	public void testLogicalEqualsSparseSparseCP()
 	{
-		runPPredTest(Type.EQUALS, true, true, ExecType.CP);
+		runLogicalTest(Type.EQUALS, true, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsDenseDenseCP() 
+	public void testLogicalNotEqualsDenseDenseCP()
 	{
-		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.CP);
+		runLogicalTest(Type.NOT_EQUALS, false, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsDenseSparseCP() 
+	public void testLogicalNotEqualsDenseSparseCP()
 	{
-		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.CP);
+		runLogicalTest(Type.NOT_EQUALS, false, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsSparseDenseCP() 
+	public void testLogicalNotEqualsSparseDenseCP()
 	{
-		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.CP);
+		runLogicalTest(Type.NOT_EQUALS, true, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsSparseSparseCP() 
+	public void testLogicalNotEqualsSparseSparseCP()
 	{
-		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.CP);
+		runLogicalTest(Type.NOT_EQUALS, true, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessDenseDenseCP() 
+	public void testLogicalLessDenseDenseCP()
 	{
-		runPPredTest(Type.LESS, false, false, ExecType.CP);
+		runLogicalTest(Type.LESS, false, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessDenseSparseCP() 
+	public void testLogicalLessDenseSparseCP()
 	{
-		runPPredTest(Type.LESS, false, true, ExecType.CP);
+		runLogicalTest(Type.LESS, false, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessSparseDenseCP() 
+	public void testLogicalLessSparseDenseCP()
 	{
-		runPPredTest(Type.LESS, true, false, ExecType.CP);
+		runLogicalTest(Type.LESS, true, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessSparseSparseCP() 
+	public void testLogicalLessSparseSparseCP()
 	{
-		runPPredTest(Type.LESS, true, true, ExecType.CP);
+		runLogicalTest(Type.LESS, true, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsDenseDenseCP() 
+	public void testLogicalLessEqualsDenseDenseCP()
 	{
-		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.CP);
+		runLogicalTest(Type.LESS_EQUALS, false, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsDenseSparseCP() 
+	public void testLogicalLessEqualsDenseSparseCP()
 	{
-		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.CP);
+		runLogicalTest(Type.LESS_EQUALS, false, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsSparseDenseCP() 
+	public void testLogicalLessEqualsSparseDenseCP()
 	{
-		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.CP);
+		runLogicalTest(Type.LESS_EQUALS, true, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsSparseSparseCP() 
+	public void testLogicalLessEqualsSparseSparseCP()
 	{
-		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.CP);
+		runLogicalTest(Type.LESS_EQUALS, true, true, ExecType.CP);
 	}
-	
-	
+
+
 	// ------------------------
 	@Test
-	public void testPPredGreaterDenseDenseSP() 
+	public void testLogicalGreaterDenseDenseSP()
 	{
-		runPPredTest(Type.GREATER, false, false, ExecType.SPARK);
+		runLogicalTest(Type.GREATER, false, false, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredGreaterDenseSparseSP() 
+	public void testLogicalGreaterDenseSparseSP()
 	{
-		runPPredTest(Type.GREATER, false, true, ExecType.SPARK);
+		runLogicalTest(Type.GREATER, false, true, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredGreaterSparseDenseSP() 
+	public void testLogicalGreaterSparseDenseSP()
 	{
-		runPPredTest(Type.GREATER, true, false, ExecType.SPARK);
+		runLogicalTest(Type.GREATER, true, false, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredGreaterSparseSparseSP() 
+	public void testLogicalGreaterSparseSparseSP()
 	{
-		runPPredTest(Type.GREATER, true, true, ExecType.SPARK);
+		runLogicalTest(Type.GREATER, true, true, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsDenseDenseSP() 
+	public void testLogicalGreaterEqualsDenseDenseSP()
 	{
-		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.SPARK);
+		runLogicalTest(Type.GREATER_EQUALS, false, false, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsDenseSparseSP() 
+	public void testLogicalGreaterEqualsDenseSparseSP()
 	{
-		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.SPARK);
+		runLogicalTest(Type.GREATER_EQUALS, false, true, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsSparseDenseSP() 
+	public void testLogicalGreaterEqualsSparseDenseSP()
 	{
-		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.SPARK);
+		runLogicalTest(Type.GREATER_EQUALS, true, false, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsSparseSparseSP() 
+	public void testLogicalGreaterEqualsSparseSparseSP()
 	{
-		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.SPARK);
+		runLogicalTest(Type.GREATER_EQUALS, true, true, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredEqualsDenseDenseSP() 
+	public void testLogicalEqualsDenseDenseSP()
 	{
-		runPPredTest(Type.EQUALS, false, false, ExecType.SPARK);
+		runLogicalTest(Type.EQUALS, false, false, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredEqualsDenseSparseSP() 
+	public void testLogicalEqualsDenseSparseSP()
 	{
-		runPPredTest(Type.EQUALS, false, true, ExecType.SPARK);
+		runLogicalTest(Type.EQUALS, false, true, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredEqualsSparseDenseSP() 
+	public void testLogicalEqualsSparseDenseSP()
 	{
-		runPPredTest(Type.EQUALS, true, false, ExecType.SPARK);
+		runLogicalTest(Type.EQUALS, true, false, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredEqualsSparseSparseSP() 
+	public void testLogicalEqualsSparseSparseSP()
 	{
-		runPPredTest(Type.EQUALS, true, true, ExecType.SPARK);
+		runLogicalTest(Type.EQUALS, true, true, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsDenseDenseSP() 
+	public void testLogicalNotEqualsDenseDenseSP()
 	{
-		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.SPARK);
+		runLogicalTest(Type.NOT_EQUALS, false, false, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsDenseSparseSP() 
+	public void testLogicalNotEqualsDenseSparseSP()
 	{
-		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.SPARK);
+		runLogicalTest(Type.NOT_EQUALS, false, true, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsSparseDenseSP() 
+	public void testLogicalNotEqualsSparseDenseSP()
 	{
-		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.SPARK);
+		runLogicalTest(Type.NOT_EQUALS, true, false, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsSparseSparseSP() 
+	public void testLogicalNotEqualsSparseSparseSP()
 	{
-		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.SPARK);
+		runLogicalTest(Type.NOT_EQUALS, true, true, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredLessDenseDenseSP() 
+	public void testLogicalLessDenseDenseSP()
 	{
-		runPPredTest(Type.LESS, false, false, ExecType.SPARK);
+		runLogicalTest(Type.LESS, false, false, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredLessDenseSparseSP() 
+	public void testLogicalLessDenseSparseSP()
 	{
-		runPPredTest(Type.LESS, false, true, ExecType.SPARK);
+		runLogicalTest(Type.LESS, false, true, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredLessSparseDenseSP() 
+	public void testLogicalLessSparseDenseSP()
 	{
-		runPPredTest(Type.LESS, true, false, ExecType.SPARK);
+		runLogicalTest(Type.LESS, true, false, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredLessSparseSparseSP() 
+	public void testLogicalLessSparseSparseSP()
 	{
-		runPPredTest(Type.LESS, true, true, ExecType.SPARK);
+		runLogicalTest(Type.LESS, true, true, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsDenseDenseSP() 
+	public void testLogicalLessEqualsDenseDenseSP()
 	{
-		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.SPARK);
+		runLogicalTest(Type.LESS_EQUALS, false, false, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsDenseSparseSP() 
+	public void testLogicalLessEqualsDenseSparseSP()
 	{
-		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.SPARK);
+		runLogicalTest(Type.LESS_EQUALS, false, true, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsSparseDenseSP() 
+	public void testLogicalLessEqualsSparseDenseSP()
 	{
-		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.SPARK);
+		runLogicalTest(Type.LESS_EQUALS, true, false, ExecType.SPARK);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsSparseSparseSP() 
+	public void testLogicalLessEqualsSparseSparseSP()
 	{
-		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.SPARK);
+		runLogicalTest(Type.LESS_EQUALS, true, true, ExecType.SPARK);
 	}
 	// ----------------------
-	
+
 	@Test
-	public void testPPredGreaterDenseDenseMR() 
+	public void testLogicalGreaterDenseDenseMR()
 	{
-		runPPredTest(Type.GREATER, false, false, ExecType.MR);
+		runLogicalTest(Type.GREATER, false, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredGreaterDenseSparseMR() 
+	public void testLogicalGreaterDenseSparseMR()
 	{
-		runPPredTest(Type.GREATER, false, true, ExecType.MR);
+		runLogicalTest(Type.GREATER, false, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredGreaterSparseDenseMR() 
+	public void testLogicalGreaterSparseDenseMR()
 	{
-		runPPredTest(Type.GREATER, true, false, ExecType.MR);
+		runLogicalTest(Type.GREATER, true, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredGreaterSparseSparseMR() 
+	public void testLogicalGreaterSparseSparseMR()
 	{
-		runPPredTest(Type.GREATER, true, true, ExecType.MR);
+		runLogicalTest(Type.GREATER, true, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsDenseDenseMR() 
+	public void testLogicalGreaterEqualsDenseDenseMR()
 	{
-		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.MR);
+		runLogicalTest(Type.GREATER_EQUALS, false, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsDenseSparseMR() 
+	public void testLogicalGreaterEqualsDenseSparseMR()
 	{
-		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.MR);
+		runLogicalTest(Type.GREATER_EQUALS, false, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsSparseDenseMR() 
+	public void testLogicalGreaterEqualsSparseDenseMR()
 	{
-		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.MR);
+		runLogicalTest(Type.GREATER_EQUALS, true, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsSparseSparseMR() 
+	public void testLogicalGreaterEqualsSparseSparseMR()
 	{
-		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.MR);
+		runLogicalTest(Type.GREATER_EQUALS, true, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredEqualsDenseDenseMR() 
+	public void testLogicalEqualsDenseDenseMR()
 	{
-		runPPredTest(Type.EQUALS, false, false, ExecType.MR);
+		runLogicalTest(Type.EQUALS, false, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredEqualsDenseSparseMR() 
+	public void testLogicalEqualsDenseSparseMR()
 	{
-		runPPredTest(Type.EQUALS, false, true, ExecType.MR);
+		runLogicalTest(Type.EQUALS, false, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredEqualsSparseDenseMR() 
+	public void testLogicalEqualsSparseDenseMR()
 	{
-		runPPredTest(Type.EQUALS, true, false, ExecType.MR);
+		runLogicalTest(Type.EQUALS, true, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredEqualsSparseSparseMR() 
+	public void testLogicalEqualsSparseSparseMR()
 	{
-		runPPredTest(Type.EQUALS, true, true, ExecType.MR);
+		runLogicalTest(Type.EQUALS, true, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsDenseDenseMR() 
+	public void testLogicalNotEqualsDenseDenseMR()
 	{
-		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.MR);
+		runLogicalTest(Type.NOT_EQUALS, false, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsDenseSparseMR() 
+	public void testLogicalNotEqualsDenseSparseMR()
 	{
-		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.MR);
+		runLogicalTest(Type.NOT_EQUALS, false, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsSparseDenseMR() 
+	public void testLogicalNotEqualsSparseDenseMR()
 	{
-		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.MR);
+		runLogicalTest(Type.NOT_EQUALS, true, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsSparseSparseMR() 
+	public void testLogicalNotEqualsSparseSparseMR()
 	{
-		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.MR);
+		runLogicalTest(Type.NOT_EQUALS, true, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessDenseDenseMR() 
+	public void testLogicalLessDenseDenseMR()
 	{
-		runPPredTest(Type.LESS, false, false, ExecType.MR);
+		runLogicalTest(Type.LESS, false, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessDenseSparseMR() 
+	public void testLogicalLessDenseSparseMR()
 	{
-		runPPredTest(Type.LESS, false, true, ExecType.MR);
+		runLogicalTest(Type.LESS, false, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessSparseDenseMR() 
+	public void testLogicalLessSparseDenseMR()
 	{
-		runPPredTest(Type.LESS, true, false, ExecType.MR);
+		runLogicalTest(Type.LESS, true, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessSparseSparseMR() 
+	public void testLogicalLessSparseSparseMR()
 	{
-		runPPredTest(Type.LESS, true, true, ExecType.MR);
+		runLogicalTest(Type.LESS, true, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsDenseDenseMR() 
+	public void testLogicalLessEqualsDenseDenseMR()
 	{
-		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.MR);
+		runLogicalTest(Type.LESS_EQUALS, false, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsDenseSparseMR() 
+	public void testLogicalLessEqualsDenseSparseMR()
 	{
-		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.MR);
+		runLogicalTest(Type.LESS_EQUALS, false, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsSparseDenseMR() 
+	public void testLogicalLessEqualsSparseDenseMR()
 	{
-		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.MR);
+		runLogicalTest(Type.LESS_EQUALS, true, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsSparseSparseMR() 
+	public void testLogicalLessEqualsSparseSparseMR()
 	{
-		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.MR);
+		runLogicalTest(Type.LESS_EQUALS, true, true, ExecType.MR);
 	}
-	
-	
-	/**
-	 * 
-	 * @param type
-	 * @param instType
-	 * @param sparse
-	 */
-	private void runPPredTest( Type type, boolean sp1, boolean sp2, ExecType et )
+
+	private void runLogicalTest( Type type, boolean sp1, boolean sp2, ExecType et )
 	{
 		String TEST_NAME = TEST_NAME1;
 		int rows = rows1;
 		int cols = cols1;
-		    
+
 	    RUNTIME_PLATFORM platformOld = rtplatform;
 		switch( et ){
 			case MR: rtplatform = RUNTIME_PLATFORM.HADOOP; break;
 			case SPARK: rtplatform = RUNTIME_PLATFORM.SPARK; break;
 			default: rtplatform = RUNTIME_PLATFORM.HYBRID; break;
 		}
-		
+
 		boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;
 	    if( rtplatform == RUNTIME_PLATFORM.SPARK )
 			DMLScript.USE_LOCAL_SPARK_CONFIG = true;
-	
+
 		double sparsityLeft = sp1 ? sparsity2 : sparsity1;
 		double sparsityRight = sp2 ? sparsity2 : sparsity1;
-		
+
 		String TEST_CACHE_DIR = "";
 		if (TEST_CACHE_ENABLED) {
 			TEST_CACHE_DIR = type.ordinal() + "_" + rows + "_" + cols + "_" + sparsityLeft + "_" + sparsityRight + "/";
 		}
-		
+
 		try
 		{
 			TestConfiguration config = getTestConfiguration(TEST_NAME);
 			loadTestConfiguration(config, TEST_CACHE_DIR);
-			
+
 			/* This is for running the junit test the new way, i.e., construct the arguments directly */
 			String HOME = SCRIPT_DIR + TEST_DIR;
 			fullDMLScriptName = HOME + TEST_NAME + ".dml";
-			programArgs = new String[]{"-args", input("A"), input("B"), 
+			programArgs = new String[]{"-args", input("A"), input("B"),
 				Integer.toString(type.ordinal()), output("C") };
-			
+
 			fullRScriptName = HOME + TEST_NAME + ".R";
 			rCmd = "Rscript" + " " + fullRScriptName + " " + inputDir() + " " + type.ordinal() + " " + expectedDir();
-	
+
 			//generate actual dataset
-			double[][] A = getRandomMatrix(rows, cols, -10, 10, sparsityLeft, 7); 
+			double[][] A = getRandomMatrix(rows, cols, -10, 10, sparsityLeft, 7);
 			writeInputMatrixWithMTD("A", A, true);
-			double[][] B = getRandomMatrix(rows, cols, -15, 15, sparsityRight, 3); 
+			double[][] B = getRandomMatrix(rows, cols, -15, 15, sparsityRight, 3);
 			writeInputMatrixWithMTD("B", B, true);
-			
+
 			//run tests
-			runTest(true, false, null, -1); 
-			runRScript(true); 
-			
-			//compare matrices 
+			runTest(true, false, null, -1);
+			runRScript(true);
+
+			//compare matrices
 			HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS("C");
 			HashMap<CellIndex, Double> rfile  = readRMatrixFromFS("C");
 			TestUtils.compareMatrices(dmlfile, rfile, eps, "Stat-DML", "Stat-R");
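
A note on the renamed matrix-matrix tests above: the Javadoc states that these logical (relational) operations internally translate to binary operations. As a rough illustration only, the following minimal DML sketch mirrors the kind of elementwise matrix-matrix comparison the tests exercise; the matrix names A and B follow the test inputs, but the actual LogicalMatrixTest.dml script is not shown in this diff, so the sketch is an assumption rather than the script itself.

# minimal DML sketch (assumption): elementwise matrix-matrix comparison in the post-ppred syntax
A = rand(rows=10, cols=10, min=-10, max=10, sparsity=0.7, seed=7);
B = rand(rows=10, cols=10, min=-15, max=15, sparsity=0.7, seed=3);
C = (A > B);                     # replaces the older ppred(A, B, ">") call; internally a binary relational op
print("number of 1-cells in C: " + sum(C));

The test class simply varies the comparison operator (Type enum), the sparsity of each operand, and the execution type, and checks the DML result against the equivalent R computation.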

http://git-wip-us.apache.org/repos/asf/systemml/blob/1a3d85f9/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalScalarLeftTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalScalarLeftTest.java b/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalScalarLeftTest.java
index c23943b..b5bc217 100644
--- a/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalScalarLeftTest.java
+++ b/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalScalarLeftTest.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * 
+ *
  *   http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing,
  * software distributed under the License is distributed on an
  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -33,28 +33,29 @@ import org.apache.sysml.test.integration.TestConfiguration;
 import org.apache.sysml.test.utils.TestUtils;
 
 /**
- * The main purpose of this test is to verify the internal optimization regarding
- * sparse-safeness of ppred for various input combinations. (ppred is not sparse-safe 
- * in general, but for certain instance involving 0 scalar it is).
- * 
- * Furthermore, it is used to test all combinations of matrix-scalar, scalar-matrix
- * ppred operations in all execution types.
- * 
+ * The main purpose of this test is to verify the internal optimization
+ * regarding sparse-safeness of logical operations for various input
+ * combinations. (logical operations are not sparse-safe in general, but for
+ * certain instances involving a 0 scalar they are).
+ *
+ * Furthermore, it is used to test all combinations of matrix-scalar,
+ * scalar-matrix logical operations in all execution types.
+ *
  */
-public class FullLogicalScalarLeftTest extends AutomatedTestBase 
+public class FullLogicalScalarLeftTest extends AutomatedTestBase
 {
-	
+
 	private final static String TEST_NAME1 = "LogicalScalarLeftTest";
 	private final static String TEST_DIR = "functions/binary/matrix_full_other/";
 	private final static String TEST_CLASS_DIR = TEST_DIR + FullLogicalScalarLeftTest.class.getSimpleName() + "/";
 	private final static double eps = 1e-10;
-	
+
 	private final static int rows1 = 1072;
 	private final static int cols1 = 1009;
-	
+
 	private final static double sparsity1 = 0.7;
 	private final static double sparsity2 = 0.1;
-	
+
 	public enum Type{
 		GREATER,
 		LESS,
@@ -63,7 +64,7 @@ public class FullLogicalScalarLeftTest extends AutomatedTestBase
 		GREATER_EQUALS,
 		LESS_EQUALS,
 	}
-	
+
 	@BeforeClass
 	public static void init()
 	{
@@ -79,7 +80,7 @@ public class FullLogicalScalarLeftTest extends AutomatedTestBase
 	}
 
 	@Override
-	public void setUp() 
+	public void setUp()
 	{
 		addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { "B" })   );
 		if (TEST_CACHE_ENABLED) {
@@ -87,344 +88,337 @@ public class FullLogicalScalarLeftTest extends AutomatedTestBase
 		}
 	}
 
-	
+
 	@Test
-	public void testPPredGreaterZeroDenseCP() 
+	public void testLogicalGreaterZeroDenseCP()
 	{
-		runPPredTest(Type.GREATER, true, false, ExecType.CP);
+		runLogicalTest(Type.GREATER, true, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessZeroDenseCP() 
+	public void testLogicalLessZeroDenseCP()
 	{
-		runPPredTest(Type.LESS, true, false, ExecType.CP);
+		runLogicalTest(Type.LESS, true, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredEqualsZeroDenseCP() 
+	public void testLogicalEqualsZeroDenseCP()
 	{
-		runPPredTest(Type.EQUALS, true, false, ExecType.CP);
+		runLogicalTest(Type.EQUALS, true, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsZeroDenseCP() 
+	public void testLogicalNotEqualsZeroDenseCP()
 	{
-		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.CP);
+		runLogicalTest(Type.NOT_EQUALS, true, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsZeroDenseCP() 
+	public void testLogicalGreaterEqualsZeroDenseCP()
 	{
-		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.CP);
+		runLogicalTest(Type.GREATER_EQUALS, true, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsZeroDenseCP() 
+	public void testLogicalLessEqualsZeroDenseCP()
 	{
-		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.CP);
+		runLogicalTest(Type.LESS_EQUALS, true, false, ExecType.CP);
 	}
 
 	@Test
-	public void testPPredGreaterNonZeroDenseCP() 
+	public void testLogicalGreaterNonZeroDenseCP()
 	{
-		runPPredTest(Type.GREATER, false, false, ExecType.CP);
+		runLogicalTest(Type.GREATER, false, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessNonZeroDenseCP() 
+	public void testLogicalLessNonZeroDenseCP()
 	{
-		runPPredTest(Type.LESS, false, false, ExecType.CP);
+		runLogicalTest(Type.LESS, false, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredEqualsNonZeroDenseCP() 
+	public void testLogicalEqualsNonZeroDenseCP()
 	{
-		runPPredTest(Type.EQUALS, false, false, ExecType.CP);
+		runLogicalTest(Type.EQUALS, false, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsNonZeroDenseCP() 
+	public void testLogicalNotEqualsNonZeroDenseCP()
 	{
-		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.CP);
+		runLogicalTest(Type.NOT_EQUALS, false, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsNonZeroDenseCP() 
+	public void testLogicalGreaterEqualsNonZeroDenseCP()
 	{
-		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.CP);
+		runLogicalTest(Type.GREATER_EQUALS, false, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsNonZeroDenseCP() 
+	public void testLogicalLessEqualsNonZeroDenseCP()
 	{
-		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.CP);
+		runLogicalTest(Type.LESS_EQUALS, false, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredGreaterZeroSparseCP() 
+	public void testLogicalGreaterZeroSparseCP()
 	{
-		runPPredTest(Type.GREATER, true, true, ExecType.CP);
+		runLogicalTest(Type.GREATER, true, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessZeroSparseCP() 
+	public void testLogicalLessZeroSparseCP()
 	{
-		runPPredTest(Type.LESS, true, true, ExecType.CP);
+		runLogicalTest(Type.LESS, true, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredEqualsZeroSparseCP() 
+	public void testLogicalEqualsZeroSparseCP()
 	{
-		runPPredTest(Type.EQUALS, true, true, ExecType.CP);
+		runLogicalTest(Type.EQUALS, true, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsZeroSparseCP() 
+	public void testLogicalNotEqualsZeroSparseCP()
 	{
-		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.CP);
+		runLogicalTest(Type.NOT_EQUALS, true, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsZeroSparseCP() 
+	public void testLogicalGreaterEqualsZeroSparseCP()
 	{
-		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.CP);
+		runLogicalTest(Type.GREATER_EQUALS, true, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsZeroSparseCP() 
+	public void testLogicalLessEqualsZeroSparseCP()
 	{
-		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.CP);
+		runLogicalTest(Type.LESS_EQUALS, true, true, ExecType.CP);
 	}
 
 	@Test
-	public void testPPredGreaterNonZeroSparseCP() 
+	public void testLogicalGreaterNonZeroSparseCP()
 	{
-		runPPredTest(Type.GREATER, false, true, ExecType.CP);
+		runLogicalTest(Type.GREATER, false, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessNonZeroSparseCP() 
+	public void testLogicalLessNonZeroSparseCP()
 	{
-		runPPredTest(Type.LESS, false, true, ExecType.CP);
+		runLogicalTest(Type.LESS, false, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredEqualsNonZeroSparseCP() 
+	public void testLogicalEqualsNonZeroSparseCP()
 	{
-		runPPredTest(Type.EQUALS, false, true, ExecType.CP);
+		runLogicalTest(Type.EQUALS, false, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsNonZeroSparseCP() 
+	public void testLogicalNotEqualsNonZeroSparseCP()
 	{
-		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.CP);
+		runLogicalTest(Type.NOT_EQUALS, false, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsNonZeroSparseCP() 
+	public void testLogicalGreaterEqualsNonZeroSparseCP()
 	{
-		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.CP);
+		runLogicalTest(Type.GREATER_EQUALS, false, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsNonZeroSparseCP() 
+	public void testLogicalLessEqualsNonZeroSparseCP()
 	{
-		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.CP);
+		runLogicalTest(Type.LESS_EQUALS, false, true, ExecType.CP);
 	}
 
 	@Test
-	public void testPPredGreaterZeroDenseMR() 
+	public void testLogicalGreaterZeroDenseMR()
 	{
-		runPPredTest(Type.GREATER, true, false, ExecType.MR);
+		runLogicalTest(Type.GREATER, true, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessZeroDenseMR() 
+	public void testLogicalLessZeroDenseMR()
 	{
-		runPPredTest(Type.LESS, true, false, ExecType.MR);
+		runLogicalTest(Type.LESS, true, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredEqualsZeroDenseMR() 
+	public void testLogicalEqualsZeroDenseMR()
 	{
-		runPPredTest(Type.EQUALS, true, false, ExecType.MR);
+		runLogicalTest(Type.EQUALS, true, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsZeroDenseMR() 
+	public void testLogicalNotEqualsZeroDenseMR()
 	{
-		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.MR);
+		runLogicalTest(Type.NOT_EQUALS, true, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsZeroDenseMR() 
+	public void testLogicalGreaterEqualsZeroDenseMR()
 	{
-		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.MR);
+		runLogicalTest(Type.GREATER_EQUALS, true, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsZeroDenseMR() 
+	public void testLogicalLessEqualsZeroDenseMR()
 	{
-		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.MR);
+		runLogicalTest(Type.LESS_EQUALS, true, false, ExecType.MR);
 	}
 
 	@Test
-	public void testPPredGreaterNonZeroDenseMR() 
+	public void testLogicalGreaterNonZeroDenseMR()
 	{
-		runPPredTest(Type.GREATER, false, false, ExecType.MR);
+		runLogicalTest(Type.GREATER, false, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessNonZeroDenseMR() 
+	public void testLogicalLessNonZeroDenseMR()
 	{
-		runPPredTest(Type.LESS, false, false, ExecType.MR);
+		runLogicalTest(Type.LESS, false, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredEqualsNonZeroDenseMR() 
+	public void testLogicalEqualsNonZeroDenseMR()
 	{
-		runPPredTest(Type.EQUALS, false, false, ExecType.MR);
+		runLogicalTest(Type.EQUALS, false, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsNonZeroDenseMR() 
+	public void testLogicalNotEqualsNonZeroDenseMR()
 	{
-		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.MR);
+		runLogicalTest(Type.NOT_EQUALS, false, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsNonZeroDenseMR() 
+	public void testLogicalGreaterEqualsNonZeroDenseMR()
 	{
-		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.MR);
+		runLogicalTest(Type.GREATER_EQUALS, false, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsNonZeroDenseMR() 
+	public void testLogicalLessEqualsNonZeroDenseMR()
 	{
-		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.MR);
+		runLogicalTest(Type.LESS_EQUALS, false, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredGreaterZeroSparseMR() 
+	public void testLogicalGreaterZeroSparseMR()
 	{
-		runPPredTest(Type.GREATER, true, true, ExecType.MR);
+		runLogicalTest(Type.GREATER, true, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessZeroSparseMR() 
+	public void testLogicalLessZeroSparseMR()
 	{
-		runPPredTest(Type.LESS, true, true, ExecType.MR);
+		runLogicalTest(Type.LESS, true, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredEqualsZeroSparseMR() 
+	public void testLogicalEqualsZeroSparseMR()
 	{
-		runPPredTest(Type.EQUALS, true, true, ExecType.MR);
+		runLogicalTest(Type.EQUALS, true, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsZeroSparseMR() 
+	public void testLogicalNotEqualsZeroSparseMR()
 	{
-		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.MR);
+		runLogicalTest(Type.NOT_EQUALS, true, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsZeroSparseMR() 
+	public void testLogicalGreaterEqualsZeroSparseMR()
 	{
-		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.MR);
+		runLogicalTest(Type.GREATER_EQUALS, true, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsZeroSparseMR() 
+	public void testLogicalLessEqualsZeroSparseMR()
 	{
-		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.MR);
+		runLogicalTest(Type.LESS_EQUALS, true, true, ExecType.MR);
 	}
 
 	@Test
-	public void testPPredGreaterNonZeroSparseMR() 
+	public void testLogicalGreaterNonZeroSparseMR()
 	{
-		runPPredTest(Type.GREATER, false, true, ExecType.MR);
+		runLogicalTest(Type.GREATER, false, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessNonZeroSparseMR() 
+	public void testLogicalLessNonZeroSparseMR()
 	{
-		runPPredTest(Type.LESS, false, true, ExecType.MR);
+		runLogicalTest(Type.LESS, false, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredEqualsNonZeroSparseMR() 
+	public void testLogicalEqualsNonZeroSparseMR()
 	{
-		runPPredTest(Type.EQUALS, false, true, ExecType.MR);
+		runLogicalTest(Type.EQUALS, false, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsNonZeroSparseMR() 
+	public void testLogicalNotEqualsNonZeroSparseMR()
 	{
-		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.MR);
+		runLogicalTest(Type.NOT_EQUALS, false, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsNonZeroSparseMR() 
+	public void testLogicalGreaterEqualsNonZeroSparseMR()
 	{
-		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.MR);
+		runLogicalTest(Type.GREATER_EQUALS, false, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsNonZeroSparseMR() 
+	public void testLogicalLessEqualsNonZeroSparseMR()
 	{
-		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.MR);
+		runLogicalTest(Type.LESS_EQUALS, false, true, ExecType.MR);
 	}
-	
-	
-	/**
-	 * 
-	 * @param type
-	 * @param instType
-	 * @param sparse
-	 */
-	private void runPPredTest( Type type, boolean zero, boolean sparse, ExecType et )
+
+	private void runLogicalTest( Type type, boolean zero, boolean sparse, ExecType et )
 	{
 		String TEST_NAME = TEST_NAME1;
 		int rows = rows1;
 		int cols = cols1;
 		double sparsity = sparse ? sparsity2 : sparsity1;
 		double constant = zero ? 0 : 0.5;
-		
+
 		String TEST_CACHE_DIR = "";
 		if (TEST_CACHE_ENABLED) {
 			TEST_CACHE_DIR = type.ordinal() + "_" + constant + "_" + sparsity + "/";
 		}
-		
+
 		//rtplatform for MR
 		RUNTIME_PLATFORM platformOld = rtplatform;
 		rtplatform = (et==ExecType.MR) ? RUNTIME_PLATFORM.HADOOP : RUNTIME_PLATFORM.HYBRID;
-	
+
 		try
 		{
 			TestConfiguration config = getTestConfiguration(TEST_NAME);
-			
+
 			loadTestConfiguration(config, TEST_CACHE_DIR);
-			
+
 			/* This is for running the junit test the new way, i.e., construct the arguments directly */
 			String HOME = SCRIPT_DIR + TEST_DIR;
 			fullDMLScriptName = HOME + TEST_NAME + ".dml";
-			programArgs = new String[]{"-explain","-args", input("A"), 
+			programArgs = new String[]{"-explain","-args", input("A"),
 				Integer.toString(type.ordinal()), Double.toString(constant), output("B") };
-			
+
 			fullRScriptName = HOME + TEST_NAME + ".R";
-			rCmd = "Rscript" + " " + fullRScriptName + " " +  inputDir() + " " + 
+			rCmd = "Rscript" + " " + fullRScriptName + " " +  inputDir() + " " +
 				type.ordinal() + " " + constant + " " + expectedDir();
-	
+
 			//generate actual dataset
-			double[][] A = getRandomMatrix(rows, cols, -1, 1, sparsity, 7); 
+			double[][] A = getRandomMatrix(rows, cols, -1, 1, sparsity, 7);
 			writeInputMatrixWithMTD("A", A, true);
-	
+
 			//run tests
-			runTest(true, false, null, -1); 
-			runRScript(true); 
-			
-			//compare matrices 
+			runTest(true, false, null, -1);
+			runRScript(true);
+
+			//compare matrices
 			HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS("B");
 			HashMap<CellIndex, Double> rfile  = readRMatrixFromFS("B");
 			TestUtils.compareMatrices(dmlfile, rfile, eps, "Stat-DML", "Stat-R");
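
The sparse-safeness remark in the Javadoc above is why these scalar tests are parameterized by a zero versus non-zero constant: a comparison whose result is 0 wherever the input is 0 can skip the zero cells of a sparse matrix, while other comparisons turn zeros into ones and densify the output. A minimal DML sketch of the two situations, under the assumption of an illustrative sparse input X (the scalar-on-the-left variants such as 0 < X behave the same way):

# minimal DML sketch (assumption): sparse-safe vs. non-sparse-safe comparison against a scalar
X  = rand(rows=1000, cols=1000, min=-1, max=1, sparsity=0.1, seed=7);
S1 = (X > 0);    # sparse-safe here: zero cells yield 0, so only the ~10% non-zero cells need evaluation
S2 = (X >= 0);   # not sparse-safe: zero cells yield 1, so the result is effectively dense
print("1-cells in (X > 0):  " + sum(S1));
print("1-cells in (X >= 0): " + sum(S2));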

http://git-wip-us.apache.org/repos/asf/systemml/blob/1a3d85f9/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalScalarRightTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalScalarRightTest.java b/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalScalarRightTest.java
index 30fcdac..dcaf398 100644
--- a/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalScalarRightTest.java
+++ b/src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalScalarRightTest.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * 
+ *
  *   http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing,
  * software distributed under the License is distributed on an
  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -33,28 +33,29 @@ import org.apache.sysml.test.integration.TestConfiguration;
 import org.apache.sysml.test.utils.TestUtils;
 
 /**
- * The main purpose of this test is to verify the internal optimization regarding
- * sparse-safeness of ppred for various input combinations. (ppred is not sparse-safe 
- * in general, but for certain instance involving 0 scalar it is).
- * 
- * Furthermore, it is used to test all combinations of matrix-scalar, scalar-matrix
- * ppred operations in all execution types.
- * 
+ * The main purpose of this test is to verify the internal optimization
+ * regarding sparse-safeness of logical operations for various input
+ * combinations. (logical operations are not sparse-safe in general, but for
+ * certain instances involving a 0 scalar they are).
+ *
+ * Furthermore, it is used to test all combinations of matrix-scalar,
+ * scalar-matrix logical operations in all execution types.
+ *
  */
-public class FullLogicalScalarRightTest extends AutomatedTestBase 
+public class FullLogicalScalarRightTest extends AutomatedTestBase
 {
-	
+
 	private final static String TEST_NAME1 = "LogicalScalarRightTest";
 	private final static String TEST_DIR = "functions/binary/matrix_full_other/";
 	private final static String TEST_CLASS_DIR = TEST_DIR + FullLogicalScalarRightTest.class.getSimpleName() + "/";
 	private final static double eps = 1e-10;
-	
+
 	private final static int rows1 = 1072;
 	private final static int cols1 = 1009;
-	
+
 	private final static double sparsity1 = 0.7;
 	private final static double sparsity2 = 0.1;
-	
+
 	public enum Type{
 		GREATER,
 		LESS,
@@ -63,10 +64,10 @@ public class FullLogicalScalarRightTest extends AutomatedTestBase
 		GREATER_EQUALS,
 		LESS_EQUALS,
 	}
-	
-	
+
+
 	@Override
-	public void setUp() 
+	public void setUp()
 	{
 		addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { "B" })   );
 		if (TEST_CACHE_ENABLED) {
@@ -87,343 +88,336 @@ public class FullLogicalScalarRightTest extends AutomatedTestBase
 			TestUtils.clearDirectory(TEST_DATA_DIR + TEST_CLASS_DIR);
 		}
 	}
-	
+
 	@Test
-	public void testPPredGreaterZeroDenseCP() 
+	public void testLogicalGreaterZeroDenseCP()
 	{
-		runPPredTest(Type.GREATER, true, false, ExecType.CP);
+		runLogicalTest(Type.GREATER, true, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessZeroDenseCP() 
+	public void testLogicalLessZeroDenseCP()
 	{
-		runPPredTest(Type.LESS, true, false, ExecType.CP);
+		runLogicalTest(Type.LESS, true, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredEqualsZeroDenseCP() 
+	public void testLogicalEqualsZeroDenseCP()
 	{
-		runPPredTest(Type.EQUALS, true, false, ExecType.CP);
+		runLogicalTest(Type.EQUALS, true, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsZeroDenseCP() 
+	public void testLogicalNotEqualsZeroDenseCP()
 	{
-		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.CP);
+		runLogicalTest(Type.NOT_EQUALS, true, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsZeroDenseCP() 
+	public void testLogicalGreaterEqualsZeroDenseCP()
 	{
-		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.CP);
+		runLogicalTest(Type.GREATER_EQUALS, true, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsZeroDenseCP() 
+	public void testLogicalLessEqualsZeroDenseCP()
 	{
-		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.CP);
+		runLogicalTest(Type.LESS_EQUALS, true, false, ExecType.CP);
 	}
 
 	@Test
-	public void testPPredGreaterNonZeroDenseCP() 
+	public void testLogicalGreaterNonZeroDenseCP()
 	{
-		runPPredTest(Type.GREATER, false, false, ExecType.CP);
+		runLogicalTest(Type.GREATER, false, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessNonZeroDenseCP() 
+	public void testLogicalLessNonZeroDenseCP()
 	{
-		runPPredTest(Type.LESS, false, false, ExecType.CP);
+		runLogicalTest(Type.LESS, false, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredEqualsNonZeroDenseCP() 
+	public void testLogicalEqualsNonZeroDenseCP()
 	{
-		runPPredTest(Type.EQUALS, false, false, ExecType.CP);
+		runLogicalTest(Type.EQUALS, false, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsNonZeroDenseCP() 
+	public void testLogicalNotEqualsNonZeroDenseCP()
 	{
-		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.CP);
+		runLogicalTest(Type.NOT_EQUALS, false, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsNonZeroDenseCP() 
+	public void testLogicalGreaterEqualsNonZeroDenseCP()
 	{
-		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.CP);
+		runLogicalTest(Type.GREATER_EQUALS, false, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsNonZeroDenseCP() 
+	public void testLogicalLessEqualsNonZeroDenseCP()
 	{
-		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.CP);
+		runLogicalTest(Type.LESS_EQUALS, false, false, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredGreaterZeroSparseCP() 
+	public void testLogicalGreaterZeroSparseCP()
 	{
-		runPPredTest(Type.GREATER, true, true, ExecType.CP);
+		runLogicalTest(Type.GREATER, true, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessZeroSparseCP() 
+	public void testLogicalLessZeroSparseCP()
 	{
-		runPPredTest(Type.LESS, true, true, ExecType.CP);
+		runLogicalTest(Type.LESS, true, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredEqualsZeroSparseCP() 
+	public void testLogicalEqualsZeroSparseCP()
 	{
-		runPPredTest(Type.EQUALS, true, true, ExecType.CP);
+		runLogicalTest(Type.EQUALS, true, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsZeroSparseCP() 
+	public void testLogicalNotEqualsZeroSparseCP()
 	{
-		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.CP);
+		runLogicalTest(Type.NOT_EQUALS, true, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsZeroSparseCP() 
+	public void testLogicalGreaterEqualsZeroSparseCP()
 	{
-		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.CP);
+		runLogicalTest(Type.GREATER_EQUALS, true, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsZeroSparseCP() 
+	public void testLogicalLessEqualsZeroSparseCP()
 	{
-		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.CP);
+		runLogicalTest(Type.LESS_EQUALS, true, true, ExecType.CP);
 	}
 
 	@Test
-	public void testPPredGreaterNonZeroSparseCP() 
+	public void testLogicalGreaterNonZeroSparseCP()
 	{
-		runPPredTest(Type.GREATER, false, true, ExecType.CP);
+		runLogicalTest(Type.GREATER, false, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessNonZeroSparseCP() 
+	public void testLogicalLessNonZeroSparseCP()
 	{
-		runPPredTest(Type.LESS, false, true, ExecType.CP);
+		runLogicalTest(Type.LESS, false, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredEqualsNonZeroSparseCP() 
+	public void testLogicalEqualsNonZeroSparseCP()
 	{
-		runPPredTest(Type.EQUALS, false, true, ExecType.CP);
+		runLogicalTest(Type.EQUALS, false, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsNonZeroSparseCP() 
+	public void testLogicalNotEqualsNonZeroSparseCP()
 	{
-		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.CP);
+		runLogicalTest(Type.NOT_EQUALS, false, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsNonZeroSparseCP() 
+	public void testLogicalGreaterEqualsNonZeroSparseCP()
 	{
-		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.CP);
+		runLogicalTest(Type.GREATER_EQUALS, false, true, ExecType.CP);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsNonZeroSparseCP() 
+	public void testLogicalLessEqualsNonZeroSparseCP()
 	{
-		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.CP);
+		runLogicalTest(Type.LESS_EQUALS, false, true, ExecType.CP);
 	}
 
 	@Test
-	public void testPPredGreaterZeroDenseMR() 
+	public void testLogicalGreaterZeroDenseMR()
 	{
-		runPPredTest(Type.GREATER, true, false, ExecType.MR);
+		runLogicalTest(Type.GREATER, true, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessZeroDenseMR() 
+	public void testLogicalLessZeroDenseMR()
 	{
-		runPPredTest(Type.LESS, true, false, ExecType.MR);
+		runLogicalTest(Type.LESS, true, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredEqualsZeroDenseMR() 
+	public void testLogicalEqualsZeroDenseMR()
 	{
-		runPPredTest(Type.EQUALS, true, false, ExecType.MR);
+		runLogicalTest(Type.EQUALS, true, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsZeroDenseMR() 
+	public void testLogicalNotEqualsZeroDenseMR()
 	{
-		runPPredTest(Type.NOT_EQUALS, true, false, ExecType.MR);
+		runLogicalTest(Type.NOT_EQUALS, true, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsZeroDenseMR() 
+	public void testLogicalGreaterEqualsZeroDenseMR()
 	{
-		runPPredTest(Type.GREATER_EQUALS, true, false, ExecType.MR);
+		runLogicalTest(Type.GREATER_EQUALS, true, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsZeroDenseMR() 
+	public void testLogicalLessEqualsZeroDenseMR()
 	{
-		runPPredTest(Type.LESS_EQUALS, true, false, ExecType.MR);
+		runLogicalTest(Type.LESS_EQUALS, true, false, ExecType.MR);
 	}
 
 	@Test
-	public void testPPredGreaterNonZeroDenseMR() 
+	public void testLogicalGreaterNonZeroDenseMR()
 	{
-		runPPredTest(Type.GREATER, false, false, ExecType.MR);
+		runLogicalTest(Type.GREATER, false, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessNonZeroDenseMR() 
+	public void testLogicalLessNonZeroDenseMR()
 	{
-		runPPredTest(Type.LESS, false, false, ExecType.MR);
+		runLogicalTest(Type.LESS, false, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredEqualsNonZeroDenseMR() 
+	public void testLogicalEqualsNonZeroDenseMR()
 	{
-		runPPredTest(Type.EQUALS, false, false, ExecType.MR);
+		runLogicalTest(Type.EQUALS, false, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsNonZeroDenseMR() 
+	public void testLogicalNotEqualsNonZeroDenseMR()
 	{
-		runPPredTest(Type.NOT_EQUALS, false, false, ExecType.MR);
+		runLogicalTest(Type.NOT_EQUALS, false, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsNonZeroDenseMR() 
+	public void testLogicalGreaterEqualsNonZeroDenseMR()
 	{
-		runPPredTest(Type.GREATER_EQUALS, false, false, ExecType.MR);
+		runLogicalTest(Type.GREATER_EQUALS, false, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsNonZeroDenseMR() 
+	public void testLogicalLessEqualsNonZeroDenseMR()
 	{
-		runPPredTest(Type.LESS_EQUALS, false, false, ExecType.MR);
+		runLogicalTest(Type.LESS_EQUALS, false, false, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredGreaterZeroSparseMR() 
+	public void testLogicalGreaterZeroSparseMR()
 	{
-		runPPredTest(Type.GREATER, true, true, ExecType.MR);
+		runLogicalTest(Type.GREATER, true, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessZeroSparseMR() 
+	public void testLogicalLessZeroSparseMR()
 	{
-		runPPredTest(Type.LESS, true, true, ExecType.MR);
+		runLogicalTest(Type.LESS, true, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredEqualsZeroSparseMR() 
+	public void testLogicalEqualsZeroSparseMR()
 	{
-		runPPredTest(Type.EQUALS, true, true, ExecType.MR);
+		runLogicalTest(Type.EQUALS, true, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsZeroSparseMR() 
+	public void testLogicalNotEqualsZeroSparseMR()
 	{
-		runPPredTest(Type.NOT_EQUALS, true, true, ExecType.MR);
+		runLogicalTest(Type.NOT_EQUALS, true, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsZeroSparseMR() 
+	public void testLogicalGreaterEqualsZeroSparseMR()
 	{
-		runPPredTest(Type.GREATER_EQUALS, true, true, ExecType.MR);
+		runLogicalTest(Type.GREATER_EQUALS, true, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsZeroSparseMR() 
+	public void testLogicalLessEqualsZeroSparseMR()
 	{
-		runPPredTest(Type.LESS_EQUALS, true, true, ExecType.MR);
+		runLogicalTest(Type.LESS_EQUALS, true, true, ExecType.MR);
 	}
 
 	@Test
-	public void testPPredGreaterNonZeroSparseMR() 
+	public void testLogicalGreaterNonZeroSparseMR()
 	{
-		runPPredTest(Type.GREATER, false, true, ExecType.MR);
+		runLogicalTest(Type.GREATER, false, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessNonZeroSparseMR() 
+	public void testLogicalLessNonZeroSparseMR()
 	{
-		runPPredTest(Type.LESS, false, true, ExecType.MR);
+		runLogicalTest(Type.LESS, false, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredEqualsNonZeroSparseMR() 
+	public void testLogicalEqualsNonZeroSparseMR()
 	{
-		runPPredTest(Type.EQUALS, false, true, ExecType.MR);
+		runLogicalTest(Type.EQUALS, false, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredNotEqualsNonZeroSparseMR() 
+	public void testLogicalNotEqualsNonZeroSparseMR()
 	{
-		runPPredTest(Type.NOT_EQUALS, false, true, ExecType.MR);
+		runLogicalTest(Type.NOT_EQUALS, false, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredGreaterEqualsNonZeroSparseMR() 
+	public void testLogicalGreaterEqualsNonZeroSparseMR()
 	{
-		runPPredTest(Type.GREATER_EQUALS, false, true, ExecType.MR);
+		runLogicalTest(Type.GREATER_EQUALS, false, true, ExecType.MR);
 	}
-	
+
 	@Test
-	public void testPPredLessEqualsNonZeroSparseMR() 
+	public void testLogicalLessEqualsNonZeroSparseMR()
 	{
-		runPPredTest(Type.LESS_EQUALS, false, true, ExecType.MR);
+		runLogicalTest(Type.LESS_EQUALS, false, true, ExecType.MR);
 	}
-	
-	
-	/**
-	 * 
-	 * @param type
-	 * @param instType
-	 * @param sparse
-	 */
-	private void runPPredTest( Type type, boolean zero, boolean sparse, ExecType et )
+
+	private void runLogicalTest( Type type, boolean zero, boolean sparse, ExecType et )
 	{
 		String TEST_NAME = TEST_NAME1;
 		int rows = rows1;
 		int cols = cols1;
 		double sparsity = sparse ? sparsity2 : sparsity1;
 		double constant = zero ? 0 : 0.5;
-		
+
 		String TEST_CACHE_DIR = "";
 		if (TEST_CACHE_ENABLED) {
 			TEST_CACHE_DIR = type.ordinal() + "_" + constant + "_" + sparsity + "/";
 		}
-		
+
 		//rtplatform for MR
 		RUNTIME_PLATFORM platformOld = rtplatform;
 		rtplatform = (et==ExecType.MR) ? RUNTIME_PLATFORM.HADOOP : RUNTIME_PLATFORM.HYBRID;
-	
+
 		try
 		{
 			TestConfiguration config = getTestConfiguration(TEST_NAME);
 			loadTestConfiguration(config, TEST_CACHE_DIR);
-			
+
 			/* This is for running the junit test the new way, i.e., construct the arguments directly */
 			String HOME = SCRIPT_DIR + TEST_DIR;
 			fullDMLScriptName = HOME + TEST_NAME + ".dml";
-			programArgs = new String[]{"-args", input("A"), 
+			programArgs = new String[]{"-args", input("A"),
 				Integer.toString(type.ordinal()), Double.toString(constant), output("B") };
-			
+
 			fullRScriptName = HOME + TEST_NAME + ".R";
-			rCmd = "Rscript" + " " + fullRScriptName + " " + inputDir() + " " + 
+			rCmd = "Rscript" + " " + fullRScriptName + " " + inputDir() + " " +
 				type.ordinal() + " " + constant + " " + expectedDir();
-	
+
 			//generate actual dataset
-			double[][] A = getRandomMatrix(rows, cols, -1, 1, sparsity, 7); 
+			double[][] A = getRandomMatrix(rows, cols, -1, 1, sparsity, 7);
 			writeInputMatrixWithMTD("A", A, true);
-			
+
 			//run tests
-			runTest(true, false, null, -1); 
-			runRScript(true); 
-			
-			//compare matrices 
+			runTest(true, false, null, -1);
+			runRScript(true);
+
+			//compare matrices
 			HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS("B");
 			HashMap<CellIndex, Double> rfile  = readRMatrixFromFS("B");
 			TestUtils.compareMatrices(dmlfile, rfile, eps, "Stat-DML", "Stat-R");