Posted to commits@systemml.apache.org by de...@apache.org on 2016/05/05 01:13:54 UTC

[1/2] incubator-systemml git commit: [SYSTEMML-647] Replace castAsScalar calls

Repository: incubator-systemml
Updated Branches:
  refs/heads/master 7013910e0 -> 2da814574
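
The hunks below mechanically rename the matrix-to-scalar cast builtin in the DML test scripts. A minimal DML sketch of the change, for reference only (the matrix M and the print are illustrative and do not come from any of the scripts):

    M = matrix(7, rows=3, cols=3);
    # old form used throughout these scripts before this commit:
    #   v = castAsScalar(M[1,1]);
    v = as.scalar(M[1,1]);   # cast the 1x1 slice M[1,1] to a scalar value
    print("value = " + v);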


http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/impute/wfundInputGenerator2.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/impute/wfundInputGenerator2.dml b/src/test/scripts/applications/impute/wfundInputGenerator2.dml
index e6d302d..18d0ee2 100644
--- a/src/test/scripts/applications/impute/wfundInputGenerator2.dml
+++ b/src/test/scripts/applications/impute/wfundInputGenerator2.dml
@@ -124,7 +124,7 @@ disabled_known_values = disabled_known_values_extended;
 
 is_free = matrix (1.0, rows = num_attrs, cols = 1);
 for (i in 1:num_attrs) {
-    j = castAsScalar (subtotals_tree [i, 1]);
+    j = as.scalar (subtotals_tree [i, 1]);
     if (j > 0.0) {
         is_free [j, 1] = 0.0 + zero;
     } else {
@@ -137,12 +137,12 @@ num_frees = num_state_terms * num_frees_per_term;
 CReps_block = matrix (0.0, rows = num_attrs, cols = num_frees_per_term);
 index_free = 0;
 for (i in 1:num_attrs) {
-    if (castAsScalar (is_free [i, 1]) == 1.0) {
+    if (as.scalar (is_free [i, 1]) == 1.0) {
         index_free = index_free + 1;
         j = i;
         while (j > 0.0) {
             CReps_block [j, index_free] = 1.0 + zero;
-            j = castAsScalar (subtotals_tree [j, 1]);
+            j = as.scalar (subtotals_tree [j, 1]);
 }   }   }
 
 CReps = matrix (0.0, rows = (num_terms * num_attrs), cols = num_frees);
@@ -221,7 +221,7 @@ RegresFactorDefault = matrix (0.0, rows = (num_reg_eqs * num_factors), cols = 1)
 for (t in 2 : num_state_terms) {
     for (i in 1 : num_attrs) {
         reg_index = ((t-1) * num_attrs - 1 + i) * num_factors;
-        agg = castAsScalar (subtotals_tree [i, 1]);
+        agg = as.scalar (subtotals_tree [i, 1]);
         if (i <= 18 & agg > 0)
         {
             RegresValueMap [reg_index + 1, (t-1) * num_attrs +  i ]   = -1.0 + zero;  # 1st factor: -x[t]
@@ -256,8 +256,8 @@ for (t in 2 : num_state_terms) {
 for (t1 in (num_state_terms + 1) : num_terms) {
     t2 = t1 - num_state_terms;
     for (i in 1 : num_attrs) {
-        if ((i <= num_observed_attrs & t2 <= num_known_terms & castAsScalar (disabled_known_values [i, t2]) == 0.0) |
-            (i > num_observed_attrs & castAsScalar (subtotals_tree [i, 1]) > 0.0))
+        if ((i <= num_observed_attrs & t2 <= num_known_terms & as.scalar (disabled_known_values [i, t2]) == 0.0) |
+            (i > num_observed_attrs & as.scalar (subtotals_tree [i, 1]) > 0.0))
         {
             reg_index = ((t1 - 1) * num_attrs - 1 + i) * num_factors;
             RegresValueMap [reg_index + 1, (t1 - 1) * num_attrs + i] = -1.0 + zero; # 1st factor: -y[t]
@@ -291,7 +291,7 @@ RegresCoeffDefault = matrix (0.0, rows = (num_reg_eqs * num_factors), cols = 1);
 
 for (t in 2 : num_state_terms) {
     for (i in 1 : num_observed_attrs) {
-        if (castAsScalar (subtotals_tree [i, 1]) > 0.0) {
+        if (as.scalar (subtotals_tree [i, 1]) > 0.0) {
             param_1 = 3 * i - 1;
             param_2 = 3 * i;
             param_3 = 3 * i + 1;
@@ -308,7 +308,7 @@ for (t in 2 : num_state_terms) {
     RegresCoeffDefault [reg_index + 4, 1] = 1.0 + zero;
     
     for (i in (num_observed_attrs + 1) : num_attrs) {    
-        if (castAsScalar (subtotals_tree [i, 1]) > 0.0) {
+        if (as.scalar (subtotals_tree [i, 1]) > 0.0) {
             reg_index = ((t-1) * num_attrs - 1 + i) * num_factors;
             RegresCoeffDefault [reg_index + 1, 1] = 1.0 + zero;
             RegresCoeffDefault [reg_index + 2, 1] = 1.0 + zero;
@@ -331,8 +331,8 @@ for (t in 2 : num_state_terms) {
 for (t1 in (num_state_terms + 1) : num_terms) {
     t2 = t1 - num_state_terms;
     for (i in 1 : num_attrs) {
-        if ((i <= num_observed_attrs & t2 <= num_known_terms & castAsScalar (disabled_known_values [i, t2]) == 0.0) |
-            (i > num_observed_attrs & castAsScalar (subtotals_tree [i, 1]) > 0.0))
+        if ((i <= num_observed_attrs & t2 <= num_known_terms & as.scalar (disabled_known_values [i, t2]) == 0.0) |
+            (i > num_observed_attrs & as.scalar (subtotals_tree [i, 1]) > 0.0))
         {
             reg_index = ((t1 - 1) * num_attrs - 1 + i) * num_factors;
             RegresCoeffDefault [reg_index + 1, 1] = 1.0 + zero;
@@ -352,7 +352,7 @@ RegresParamMap     [reg_index + 1, param] = 1.0 + zero;
 RegresCoeffDefault [reg_index + 2,   1  ] = 0.0 + zero;
 
 for (i in 1 : num_observed_attrs) {
-    agg = castAsScalar (subtotals_tree [i, 1]);
+    agg = as.scalar (subtotals_tree [i, 1]);
     if (agg >= 0.0)
     {
         param = 3 * i - 1;
@@ -397,7 +397,7 @@ for (i in 1 : num_attrs)
     scale_factor = 1.0;
     if (i <= num_observed_attrs) {
         ### CORRECTION FOR OBSERVED ATTRIBUTES:
-        attribute_size_i = castAsScalar (attribute_size [i, 1]);
+        attribute_size_i = as.scalar (attribute_size [i, 1]);
         scale_factor = sqrt (attribute_size_i / max_attr_size) * 0.999 + 0.001;
     }
     for (t in 1 : num_terms) {

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/linearLogReg/LinearLogReg.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/linearLogReg/LinearLogReg.dml b/src/test/scripts/applications/linearLogReg/LinearLogReg.dml
index cf2f7ad..4c84158 100644
--- a/src/test/scripts/applications/linearLogReg/LinearLogReg.dml
+++ b/src/test/scripts/applications/linearLogReg/LinearLogReg.dml
@@ -90,7 +90,7 @@ while(!converge) {
 	norm_grad = sqrt(sum(grad*grad))
 	
 	print("-- Outer Iteration = " + iter)
-	objScalar = castAsScalar(obj)
+	objScalar = as.scalar(obj)
 	print("     Iterations = " + iter + ", Objective = " + objScalar + ", Gradient Norm = " + norm_grad)
 	
 	# SOLVE TRUST REGION SUB-PROBLEM
@@ -108,23 +108,23 @@ while(!converge) {
 		alpha_deno = t(d) %*% Hd 
 		alpha = norm_r2 / alpha_deno
 	
-		s = s + castAsScalar(alpha) * d
-		os = os + castAsScalar(alpha) * od
+		s = s + as.scalar(alpha) * d
+		os = os + as.scalar(alpha) * od
 
 		sts = t(s) %*% s
 		delta2 = delta*delta 
-		stsScalar = castAsScalar(sts)
+		stsScalar = as.scalar(sts)
 		
 		shouldBreak = FALSE;  # to mimic "break" in the following 'if' condition
 		if (stsScalar > delta2) {
 		   	print("      --- cg reaches trust region boundary")
-			s = s - castAsScalar(alpha) * d
-			os = os - castAsScalar(alpha) * od
+			s = s - as.scalar(alpha) * d
+			os = os - as.scalar(alpha) * od
 			std = t(s) %*% d
 			dtd = t(d) %*% d
 			sts = t(s) %*% s
 			rad = sqrt(std*std + dtd*(delta2 - sts))
-			stdScalar = castAsScalar(std)
+			stdScalar = as.scalar(std)
 			if(stdScalar >= 0) {
 				tau = (delta2 - sts)/(std + rad)
 			} 
@@ -132,9 +132,9 @@ while(!converge) {
 				tau = (rad - std)/dtd
 			}
 						
-			s = s + castAsScalar(tau) * d
-			os = os + castAsScalar(tau) * od
-			r = r - castAsScalar(tau) * Hd
+			s = s + as.scalar(tau) * d
+			os = os + as.scalar(tau) * od
+			r = r - as.scalar(tau) * Hd
 			
 			#break
 			shouldBreak = TRUE;
@@ -143,7 +143,7 @@ while(!converge) {
 		} 
 		
 		if (!shouldBreak) {
-			r = r - castAsScalar(alpha) * Hd
+			r = r - as.scalar(alpha) * Hd
 			old_norm_r2 = norm_r2 
 			norm_r2 = sum(r*r)
 			beta = norm_r2/old_norm_r2
@@ -164,10 +164,10 @@ while(!converge) {
 	objnew = 0.5 * t(wnew) %*% wnew + C * sum(-log(logisticnew))
 	
 	actred = (obj - objnew)
-	actredScalar = castAsScalar(actred)
+	actredScalar = as.scalar(actred)
 	rho = actred / qk
-	qkScalar = castAsScalar(qk)
-	rhoScalar = castAsScalar(rho);
+	qkScalar = as.scalar(qk)
+	rhoScalar = as.scalar(rho);
 	snorm = sqrt(sum( s * s ))
 	print("     Actual    = " + actredScalar)
 	print("     Predicted = " + qkScalar)
@@ -176,12 +176,12 @@ while(!converge) {
 	   delta = min(delta, snorm)
 	}
 	alpha2 = objnew - obj - gs
-	alpha2Scalar = castAsScalar(alpha2)
+	alpha2Scalar = as.scalar(alpha2)
 	if (alpha2Scalar <= 0) {
 	   alpha = sigma3*e
 	} 
 	else {
-	   ascalar = max(sigma1, -0.5*castAsScalar(gs)/alpha2Scalar)  
+	   ascalar = max(sigma1, -0.5*as.scalar(gs)/alpha2Scalar)  
 	   alpha = ascalar*e
 	}
 
@@ -195,7 +195,7 @@ while(!converge) {
 		obj = objnew	
 	} 
 
-	alphaScalar = castAsScalar(alpha)
+	alphaScalar = as.scalar(alpha)
 	if (rhoScalar < eta0){
 		delta = min(max( alphaScalar , sigma1) * snorm, sigma2 * delta )
 	}
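
In the PyDML scripts (such as LinearLogReg.pydml below), the same cast is spelled scalar() rather than as.scalar(). A minimal PyDML sketch of the corresponding change, again illustrative only (the matrix M is hypothetical):

    M = full(7, rows=3, cols=3)
    # old form: v = castAsScalar(M[1,1])
    v = scalar(M[1,1])   # cast the 1x1 slice to a scalar value
    print("value = " + v)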

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/linearLogReg/LinearLogReg.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/linearLogReg/LinearLogReg.pydml b/src/test/scripts/applications/linearLogReg/LinearLogReg.pydml
index 1a9e769..188cd42 100644
--- a/src/test/scripts/applications/linearLogReg/LinearLogReg.pydml
+++ b/src/test/scripts/applications/linearLogReg/LinearLogReg.pydml
@@ -91,7 +91,7 @@ while(!converge):
     norm_grad = sqrt(sum(grad*grad))
     
     print("-- Outer Iteration = " + iter)
-    objScalar = castAsScalar(obj)
+    objScalar = scalar(obj)
     print("     Iterations = " + iter + ", Objective = " + objScalar + ", Gradient Norm = " + norm_grad)
     
     # SOLVE TRUST REGION SUB-PROBLEM
@@ -109,37 +109,37 @@ while(!converge):
         alpha_deno = dot(transpose(d), Hd)
         alpha = norm_r2 / alpha_deno
         
-        s = s + castAsScalar(alpha) * d
-        os = os + castAsScalar(alpha) * od
+        s = s + scalar(alpha) * d
+        os = os + scalar(alpha) * od
         
         sts = dot(transpose(s), s)
         delta2 = delta*delta 
-        stsScalar = castAsScalar(sts)
+        stsScalar = scalar(sts)
         
         shouldBreak = False  # to mimic "break" in the following 'if' condition
         if (stsScalar > delta2):
             print("      --- cg reaches trust region boundary")
-            s = s - castAsScalar(alpha) * d
-            os = os - castAsScalar(alpha) * od
+            s = s - scalar(alpha) * d
+            os = os - scalar(alpha) * od
             std = dot(transpose(s), d)
             dtd = dot(transpose(d), d)
             sts = dot(transpose(s), s)
             rad = sqrt(std*std + dtd*(delta2 - sts))
-            stdScalar = castAsScalar(std)
+            stdScalar = scalar(std)
             if(stdScalar >= 0):
                 tau = (delta2 - sts)/(std + rad)
             else:
                 tau = (rad - std)/dtd
             
-            s = s + castAsScalar(tau) * d
-            os = os + castAsScalar(tau) * od
-            r = r - castAsScalar(tau) * Hd
+            s = s + scalar(tau) * d
+            os = os + scalar(tau) * od
+            r = r - scalar(tau) * Hd
             
             #break
             shouldBreak = True
             innerconverge = True
         if (!shouldBreak):
-            r = r - castAsScalar(alpha) * Hd
+            r = r - scalar(alpha) * Hd
             old_norm_r2 = norm_r2
             norm_r2 = sum(r*r)
             beta = norm_r2/old_norm_r2
@@ -159,10 +159,10 @@ while(!converge):
     objnew = dot((0.5 * transpose(wnew)), wnew) + C * sum(-log(logisticnew))
     
     actred = (obj - objnew)
-    actredScalar = castAsScalar(actred)
+    actredScalar = scalar(actred)
     rho = actred / qk
-    qkScalar = castAsScalar(qk)
-    rhoScalar = castAsScalar(rho)
+    qkScalar = scalar(qk)
+    rhoScalar = scalar(rho)
     snorm = sqrt(sum( s * s ))
     print("     Actual    = " + actredScalar)
     print("     Predicted = " + qkScalar)
@@ -170,11 +170,11 @@ while(!converge):
     if (iter==0):
         delta = min(delta, snorm)
     alpha2 = objnew - obj - gs
-    alpha2Scalar = castAsScalar(alpha2)
+    alpha2Scalar = scalar(alpha2)
     if (alpha2Scalar <= 0):
         alpha = sigma3*e
     else:
-        ascalar = max(sigma1, -0.5*castAsScalar(gs)/alpha2Scalar)
+        ascalar = max(sigma1, -0.5*scalar(gs)/alpha2Scalar)
         alpha = ascalar*e
     
     if (rhoScalar > eta0):
@@ -185,7 +185,7 @@ while(!converge):
         logisticD = logisticnew * (1 - logisticnew)
         obj = objnew
     
-    alphaScalar = castAsScalar(alpha)
+    alphaScalar = scalar(alpha)
     if (rhoScalar < eta0):
         delta = min(max( alphaScalar , sigma1) * snorm, sigma2 * delta )
     else:

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/linear_regression/LinearRegression.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/linear_regression/LinearRegression.dml b/src/test/scripts/applications/linear_regression/LinearRegression.dml
index e02c53d..5720b8b 100644
--- a/src/test/scripts/applications/linear_regression/LinearRegression.dml
+++ b/src/test/scripts/applications/linear_regression/LinearRegression.dml
@@ -40,7 +40,7 @@ max_iteration = 3;
 i = 0;
 while(i < max_iteration) {
 	q = ((t(V) %*% (V %*% p)) + eps  * p);
-	alpha = norm_r2 / castAsScalar(t(p) %*% q);
+	alpha = norm_r2 / as.scalar(t(p) %*% q);
 	w = w + alpha * p;
 	old_norm_r2 = norm_r2;
 	r = r + alpha * q;

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/m-svm/m-svm.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/m-svm/m-svm.dml b/src/test/scripts/applications/m-svm/m-svm.dml
index bbf5acc..7b2828f 100644
--- a/src/test/scripts/applications/m-svm/m-svm.dml
+++ b/src/test/scripts/applications/m-svm/m-svm.dml
@@ -136,7 +136,7 @@ if(check_X == 0){
 	debug_str = "# Class, Iter, Obj"
 	for(iter_class in 1:ncol(debug_mat)){
 		for(iter in 1:nrow(debug_mat)){
-			obj = castAsScalar(debug_mat[iter, iter_class])
+			obj = as.scalar(debug_mat[iter, iter_class])
 			if(obj != -1) 
 				debug_str = append(debug_str, iter_class + "," + iter + "," + obj)
 		}

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/m-svm/m-svm.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/m-svm/m-svm.pydml b/src/test/scripts/applications/m-svm/m-svm.pydml
index 348f599..fc3669c 100644
--- a/src/test/scripts/applications/m-svm/m-svm.pydml
+++ b/src/test/scripts/applications/m-svm/m-svm.pydml
@@ -129,7 +129,7 @@ else:
     debug_str = "# Class, Iter, Obj"
     for(iter_class in 1:ncol(debug_mat)):
         for(iter in 1:nrow(debug_mat)):
-            obj = castAsScalar(debug_mat[iter, iter_class])
+            obj = scalar(debug_mat[iter, iter_class])
             if(obj != -1):
                 debug_str = append(debug_str, iter_class + "," + iter + "," + obj)
     save(debug_str, $Log)

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/mdabivar/MDABivariateStats.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/mdabivar/MDABivariateStats.dml b/src/test/scripts/applications/mdabivar/MDABivariateStats.dml
index 56163ad..5bb980c 100644
--- a/src/test/scripts/applications/mdabivar/MDABivariateStats.dml
+++ b/src/test/scripts/applications/mdabivar/MDABivariateStats.dml
@@ -65,9 +65,9 @@ mn = colMins(D)
 num_distinct_values = mx-mn+1
 max_num_distinct_values = 0
 for(i1 in 1:nrow(feature_indices)){
-	feature_index1 = castAsScalar(feature_indices[i1,1])
-	num = castAsScalar(num_distinct_values[1,feature_index1])
-	if(castAsScalar(feature_measurement_levels[i1,1]) == 0 & num >= max_num_distinct_values){
+	feature_index1 = as.scalar(feature_indices[i1,1])
+	num = as.scalar(num_distinct_values[1,feature_index1])
+	if(as.scalar(feature_measurement_levels[i1,1]) == 0 & num >= max_num_distinct_values){
 		max_num_distinct_values = num
 	}
 }
@@ -107,8 +107,8 @@ if(label_measurement_level == 0){
 }
 
 parfor(i3 in 1:nrow(feature_indices), check=0){
-	feature_index2 = castAsScalar(feature_indices[i3,1])
-	feature_measurement_level = castAsScalar(feature_measurement_levels[i3,1])
+	feature_index2 = as.scalar(feature_indices[i3,1])
+	feature_measurement_level = as.scalar(feature_measurement_levels[i3,1])
 	
 	feature = D[,feature_index2]
 	

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/mdabivar/MDABivariateStats.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/mdabivar/MDABivariateStats.pydml b/src/test/scripts/applications/mdabivar/MDABivariateStats.pydml
index 7fbc101..134eb4b 100644
--- a/src/test/scripts/applications/mdabivar/MDABivariateStats.pydml
+++ b/src/test/scripts/applications/mdabivar/MDABivariateStats.pydml
@@ -64,9 +64,9 @@ mn = colMins(D)
 num_distinct_values = mx-mn+1
 max_num_distinct_values = 0
 for(i1 in 1:nrow(feature_indices)):
-    feature_index1 = castAsScalar(feature_indices[i1,1])
-    num = castAsScalar(num_distinct_values[1,feature_index1])
-    if(castAsScalar(feature_measurement_levels[i1,1]) == 0 & num >= max_num_distinct_values):
+    feature_index1 = scalar(feature_indices[i1,1])
+    num = scalar(num_distinct_values[1,feature_index1])
+    if(scalar(feature_measurement_levels[i1,1]) == 0 & num >= max_num_distinct_values):
         max_num_distinct_values = num
 distinct_label_values = full(0, rows=1, cols=1)
 contingencyTableSz = 1
@@ -99,8 +99,8 @@ if(label_measurement_level == 0):
         featureValues[label_index,i2] = i2-labelCorrection
 
 parfor(i3 in 1:nrow(feature_indices), check=0):
-    feature_index2 = castAsScalar(feature_indices[i3,1])
-    feature_measurement_level = castAsScalar(feature_measurement_levels[i3,1])
+    feature_index2 = scalar(feature_indices[i3,1])
+    feature_measurement_level = scalar(feature_measurement_levels[i3,1])
     
     feature = D[,feature_index2]
     

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/parfor/parfor_bivariate0.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/parfor/parfor_bivariate0.dml b/src/test/scripts/applications/parfor/parfor_bivariate0.dml
index 341a0b1..662b745 100644
--- a/src/test/scripts/applications/parfor/parfor_bivariate0.dml
+++ b/src/test/scripts/applications/parfor/parfor_bivariate0.dml
@@ -67,16 +67,16 @@ cat_vars = matrix(0, rows=maxC, cols=numPairs);
 
 
 for( i in 1:s1size ) {
-    a1 = castAsScalar(S1[,i]);
-    k1 = castAsScalar(K1[1,i]);
+    a1 = as.scalar(S1[,i]);
+    k1 = as.scalar(K1[1,i]);
     A1 = D[,a1];
     #print("a1="+a1);
 
     for( j in 1:s2size ) {
         pairID = (i-1)*s2size+j; 
         #print("ID="+pairID+"(i="+i+",j="+j+")");
-        a2 = castAsScalar(S2[,j]);
-        k2 = castAsScalar(K2[1,j]);
+        a2 = as.scalar(S2[,j]);
+        k2 = as.scalar(K2[1,j]);
         A2 = D[,a2];
         #print("a2="+a2);
     

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/parfor/parfor_bivariate1.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/parfor/parfor_bivariate1.dml b/src/test/scripts/applications/parfor/parfor_bivariate1.dml
index aa40d7c..42a956d 100644
--- a/src/test/scripts/applications/parfor/parfor_bivariate1.dml
+++ b/src/test/scripts/applications/parfor/parfor_bivariate1.dml
@@ -67,14 +67,14 @@ cat_vars = matrix(0, rows=maxC, cols=numPairs);
 
 
 parfor( i in 1:s1size, par=4, mode=LOCAL, check=0, opt=NONE) {
-    a1 = castAsScalar(S1[,i]);
-    k1 = castAsScalar(K1[1,i]);
+    a1 = as.scalar(S1[,i]);
+    k1 = as.scalar(K1[1,i]);
     A1 = D[,a1];
 
     parfor( j in 1:s2size, par=4, mode=LOCAL, check=0, opt=NONE) {
         pairID = (i-1)*s2size+j; 
-        a2 = castAsScalar(S2[,j]);
-        k2 = castAsScalar(K2[1,j]);
+        a2 = as.scalar(S2[,j]);
+        k2 = as.scalar(K2[1,j]);
         A2 = D[,a2];
     
         if (k1 == k2) {

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/parfor/parfor_bivariate2.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/parfor/parfor_bivariate2.dml b/src/test/scripts/applications/parfor/parfor_bivariate2.dml
index d0734a9..8ee59b7 100644
--- a/src/test/scripts/applications/parfor/parfor_bivariate2.dml
+++ b/src/test/scripts/applications/parfor/parfor_bivariate2.dml
@@ -67,14 +67,14 @@ cat_vars = matrix(0, rows=maxC, cols=numPairs);
 
 
 parfor( i in 1:s1size, par=4, mode=LOCAL, check=0, opt=NONE) {
-    a1 = castAsScalar(S1[,i]);
-    k1 = castAsScalar(K1[1,i]);
+    a1 = as.scalar(S1[,i]);
+    k1 = as.scalar(K1[1,i]);
     A1 = D[,a1];
 
     parfor( j in 1:s2size, par=4, mode=REMOTE_MR, check=0, opt=NONE) {
         pairID = (i-1)*s2size+j; 
-        a2 = castAsScalar(S2[,j]);
-        k2 = castAsScalar(K2[1,j]);
+        a2 = as.scalar(S2[,j]);
+        k2 = as.scalar(K2[1,j]);
         A2 = D[,a2];
     
         if (k1 == k2) {

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/parfor/parfor_bivariate3.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/parfor/parfor_bivariate3.dml b/src/test/scripts/applications/parfor/parfor_bivariate3.dml
index 990a6fe..f5c43ef 100644
--- a/src/test/scripts/applications/parfor/parfor_bivariate3.dml
+++ b/src/test/scripts/applications/parfor/parfor_bivariate3.dml
@@ -67,14 +67,14 @@ cat_vars = matrix(0, rows=maxC, cols=numPairs);
 
 
 parfor( i in 1:s1size, par=4, mode=REMOTE_MR, check=0, opt=NONE) {
-    a1 = castAsScalar(S1[,i]);
-    k1 = castAsScalar(K1[1,i]);
+    a1 = as.scalar(S1[,i]);
+    k1 = as.scalar(K1[1,i]);
     A1 = D[,a1];
 
     parfor( j in 1:s2size, par=4, mode=LOCAL, check=0, opt=NONE) {
         pairID = (i-1)*s2size+j; 
-        a2 = castAsScalar(S2[,j]);
-        k2 = castAsScalar(K2[1,j]);
+        a2 = as.scalar(S2[,j]);
+        k2 = as.scalar(K2[1,j]);
         A2 = D[,a2];
     
         if (k1 == k2) {

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/parfor/parfor_bivariate4.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/parfor/parfor_bivariate4.dml b/src/test/scripts/applications/parfor/parfor_bivariate4.dml
index a19957f..c2e78d5 100644
--- a/src/test/scripts/applications/parfor/parfor_bivariate4.dml
+++ b/src/test/scripts/applications/parfor/parfor_bivariate4.dml
@@ -70,14 +70,14 @@ cat_vars = matrix(0, rows=maxC, cols=numPairs);
 
 
 parfor( i in 1:s1size, check=0) {
-    a1 = castAsScalar(S1[,i]);
-    k1 = castAsScalar(K1[1,i]);
+    a1 = as.scalar(S1[,i]);
+    k1 = as.scalar(K1[1,i]);
     A1 = D[,a1];
 
     parfor( j in 1:s2size, check=0) {
         pairID = (i-1)*s2size+j; 
-        a2 = castAsScalar(S2[,j]);
-        k2 = castAsScalar(K2[1,j]);
+        a2 = as.scalar(S2[,j]);
+        k2 = as.scalar(K2[1,j]);
         A2 = D[,a2];
     
         if (k1 == k2) {

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/parfor/parfor_univariate0.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/parfor/parfor_univariate0.dml b/src/test/scripts/applications/parfor/parfor_univariate0.dml
index 2a6a9c5..78397cb 100644
--- a/src/test/scripts/applications/parfor/parfor_univariate0.dml
+++ b/src/test/scripts/applications/parfor/parfor_univariate0.dml
@@ -77,7 +77,7 @@ else {
 		# project out the i^th column
 		F = A[,i];
 
-		kind = castAsScalar(K[1,i]);
+		kind = as.scalar(K[1,i]);
 
 		if ( kind == 1 ) {
 			print("[" + i + "] Scale");

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/parfor/parfor_univariate1.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/parfor/parfor_univariate1.dml b/src/test/scripts/applications/parfor/parfor_univariate1.dml
index 1f120ef..64ee633 100644
--- a/src/test/scripts/applications/parfor/parfor_univariate1.dml
+++ b/src/test/scripts/applications/parfor/parfor_univariate1.dml
@@ -77,7 +77,7 @@ else {
 		# project out the i^th column
 		F = A[,i];
 
-		kind = castAsScalar(K[1,i]);
+		kind = as.scalar(K[1,i]);
 
 		if ( kind == 1 ) {
 			print("[" + i + "] Scale");

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/parfor/parfor_univariate4.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/parfor/parfor_univariate4.dml b/src/test/scripts/applications/parfor/parfor_univariate4.dml
index 8953c64..465cb8f 100644
--- a/src/test/scripts/applications/parfor/parfor_univariate4.dml
+++ b/src/test/scripts/applications/parfor/parfor_univariate4.dml
@@ -77,7 +77,7 @@ else {
 		# project out the i^th column
 		F = A[,i];
 
-		kind = castAsScalar(K[1,i]);
+		kind = as.scalar(K[1,i]);
 
 		if ( kind == 1 ) {
 			print("[" + i + "] Scale");

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/validation/CV_LogisticRegression.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/validation/CV_LogisticRegression.dml b/src/test/scripts/applications/validation/CV_LogisticRegression.dml
index e890b80..d91a3f7 100644
--- a/src/test/scripts/applications/validation/CV_LogisticRegression.dml
+++ b/src/test/scripts/applications/validation/CV_LogisticRegression.dml
@@ -165,7 +165,7 @@ logisticRegression = function (Matrix[double] X, Matrix[double] y, Integer in_in
    norm_grad = sqrt(sum(grad*grad))
    
    #print("-- Outer Iteration = " + iter)
-   objScalar = castAsScalar(obj)
+   objScalar = as.scalar(obj)
    #print("     Iterations = " + iter + ", Objective = " + objScalar + ", Gradient Norm = " + norm_grad)
    
    # SOLVE TRUST REGION SUB-PROBLEM
@@ -183,23 +183,23 @@ logisticRegression = function (Matrix[double] X, Matrix[double] y, Integer in_in
     alpha_deno = t(d) %*% Hd 
     alpha = norm_r2 / alpha_deno
    
-    s = s + castAsScalar(alpha) * d
-    os = os + castAsScalar(alpha) * od
+    s = s + as.scalar(alpha) * d
+    os = os + as.scalar(alpha) * od
   
     sts = t(s) %*% s
     delta2 = delta*delta 
-    stsScalar = castAsScalar(sts)
+    stsScalar = as.scalar(sts)
     
     shouldBreak = FALSE;  # to mimic "break" in the following 'if' condition
     if (stsScalar > delta2) {
         #print("      --- cg reaches trust region boundary")
-     s = s - castAsScalar(alpha) * d
-     os = os - castAsScalar(alpha) * od
+     s = s - as.scalar(alpha) * d
+     os = os - as.scalar(alpha) * od
      std = t(s) %*% d
      dtd = t(d) %*% d
      sts = t(s) %*% s
      rad = sqrt(std*std + dtd*(delta2 - sts))
-     stdScalar = castAsScalar(std)
+     stdScalar = as.scalar(std)
      
      tau = 0; #TODO
      if(stdScalar >= 0) {
@@ -209,9 +209,9 @@ logisticRegression = function (Matrix[double] X, Matrix[double] y, Integer in_in
       tau = (rad - std)/dtd
      }
         
-     s = s + castAsScalar(tau) * d
-     os = os + castAsScalar(tau) * od
-     r = r - castAsScalar(tau) * Hd
+     s = s + as.scalar(tau) * d
+     os = os + as.scalar(tau) * od
+     r = r - as.scalar(tau) * Hd
      
      #break
      shouldBreak = TRUE;
@@ -220,7 +220,7 @@ logisticRegression = function (Matrix[double] X, Matrix[double] y, Integer in_in
     } 
     
     if (!shouldBreak) {
-     r = r - castAsScalar(alpha) * Hd
+     r = r - as.scalar(alpha) * Hd
      old_norm_r2 = norm_r2 
      norm_r2 = sum(r*r)
      beta = norm_r2/old_norm_r2
@@ -241,10 +241,10 @@ logisticRegression = function (Matrix[double] X, Matrix[double] y, Integer in_in
    objnew = 0.5 * t(wnew) %*% wnew + C * sum(-log(logisticnew))
    
    actred = (obj - objnew)
-   actredScalar = castAsScalar(actred)
+   actredScalar = as.scalar(actred)
    rho = actred / qk
-   qkScalar = castAsScalar(qk)
-   rhoScalar = castAsScalar(rho);
+   qkScalar = as.scalar(qk)
+   rhoScalar = as.scalar(rho);
    snorm = sqrt(sum( s * s ))
   
    #print("     Actual    = " + actredScalar)
@@ -254,12 +254,12 @@ logisticRegression = function (Matrix[double] X, Matrix[double] y, Integer in_in
       delta = min(delta, snorm)
    }
    alpha2 = objnew - obj - gs
-   alpha2Scalar = castAsScalar(alpha2)
+   alpha2Scalar = as.scalar(alpha2)
    if (alpha2Scalar <= 0) {
       alpha = sigma3*e
    } 
    else {
-      ascalar = max(sigma1, -0.5*castAsScalar(gs)/alpha2Scalar)  
+      ascalar = max(sigma1, -0.5*as.scalar(gs)/alpha2Scalar)  
       alpha = ascalar*e
    }
   
@@ -273,7 +273,7 @@ logisticRegression = function (Matrix[double] X, Matrix[double] y, Integer in_in
     obj = objnew 
    } 
   
-   alphaScalar = castAsScalar(alpha)
+   alphaScalar = as.scalar(alpha)
    if (rhoScalar < eta0){
     delta = min(max( alphaScalar , sigma1) * snorm, sigma2 * delta )
    }
@@ -342,7 +342,7 @@ scoreLogRegModel = function (Matrix[double] X_train, Matrix[double] y_train, Mat
 
     b = 0.0;
     if (nrow (w) > num_features) {
-        b = castAsScalar (w [num_features + 1, 1]);
+        b = as.scalar (w [num_features + 1, 1]);
     }
 
 # TRAINING DATA - ESTIMATE PROBABILITIES:
@@ -472,56 +472,56 @@ printFoldStatistics = function (Matrix[double] stats ) return( Integer err )
     stats_min = round (colMins (stats) * 10000.0) / 10000.0;
 /*    
     print ("Training Data, Model-estimated statistics:");
-    print ("    True Positives:  Min = " + castAsScalar (stats_min [1,  1]) + ",  Avg = " + castAsScalar (stats_avg [1,  1]) + ",  Max = " + castAsScalar (stats_max [1,  1]));
-    print ("   False Positives:  Min = " + castAsScalar (stats_min [1,  2]) + ",  Avg = " + castAsScalar (stats_avg [1,  2]) + ",  Max = " + castAsScalar (stats_max [1,  2]));
-    print ("    True Negatives:  Min = " + castAsScalar (stats_min [1,  3]) + ",  Avg = " + castAsScalar (stats_avg [1,  3]) + ",  Max = " + castAsScalar (stats_max [1,  3]));
-    print ("   False Negatives:  Min = " + castAsScalar (stats_min [1,  4]) + ",  Avg = " + castAsScalar (stats_avg [1,  4]) + ",  Max = " + castAsScalar (stats_max [1,  4]));
-    print ("       Precision %:  Min = " + castAsScalar (stats_min [1,  5]) + ",  Avg = " + castAsScalar (stats_avg [1,  5]) + ",  Max = " + castAsScalar (stats_max [1,  5]));
-    print ("Recall (Sensit-y)%:  Min = " + castAsScalar (stats_min [1,  6]) + ",  Avg = " + castAsScalar (stats_avg [1,  6]) + ",  Max = " + castAsScalar (stats_max [1,  6]));
-    print ("     Specificity %:  Min = " + castAsScalar (stats_min [1,  7]) + ",  Avg = " + castAsScalar (stats_avg [1,  7]) + ",  Max = " + castAsScalar (stats_max [1,  7]));
-    print ("      Value - Cost:  Min = " + castAsScalar (stats_min [1,  8]) + ",  Avg = " + castAsScalar (stats_avg [1,  8]) + ",  Max = " + castAsScalar (stats_max [1,  8]));
+    print ("    True Positives:  Min = " + as.scalar (stats_min [1,  1]) + ",  Avg = " + as.scalar (stats_avg [1,  1]) + ",  Max = " + as.scalar (stats_max [1,  1]));
+    print ("   False Positives:  Min = " + as.scalar (stats_min [1,  2]) + ",  Avg = " + as.scalar (stats_avg [1,  2]) + ",  Max = " + as.scalar (stats_max [1,  2]));
+    print ("    True Negatives:  Min = " + as.scalar (stats_min [1,  3]) + ",  Avg = " + as.scalar (stats_avg [1,  3]) + ",  Max = " + as.scalar (stats_max [1,  3]));
+    print ("   False Negatives:  Min = " + as.scalar (stats_min [1,  4]) + ",  Avg = " + as.scalar (stats_avg [1,  4]) + ",  Max = " + as.scalar (stats_max [1,  4]));
+    print ("       Precision %:  Min = " + as.scalar (stats_min [1,  5]) + ",  Avg = " + as.scalar (stats_avg [1,  5]) + ",  Max = " + as.scalar (stats_max [1,  5]));
+    print ("Recall (Sensit-y)%:  Min = " + as.scalar (stats_min [1,  6]) + ",  Avg = " + as.scalar (stats_avg [1,  6]) + ",  Max = " + as.scalar (stats_max [1,  6]));
+    print ("     Specificity %:  Min = " + as.scalar (stats_min [1,  7]) + ",  Avg = " + as.scalar (stats_avg [1,  7]) + ",  Max = " + as.scalar (stats_max [1,  7]));
+    print ("      Value - Cost:  Min = " + as.scalar (stats_min [1,  8]) + ",  Avg = " + as.scalar (stats_avg [1,  8]) + ",  Max = " + as.scalar (stats_max [1,  8]));
     print (" ");
     if (1==1) {
       print(" ")
     }
 */
     print ("Training Data, Label comparison statistics:");
-    print ("    True Positives:  Min = " + castAsScalar (stats_min [1, 11]) + ",  Avg = " + castAsScalar (stats_avg [1, 11]) + ",  Max = " + castAsScalar (stats_max [1, 11]));
-    print ("   False Positives:  Min = " + castAsScalar (stats_min [1, 12]) + ",  Avg = " + castAsScalar (stats_avg [1, 12]) + ",  Max = " + castAsScalar (stats_max [1, 12]));
-    print ("    True Negatives:  Min = " + castAsScalar (stats_min [1, 13]) + ",  Avg = " + castAsScalar (stats_avg [1, 13]) + ",  Max = " + castAsScalar (stats_max [1, 13]));
-    print ("   False Negatives:  Min = " + castAsScalar (stats_min [1, 14]) + ",  Avg = " + castAsScalar (stats_avg [1, 14]) + ",  Max = " + castAsScalar (stats_max [1, 14]));
-    print ("       Precision %:  Min = " + castAsScalar (stats_min [1, 15]) + ",  Avg = " + castAsScalar (stats_avg [1, 15]) + ",  Max = " + castAsScalar (stats_max [1, 15]));
-    print ("Recall (Sensit-y)%:  Min = " + castAsScalar (stats_min [1, 16]) + ",  Avg = " + castAsScalar (stats_avg [1, 16]) + ",  Max = " + castAsScalar (stats_max [1, 16]));
-    print ("     Specificity %:  Min = " + castAsScalar (stats_min [1, 17]) + ",  Avg = " + castAsScalar (stats_avg [1, 17]) + ",  Max = " + castAsScalar (stats_max [1, 17]));
-    print ("      Value - Cost:  Min = " + castAsScalar (stats_min [1, 18]) + ",  Avg = " + castAsScalar (stats_avg [1, 18]) + ",  Max = " + castAsScalar (stats_max [1, 18]));
+    print ("    True Positives:  Min = " + as.scalar (stats_min [1, 11]) + ",  Avg = " + as.scalar (stats_avg [1, 11]) + ",  Max = " + as.scalar (stats_max [1, 11]));
+    print ("   False Positives:  Min = " + as.scalar (stats_min [1, 12]) + ",  Avg = " + as.scalar (stats_avg [1, 12]) + ",  Max = " + as.scalar (stats_max [1, 12]));
+    print ("    True Negatives:  Min = " + as.scalar (stats_min [1, 13]) + ",  Avg = " + as.scalar (stats_avg [1, 13]) + ",  Max = " + as.scalar (stats_max [1, 13]));
+    print ("   False Negatives:  Min = " + as.scalar (stats_min [1, 14]) + ",  Avg = " + as.scalar (stats_avg [1, 14]) + ",  Max = " + as.scalar (stats_max [1, 14]));
+    print ("       Precision %:  Min = " + as.scalar (stats_min [1, 15]) + ",  Avg = " + as.scalar (stats_avg [1, 15]) + ",  Max = " + as.scalar (stats_max [1, 15]));
+    print ("Recall (Sensit-y)%:  Min = " + as.scalar (stats_min [1, 16]) + ",  Avg = " + as.scalar (stats_avg [1, 16]) + ",  Max = " + as.scalar (stats_max [1, 16]));
+    print ("     Specificity %:  Min = " + as.scalar (stats_min [1, 17]) + ",  Avg = " + as.scalar (stats_avg [1, 17]) + ",  Max = " + as.scalar (stats_max [1, 17]));
+    print ("      Value - Cost:  Min = " + as.scalar (stats_min [1, 18]) + ",  Avg = " + as.scalar (stats_avg [1, 18]) + ",  Max = " + as.scalar (stats_max [1, 18]));
     print (" ");
     if (1==1) {
       print(" ")
     }
 /*
     print ("TEST Data, Model-estimated statistics:");
-    print ("    True Positives:  Min = " + castAsScalar (stats_min [1, 21]) + ",  Avg = " + castAsScalar (stats_avg [1, 21]) + ",  Max = " + castAsScalar (stats_max [1, 21]));
-    print ("   False Positives:  Min = " + castAsScalar (stats_min [1, 22]) + ",  Avg = " + castAsScalar (stats_avg [1, 22]) + ",  Max = " + castAsScalar (stats_max [1, 22]));
-    print ("    True Negatives:  Min = " + castAsScalar (stats_min [1, 23]) + ",  Avg = " + castAsScalar (stats_avg [1, 23]) + ",  Max = " + castAsScalar (stats_max [1, 23]));
-    print ("   False Negatives:  Min = " + castAsScalar (stats_min [1, 24]) + ",  Avg = " + castAsScalar (stats_avg [1, 24]) + ",  Max = " + castAsScalar (stats_max [1, 24]));
-    print ("       Precision %:  Min = " + castAsScalar (stats_min [1, 25]) + ",  Avg = " + castAsScalar (stats_avg [1, 25]) + ",  Max = " + castAsScalar (stats_max [1, 25]));
-    print ("Recall (Sensit-y)%:  Min = " + castAsScalar (stats_min [1, 26]) + ",  Avg = " + castAsScalar (stats_avg [1, 26]) + ",  Max = " + castAsScalar (stats_max [1, 26]));
-    print ("     Specificity %:  Min = " + castAsScalar (stats_min [1, 27]) + ",  Avg = " + castAsScalar (stats_avg [1, 27]) + ",  Max = " + castAsScalar (stats_max [1, 27]));
-    print ("      Value - Cost:  Min = " + castAsScalar (stats_min [1, 28]) + ",  Avg = " + castAsScalar (stats_avg [1, 28]) + ",  Max = " + castAsScalar (stats_max [1, 28]));
+    print ("    True Positives:  Min = " + as.scalar (stats_min [1, 21]) + ",  Avg = " + as.scalar (stats_avg [1, 21]) + ",  Max = " + as.scalar (stats_max [1, 21]));
+    print ("   False Positives:  Min = " + as.scalar (stats_min [1, 22]) + ",  Avg = " + as.scalar (stats_avg [1, 22]) + ",  Max = " + as.scalar (stats_max [1, 22]));
+    print ("    True Negatives:  Min = " + as.scalar (stats_min [1, 23]) + ",  Avg = " + as.scalar (stats_avg [1, 23]) + ",  Max = " + as.scalar (stats_max [1, 23]));
+    print ("   False Negatives:  Min = " + as.scalar (stats_min [1, 24]) + ",  Avg = " + as.scalar (stats_avg [1, 24]) + ",  Max = " + as.scalar (stats_max [1, 24]));
+    print ("       Precision %:  Min = " + as.scalar (stats_min [1, 25]) + ",  Avg = " + as.scalar (stats_avg [1, 25]) + ",  Max = " + as.scalar (stats_max [1, 25]));
+    print ("Recall (Sensit-y)%:  Min = " + as.scalar (stats_min [1, 26]) + ",  Avg = " + as.scalar (stats_avg [1, 26]) + ",  Max = " + as.scalar (stats_max [1, 26]));
+    print ("     Specificity %:  Min = " + as.scalar (stats_min [1, 27]) + ",  Avg = " + as.scalar (stats_avg [1, 27]) + ",  Max = " + as.scalar (stats_max [1, 27]));
+    print ("      Value - Cost:  Min = " + as.scalar (stats_min [1, 28]) + ",  Avg = " + as.scalar (stats_avg [1, 28]) + ",  Max = " + as.scalar (stats_max [1, 28]));
     print (" ");
     if (1==1) {
       print(" ")
     }
 */
     print ("TEST Data, Label comparison statistics:");
-    print ("    True Positives:  Min = " + castAsScalar (stats_min [1, 31]) + ",  Avg = " + castAsScalar (stats_avg [1, 31]) + ",  Max = " + castAsScalar (stats_max [1, 31]));
-    print ("   False Positives:  Min = " + castAsScalar (stats_min [1, 32]) + ",  Avg = " + castAsScalar (stats_avg [1, 32]) + ",  Max = " + castAsScalar (stats_max [1, 32]));
-    print ("    True Negatives:  Min = " + castAsScalar (stats_min [1, 33]) + ",  Avg = " + castAsScalar (stats_avg [1, 33]) + ",  Max = " + castAsScalar (stats_max [1, 33]));
-    print ("   False Negatives:  Min = " + castAsScalar (stats_min [1, 34]) + ",  Avg = " + castAsScalar (stats_avg [1, 34]) + ",  Max = " + castAsScalar (stats_max [1, 34]));
-    print ("       Precision %:  Min = " + castAsScalar (stats_min [1, 35]) + ",  Avg = " + castAsScalar (stats_avg [1, 35]) + ",  Max = " + castAsScalar (stats_max [1, 35]));
-    print ("Recall (Sensit-y)%:  Min = " + castAsScalar (stats_min [1, 36]) + ",  Avg = " + castAsScalar (stats_avg [1, 36]) + ",  Max = " + castAsScalar (stats_max [1, 36]));
-    print ("     Specificity %:  Min = " + castAsScalar (stats_min [1, 37]) + ",  Avg = " + castAsScalar (stats_avg [1, 37]) + ",  Max = " + castAsScalar (stats_max [1, 37]));
-    print ("      Value - Cost:  Min = " + castAsScalar (stats_min [1, 38]) + ",  Avg = " + castAsScalar (stats_avg [1, 38]) + ",  Max = " + castAsScalar (stats_max [1, 38]));
+    print ("    True Positives:  Min = " + as.scalar (stats_min [1, 31]) + ",  Avg = " + as.scalar (stats_avg [1, 31]) + ",  Max = " + as.scalar (stats_max [1, 31]));
+    print ("   False Positives:  Min = " + as.scalar (stats_min [1, 32]) + ",  Avg = " + as.scalar (stats_avg [1, 32]) + ",  Max = " + as.scalar (stats_max [1, 32]));
+    print ("    True Negatives:  Min = " + as.scalar (stats_min [1, 33]) + ",  Avg = " + as.scalar (stats_avg [1, 33]) + ",  Max = " + as.scalar (stats_max [1, 33]));
+    print ("   False Negatives:  Min = " + as.scalar (stats_min [1, 34]) + ",  Avg = " + as.scalar (stats_avg [1, 34]) + ",  Max = " + as.scalar (stats_max [1, 34]));
+    print ("       Precision %:  Min = " + as.scalar (stats_min [1, 35]) + ",  Avg = " + as.scalar (stats_avg [1, 35]) + ",  Max = " + as.scalar (stats_max [1, 35]));
+    print ("Recall (Sensit-y)%:  Min = " + as.scalar (stats_min [1, 36]) + ",  Avg = " + as.scalar (stats_avg [1, 36]) + ",  Max = " + as.scalar (stats_max [1, 36]));
+    print ("     Specificity %:  Min = " + as.scalar (stats_min [1, 37]) + ",  Avg = " + as.scalar (stats_avg [1, 37]) + ",  Max = " + as.scalar (stats_max [1, 37]));
+    print ("      Value - Cost:  Min = " + as.scalar (stats_min [1, 38]) + ",  Avg = " + as.scalar (stats_avg [1, 38]) + ",  Max = " + as.scalar (stats_max [1, 38]));
 
     err = 0;
 }

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/validation/CV_MultiClassSVM.sasha.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/validation/CV_MultiClassSVM.sasha.dml b/src/test/scripts/applications/validation/CV_MultiClassSVM.sasha.dml
index f63c636..a3099d6 100644
--- a/src/test/scripts/applications/validation/CV_MultiClassSVM.sasha.dml
+++ b/src/test/scripts/applications/validation/CV_MultiClassSVM.sasha.dml
@@ -309,18 +309,18 @@ printFoldStatistics = function (Matrix[double] stats ) return( Integer err )
     stats_min = round (colMins (stats) * 10000.0) / 10000.0;
     
     print ("Training Data, Label comparison statistics:");
-    print ("    True Matches:  Min = " + castAsScalar (stats_min [1, 11]) + ",  Avg = " + castAsScalar (stats_avg [1, 11]) + ",  Max = " + castAsScalar (stats_max [1, 11]));
-    print ("   False Matches:  Min = " + castAsScalar (stats_min [1, 12]) + ",  Avg = " + castAsScalar (stats_avg [1, 12]) + ",  Max = " + castAsScalar (stats_max [1, 12]));
-    print ("     Precision %:  Min = " + castAsScalar (stats_min [1, 15]) + ",  Avg = " + castAsScalar (stats_avg [1, 15]) + ",  Max = " + castAsScalar (stats_max [1, 15]));
+    print ("    True Matches:  Min = " + as.scalar (stats_min [1, 11]) + ",  Avg = " + as.scalar (stats_avg [1, 11]) + ",  Max = " + as.scalar (stats_max [1, 11]));
+    print ("   False Matches:  Min = " + as.scalar (stats_min [1, 12]) + ",  Avg = " + as.scalar (stats_avg [1, 12]) + ",  Max = " + as.scalar (stats_max [1, 12]));
+    print ("     Precision %:  Min = " + as.scalar (stats_min [1, 15]) + ",  Avg = " + as.scalar (stats_avg [1, 15]) + ",  Max = " + as.scalar (stats_max [1, 15]));
 
     print (" ");
     if (1==1) {
       print(" ")
     }
     print ("TEST Data, Label comparison statistics:");
-    print ("    True Matches:  Min = " + castAsScalar (stats_min [1, 31]) + ",  Avg = " + castAsScalar (stats_avg [1, 31]) + ",  Max = " + castAsScalar (stats_max [1, 31]));
-    print ("   False Matches:  Min = " + castAsScalar (stats_min [1, 32]) + ",  Avg = " + castAsScalar (stats_avg [1, 32]) + ",  Max = " + castAsScalar (stats_max [1, 32]));
-    print ("     Precision %:  Min = " + castAsScalar (stats_min [1, 35]) + ",  Avg = " + castAsScalar (stats_avg [1, 35]) + ",  Max = " + castAsScalar (stats_max [1, 35]));
+    print ("    True Matches:  Min = " + as.scalar (stats_min [1, 31]) + ",  Avg = " + as.scalar (stats_avg [1, 31]) + ",  Max = " + as.scalar (stats_max [1, 31]));
+    print ("   False Matches:  Min = " + as.scalar (stats_min [1, 32]) + ",  Avg = " + as.scalar (stats_avg [1, 32]) + ",  Max = " + as.scalar (stats_max [1, 32]));
+    print ("     Precision %:  Min = " + as.scalar (stats_min [1, 35]) + ",  Avg = " + as.scalar (stats_avg [1, 35]) + ",  Max = " + as.scalar (stats_max [1, 35]));
 
     err = 0;
 }

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/validation/LinearLogisticRegression.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/validation/LinearLogisticRegression.dml b/src/test/scripts/applications/validation/LinearLogisticRegression.dml
index 473b2dc..1444bd4 100644
--- a/src/test/scripts/applications/validation/LinearLogisticRegression.dml
+++ b/src/test/scripts/applications/validation/LinearLogisticRegression.dml
@@ -105,7 +105,7 @@ while(!converge) {
  norm_grad = sqrt(sum(grad*grad))
  
  print("-- Outer Iteration = " + iter)
- objScalar = castAsScalar(obj)
+ objScalar = as.scalar(obj)
  print("     Iterations = " + iter + ", Objective = " + objScalar + ", Gradient Norm = " + norm_grad)
  
  # SOLVE TRUST REGION SUB-PROBLEM
@@ -123,23 +123,23 @@ while(!converge) {
   alpha_deno = t(d) %*% Hd 
   alpha = norm_r2 / alpha_deno
  
-  s = s + castAsScalar(alpha) * d
-  os = os + castAsScalar(alpha) * od
+  s = s + as.scalar(alpha) * d
+  os = os + as.scalar(alpha) * od
 
   sts = t(s) %*% s
   delta2 = delta*delta 
-  stsScalar = castAsScalar(sts)
+  stsScalar = as.scalar(sts)
   
   shouldBreak = FALSE;  # to mimic "break" in the following 'if' condition
   if (stsScalar > delta2) {
       print("      --- cg reaches trust region boundary")
-   s = s - castAsScalar(alpha) * d
-   os = os - castAsScalar(alpha) * od
+   s = s - as.scalar(alpha) * d
+   os = os - as.scalar(alpha) * od
    std = t(s) %*% d
    dtd = t(d) %*% d
    sts = t(s) %*% s
    rad = sqrt(std*std + dtd*(delta2 - sts))
-   stdScalar = castAsScalar(std)
+   stdScalar = as.scalar(std)
    if(stdScalar >= 0) {
     tau = (delta2 - sts)/(std + rad)
    } 
@@ -147,9 +147,9 @@ while(!converge) {
     tau = (rad - std)/dtd
    }
       
-   s = s + castAsScalar(tau) * d
-   os = os + castAsScalar(tau) * od
-   r = r - castAsScalar(tau) * Hd
+   s = s + as.scalar(tau) * d
+   os = os + as.scalar(tau) * od
+   r = r - as.scalar(tau) * Hd
    
    #break
    shouldBreak = TRUE;
@@ -158,7 +158,7 @@ while(!converge) {
   } 
   
   if (!shouldBreak) {
-   r = r - castAsScalar(alpha) * Hd
+   r = r - as.scalar(alpha) * Hd
    old_norm_r2 = norm_r2 
    norm_r2 = sum(r*r)
    beta = norm_r2/old_norm_r2
@@ -179,10 +179,10 @@ while(!converge) {
  objnew = 0.5 * t(wnew) %*% wnew + C * sum(-log(logisticnew))
  
  actred = (obj - objnew)
- actredScalar = castAsScalar(actred)
+ actredScalar = as.scalar(actred)
  rho = actred / qk
- qkScalar = castAsScalar(qk)
- rhoScalar = castAsScalar(rho);
+ qkScalar = as.scalar(qk)
+ rhoScalar = as.scalar(rho);
  snorm = sqrt(sum( s * s ))
 
  print("     Actual    = " + actredScalar)
@@ -192,12 +192,12 @@ while(!converge) {
     delta = min(delta, snorm)
  }
  alpha2 = objnew - obj - gs
- alpha2Scalar = castAsScalar(alpha2)
+ alpha2Scalar = as.scalar(alpha2)
  if (alpha2Scalar <= 0) {
     alpha = sigma3*e
  } 
  else {
-    ascalar = max(sigma1, -0.5*castAsScalar(gs)/alpha2Scalar)  
+    ascalar = max(sigma1, -0.5*as.scalar(gs)/alpha2Scalar)  
     alpha = ascalar*e
  }
 
@@ -211,7 +211,7 @@ while(!converge) {
   obj = objnew 
  } 
 
- alphaScalar = castAsScalar(alpha)
+ alphaScalar = as.scalar(alpha)
  if (rhoScalar < eta0){
   delta = min(max( alphaScalar , sigma1) * snorm, sigma2 * delta )
  }

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/validation/genRandData4LogisticRegression.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/validation/genRandData4LogisticRegression.dml b/src/test/scripts/applications/validation/genRandData4LogisticRegression.dml
index b42a315..c7cd4a2 100644
--- a/src/test/scripts/applications/validation/genRandData4LogisticRegression.dml
+++ b/src/test/scripts/applications/validation/genRandData4LogisticRegression.dml
@@ -101,17 +101,17 @@ scaleWeights =
     W_ext [, 1] = w_unscaled;
     S1 = colSums (X_data %*% W_ext);
     TF = Rand (rows = 2, cols = 2, min = 1, max = 1);
-    TF [1, 1] = S1 [1, 1] * meanLF * nrow (X_data) / castAsScalar (S1 %*% t(S1));
+    TF [1, 1] = S1 [1, 1] * meanLF * nrow (X_data) / as.scalar (S1 %*% t(S1));
     TF [1, 2] = S1 [1, 2];
-    TF [2, 1] = S1 [1, 2] * meanLF * nrow (X_data) / castAsScalar (S1 %*% t(S1));
+    TF [2, 1] = S1 [1, 2] * meanLF * nrow (X_data) / as.scalar (S1 %*% t(S1));
     TF [2, 2] = - S1 [1, 1];
     TF = W_ext %*% TF;
     Q = t(TF) %*% t(X_data) %*% X_data %*% TF;
     Q [1, 1] = Q [1, 1] - nrow (X_data) * meanLF * meanLF;
     new_sigmaLF = sigmaLF;
-    discr = castAsScalar (Q [1, 1] * Q [2, 2] - Q [1, 2] * Q [2, 1] - nrow (X_data) * Q [2, 2] * sigmaLF * sigmaLF);
+    discr = as.scalar (Q [1, 1] * Q [2, 2] - Q [1, 2] * Q [2, 1] - nrow (X_data) * Q [2, 2] * sigmaLF * sigmaLF);
     if (discr > 0.0) {
-        new_sigmaLF = sqrt (castAsScalar ((Q [1, 1] * Q [2, 2] - Q [1, 2] * Q [2, 1]) / (nrow (X_data) * Q [2, 2])));
+        new_sigmaLF = sqrt (as.scalar ((Q [1, 1] * Q [2, 2] - Q [1, 2] * Q [2, 1]) / (nrow (X_data) * Q [2, 2])));
         discr = -0.0;
     }
     t = Rand (rows = 2, cols = 1, min = 1, max = 1);

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/gdfo/LinregCG.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/gdfo/LinregCG.dml b/src/test/scripts/functions/gdfo/LinregCG.dml
index 85a66e4..92f15d7 100644
--- a/src/test/scripts/functions/gdfo/LinregCG.dml
+++ b/src/test/scripts/functions/gdfo/LinregCG.dml
@@ -39,7 +39,7 @@ w = matrix(0, rows = ncol(X), cols = 1);
 i = 0;
 while(i < maxiter) {
 	q = ((t(X) %*% (X %*% p)) + eps  * p);
-	alpha = norm_r2 / castAsScalar(t(p) %*% q);
+	alpha = norm_r2 / as.scalar(t(p) %*% q);
 	w = w + alpha * p;
 	old_norm_r2 = norm_r2;
 	r = r + alpha * q;

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/jmlc/reuse-glm-predict.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/jmlc/reuse-glm-predict.dml b/src/test/scripts/functions/jmlc/reuse-glm-predict.dml
index 9a29d67..6ff0b68 100644
--- a/src/test/scripts/functions/jmlc/reuse-glm-predict.dml
+++ b/src/test/scripts/functions/jmlc/reuse-glm-predict.dml
@@ -251,15 +251,15 @@ if (fileY != " ")
     str = append (str, "DEVIANCE_G2_PVAL,,TRUE," + G2_scaled_pValue);
 
     for (i in 1:ncol(Y)) {
-        str = append (str, "AVG_TOT_Y," + i + ",," + castAsScalar (avg_tot_Y [1, i]));
-        str = append (str, "STDEV_TOT_Y," + i + ",," + castAsScalar (sqrt (var_tot_Y [1, i])));
-        str = append (str, "AVG_RES_Y," + i + ",," + castAsScalar (avg_res_Y [1, i]));
-        str = append (str, "STDEV_RES_Y," + i + ",," + castAsScalar (sqrt (var_res_Y [1, i])));
-        str = append (str, "PRED_STDEV_RES," + i + ",TRUE," + castAsScalar (sqrt (predicted_avg_var_res_Y [1, i])));
-        str = append (str, "PLAIN_R2," + i + ",," + castAsScalar (plain_R2 [1, i]));
-        str = append (str, "ADJUSTED_R2," + i + ",," + castAsScalar (adjust_R2 [1, i]));
-        str = append (str, "PLAIN_R2_NOBIAS," + i + ",," + castAsScalar (plain_R2_nobias [1, i]));
-        str = append (str, "ADJUSTED_R2_NOBIAS," + i + ",," + castAsScalar (adjust_R2_nobias [1, i]));
+        str = append (str, "AVG_TOT_Y," + i + ",," + as.scalar (avg_tot_Y [1, i]));
+        str = append (str, "STDEV_TOT_Y," + i + ",," + as.scalar (sqrt (var_tot_Y [1, i])));
+        str = append (str, "AVG_RES_Y," + i + ",," + as.scalar (avg_res_Y [1, i]));
+        str = append (str, "STDEV_RES_Y," + i + ",," + as.scalar (sqrt (var_res_Y [1, i])));
+        str = append (str, "PRED_STDEV_RES," + i + ",TRUE," + as.scalar (sqrt (predicted_avg_var_res_Y [1, i])));
+        str = append (str, "PLAIN_R2," + i + ",," + as.scalar (plain_R2 [1, i]));
+        str = append (str, "ADJUSTED_R2," + i + ",," + as.scalar (adjust_R2 [1, i]));
+        str = append (str, "PLAIN_R2_NOBIAS," + i + ",," + as.scalar (plain_R2_nobias [1, i]));
+        str = append (str, "ADJUSTED_R2_NOBIAS," + i + ",," + as.scalar (adjust_R2_nobias [1, i]));
     }
     
     if (fileO != " ") {

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/misc/IPAUnknownRecursion.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/misc/IPAUnknownRecursion.dml b/src/test/scripts/functions/misc/IPAUnknownRecursion.dml
index 22570b8..f6be7b0 100644
--- a/src/test/scripts/functions/misc/IPAUnknownRecursion.dml
+++ b/src/test/scripts/functions/misc/IPAUnknownRecursion.dml
@@ -28,7 +28,7 @@ factorial = function(Matrix[Double] arr, Integer pos) return (Matrix[Double] arr
 	}
 	
 	for(i in 1:ncol(arr))
-		print("inside factorial (" + pos + ") " + i + ": " + castAsScalar(arr[1, i]))
+		print("inside factorial (" + pos + ") " + i + ": " + as.scalar(arr[1, i]))
 }
 
 n = $1
@@ -38,7 +38,7 @@ arr = factorial(arr, n)
 R = matrix(0, rows=1, cols=n);
 for(i in 1:n) #copy important to test dynamic rewrites
 {
-   print("main factorial " + i + ": " + castAsScalar(arr[1, i]))
+   print("main factorial " + i + ": " + as.scalar(arr[1, i]))
    R[1,i] = as.scalar(arr[1, i]);
 }
 

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/misc/dt_change_4b.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/misc/dt_change_4b.dml b/src/test/scripts/functions/misc/dt_change_4b.dml
index c5d8c7e..d443587 100644
--- a/src/test/scripts/functions/misc/dt_change_4b.dml
+++ b/src/test/scripts/functions/misc/dt_change_4b.dml
@@ -22,7 +22,7 @@
 
 Y = matrix(1, rows=10, cols=10);
 X = matrix(7, rows=10, cols=10);
-X = castAsScalar(X[1,1]);
+X = as.scalar(X[1,1]);
 
 print("Result: "+sum(X + Y));
 #expected: "Result: 800.0"

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/misc/dt_change_4c.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/misc/dt_change_4c.dml b/src/test/scripts/functions/misc/dt_change_4c.dml
index ba561bf..3cf60fa 100644
--- a/src/test/scripts/functions/misc/dt_change_4c.dml
+++ b/src/test/scripts/functions/misc/dt_change_4c.dml
@@ -24,7 +24,7 @@ foo = function(Matrix[Double] input) return (Double out)
 {
    if( 1==1 ){} #prevent inlining
   
-   out = castAsScalar(input[1,1]);
+   out = as.scalar(input[1,1]);
 }
 
 Y = matrix(1, rows=10, cols=10);

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/misc/dt_change_4f.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/misc/dt_change_4f.dml b/src/test/scripts/functions/misc/dt_change_4f.dml
index 71f083c..1050a2c 100644
--- a/src/test/scripts/functions/misc/dt_change_4f.dml
+++ b/src/test/scripts/functions/misc/dt_change_4f.dml
@@ -23,7 +23,7 @@
 Y = matrix(1, rows=10, cols=10);
 X = matrix(7, rows=10, cols=10);
 if(1==1){}
-X = castAsScalar(X[1,1]);
+X = as.scalar(X[1,1]);
 
 print("Result: "+sum(X + Y));
 #expected: "Result: 800.0"

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/parfor/parfor35.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/parfor/parfor35.dml b/src/test/scripts/functions/parfor/parfor35.dml
index 81f7564..b1c8da9 100644
--- a/src/test/scripts/functions/parfor/parfor35.dml
+++ b/src/test/scripts/functions/parfor/parfor35.dml
@@ -26,7 +26,7 @@ dummy = matrix(1, rows=1,cols=1);
 
 parfor( i in 1:20 )
 {
-   val = castAsScalar(B[i,i]);
+   val = as.scalar(B[i,i]);
    b = A[i,val]; #due to parser change A[i,B[i,]];  
    c = dummy*(b+i);
 }

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/parfor/parfor48b.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/parfor/parfor48b.dml b/src/test/scripts/functions/parfor/parfor48b.dml
index c87f920..edeeea4 100644
--- a/src/test/scripts/functions/parfor/parfor48b.dml
+++ b/src/test/scripts/functions/parfor/parfor48b.dml
@@ -22,7 +22,7 @@
 
 A = Rand(rows=10, cols=10, min=0.0, max=1.0, sparsity=1.0)
 
-parfor(i in 1:castAsScalar(A[1,1])){
+parfor(i in 1:as.scalar(A[1,1])){
 	parfor(j in 1:A+ncol(A)){
 		print("i="+i+", j="+j);
 	}

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/parfor/parfor6.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/parfor/parfor6.dml b/src/test/scripts/functions/parfor/parfor6.dml
index aae1370..e50b892 100644
--- a/src/test/scripts/functions/parfor/parfor6.dml
+++ b/src/test/scripts/functions/parfor/parfor6.dml
@@ -24,6 +24,6 @@ A = Rand(rows=10,cols=1);
 
 parfor( i in 1:10 )
 {
-   b = i + castAsScalar(A[i,1]);
+   b = i + as.scalar(A[i,1]);
    #print(b);
 }

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/parfor/parfor7.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/parfor/parfor7.dml b/src/test/scripts/functions/parfor/parfor7.dml
index 39c14ad..807239b 100644
--- a/src/test/scripts/functions/parfor/parfor7.dml
+++ b/src/test/scripts/functions/parfor/parfor7.dml
@@ -24,7 +24,7 @@ A = Rand(rows=10,cols=1);
 
 parfor( i in 2:10 )
 {
-   b = i + castAsScalar(A[i,1]) + castAsScalar(A[i+1,1]);
+   b = i + as.scalar(A[i,1]) + as.scalar(A[i+1,1]);
    
    #print(b);
 }

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/parfor/parfor8.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/parfor/parfor8.dml b/src/test/scripts/functions/parfor/parfor8.dml
index 21d96ee..73f1e25 100644
--- a/src/test/scripts/functions/parfor/parfor8.dml
+++ b/src/test/scripts/functions/parfor/parfor8.dml
@@ -25,7 +25,7 @@ a = 1
 
 parfor( i in 2:10 )
 { 
-   b = a + castAsScalar(A[i,1]) + castAsScalar(A[i+1,1]);
+   b = a + as.scalar(A[i,1]) + as.scalar(A[i+1,1]);
    a = i;
   # print(b);
 }

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/parfor/parfor9.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/parfor/parfor9.dml b/src/test/scripts/functions/parfor/parfor9.dml
index fb50cb7..b9b970f 100644
--- a/src/test/scripts/functions/parfor/parfor9.dml
+++ b/src/test/scripts/functions/parfor/parfor9.dml
@@ -25,7 +25,7 @@ a = 1
 
 parfor( i in 2:10 )
 { 
-   b = a + castAsScalar(A[i,1]) + castAsScalar(A[i-1,1]);
+   b = a + as.scalar(A[i,1]) + as.scalar(A[i-1,1]);
    a = i;
    #print(b);
 }

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/parfor/parfor_optimizer2.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/parfor/parfor_optimizer2.dml b/src/test/scripts/functions/parfor/parfor_optimizer2.dml
index e6007af..bc8cdc0 100644
--- a/src/test/scripts/functions/parfor/parfor_optimizer2.dml
+++ b/src/test/scripts/functions/parfor/parfor_optimizer2.dml
@@ -73,14 +73,14 @@ dummy = matrix(1, rows=1, cols=1);
 
 
 parfor( i in 1:s1size, check=0, opt=RULEBASED) {
-    a1 = castAsScalar(S1[,i]);
-    k1 = castAsScalar(K1[1,i]);
+    a1 = as.scalar(S1[,i]);
+    k1 = as.scalar(K1[1,i]);
     A1 = D[,a1];
 
     parfor( j in 1:s2size, check=0) {
         pairID = (i-1)*s2size+j; 
-        a2 = castAsScalar(S2[,j]);
-        k2 = castAsScalar(K2[1,j]);
+        a2 = as.scalar(S2[,j]);
+        k2 = as.scalar(K2[1,j]);
         A2 = D[,a2];
     
         if (k1 == k2) {
@@ -233,7 +233,7 @@ computeRanks = function(Matrix[Double] X) return (Matrix[Double] Ranks) {
         if( i>1 ){
            prefixSum = sum(X[1:(i-1),1]);
         } 
-        Rks[i,1] = dummy * (prefixSum + ((castAsScalar(X[i,1])+1)/2));
+        Rks[i,1] = dummy * (prefixSum + ((as.scalar(X[i,1])+1)/2));
     }
     Ranks = Rks;
 }
@@ -268,7 +268,7 @@ bivar_oo = function(Matrix[Double] A, Matrix[Double] B) return (Double sp) {
 
     covXY = 0.0;
     for(i in 1:catA) {
-        covXY = covXY + sum((F[i,]/(W-1)) * (castAsScalar(C[i,1])-meanX) * (t(D[,1])-meanY));
+        covXY = covXY + sum((F[i,]/(W-1)) * (as.scalar(C[i,1])-meanX) * (t(D[,1])-meanY));
     }
 
     sp = covXY/(sqrt(varX)*sqrt(varY));

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/parfor/parfor_threadid_recompile1.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/parfor/parfor_threadid_recompile1.dml b/src/test/scripts/functions/parfor/parfor_threadid_recompile1.dml
index 1995f9d..0370865 100644
--- a/src/test/scripts/functions/parfor/parfor_threadid_recompile1.dml
+++ b/src/test/scripts/functions/parfor/parfor_threadid_recompile1.dml
@@ -29,17 +29,17 @@ bin_defns = matrix(0, rows=num_bin_defns, cols=2)
 attr2pos = matrix(0, rows=nrow(A), cols=2)
 pos = 1
 for(i in 1:nrow(A)){
-	number_of_bins = castAsScalar(A[i,1])
+	number_of_bins = as.scalar(A[i,1])
 	attr2pos[i,1] = pos
 	attr2pos[i,2] = pos + number_of_bins - 1
 	pos = pos + number_of_bins
 }
 
 for(i in 1:nrow(A), check=0){
-	num_bins = castAsScalar(A[i,1])
+	num_bins = as.scalar(A[i,1])
 	
-	start_position = castAsScalar(attr2pos[i,1])
-	end_position = castAsScalar(attr2pos[i,2])
+	start_position = as.scalar(attr2pos[i,1])
+	end_position = as.scalar(attr2pos[i,2])
 	
 	#SEQ CALL 1
 	bin_defns[start_position:end_position,1] = seq(1, num_bins, 1)

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/parfor/parfor_threadid_recompile2.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/parfor/parfor_threadid_recompile2.dml b/src/test/scripts/functions/parfor/parfor_threadid_recompile2.dml
index ab89580..7ce8360 100644
--- a/src/test/scripts/functions/parfor/parfor_threadid_recompile2.dml
+++ b/src/test/scripts/functions/parfor/parfor_threadid_recompile2.dml
@@ -29,17 +29,17 @@ bin_defns = matrix(0, rows=num_bin_defns, cols=2)
 attr2pos = matrix(0, rows=nrow(A), cols=2)
 pos = 1
 for(i in 1:nrow(A)){
-	number_of_bins = castAsScalar(A[i,1])
+	number_of_bins = as.scalar(A[i,1])
 	attr2pos[i,1] = pos
 	attr2pos[i,2] = pos + number_of_bins - 1
 	pos = pos + number_of_bins
 }
 
 parfor(i in 1:nrow(A), check=0){
-	num_bins = castAsScalar(A[i,1])
+	num_bins = as.scalar(A[i,1])
 	
-	start_position = castAsScalar(attr2pos[i,1])
-	end_position = castAsScalar(attr2pos[i,2])
+	start_position = as.scalar(attr2pos[i,1])
+	end_position = as.scalar(attr2pos[i,2])
 	
 	#SEQ CALL 1
 	bin_defns[start_position:end_position,1] = seq(1, num_bins, 1)

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/recompile/for_recompile.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/recompile/for_recompile.dml b/src/test/scripts/functions/recompile/for_recompile.dml
index 96d7dac..8947b99 100644
--- a/src/test/scripts/functions/recompile/for_recompile.dml
+++ b/src/test/scripts/functions/recompile/for_recompile.dml
@@ -22,7 +22,7 @@
 
 V = Rand(rows=$1+1, cols=$2+1, min=$3, max=$3);
 Z = Rand(rows=1,cols=1,min=0,max=0);
-for( i in $3:castAsScalar(V[1,1]) )
+for( i in $3:as.scalar(V[1,1]) )
 {
    Z[1,1] = V[1,1]; 
 }  

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/recompile/if_recompile.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/recompile/if_recompile.dml b/src/test/scripts/functions/recompile/if_recompile.dml
index 2d02e01..91f435b 100644
--- a/src/test/scripts/functions/recompile/if_recompile.dml
+++ b/src/test/scripts/functions/recompile/if_recompile.dml
@@ -22,7 +22,7 @@
 
 V = Rand(rows=$1+1, cols=$2+1, min=$3, max=$3);
 Z = Rand(rows=1,cols=1,min=0,max=0);
-if( castAsScalar(V[1,1])>castAsScalar(Z[1,1]) )
+if( as.scalar(V[1,1])>as.scalar(Z[1,1]) )
 {
    Z[1,1] = V[1,1]; 
 }  

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/recompile/parfor_recompile.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/recompile/parfor_recompile.dml b/src/test/scripts/functions/recompile/parfor_recompile.dml
index 5e14440..f8223e0 100644
--- a/src/test/scripts/functions/recompile/parfor_recompile.dml
+++ b/src/test/scripts/functions/recompile/parfor_recompile.dml
@@ -22,7 +22,7 @@
 
 V = Rand(rows=$1+1, cols=$2+1, min=$3, max=$3);
 Z = Rand(rows=1,cols=1,min=0,max=0);
-parfor( i in $3:castAsScalar(V[1,1]), check=0 )
+parfor( i in $3:as.scalar(V[1,1]), check=0 )
 {
    Z[1,1] = V[1,1]; 
 }  

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/recompile/while_recompile.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/recompile/while_recompile.dml b/src/test/scripts/functions/recompile/while_recompile.dml
index 05dd424..de74b0d 100644
--- a/src/test/scripts/functions/recompile/while_recompile.dml
+++ b/src/test/scripts/functions/recompile/while_recompile.dml
@@ -22,7 +22,7 @@
 
 V = Rand(rows=$1+1, cols=$2+1, min=$3, max=$3);
 Z = Rand(rows=1,cols=1,min=0,max=0);
-while( castAsScalar(V[1,1])>castAsScalar(Z[1,1]) )
+while( as.scalar(V[1,1])>as.scalar(Z[1,1]) )
 {
    Z[1,1] = V[1,1]; 
 }  

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/unary/matrix/CastAsScalarTest.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/unary/matrix/CastAsScalarTest.dml b/src/test/scripts/functions/unary/matrix/CastAsScalarTest.dml
index b566e44..7731cac 100644
--- a/src/test/scripts/functions/unary/matrix/CastAsScalarTest.dml
+++ b/src/test/scripts/functions/unary/matrix/CastAsScalarTest.dml
@@ -24,6 +24,6 @@
 $$readhelper$$
 
 A = read("$$indir$$a", rows=1, cols=1, format="text");
-b = castAsScalar(A);
+b = as.scalar(A);
 BHelper = b * Helper;
 write(BHelper, "$$outdir$$b", format="text");
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/unary/matrix/eigen.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/unary/matrix/eigen.dml b/src/test/scripts/functions/unary/matrix/eigen.dml
index e4c7c46..12a2f87 100644
--- a/src/test/scripts/functions/unary/matrix/eigen.dml
+++ b/src/test/scripts/functions/unary/matrix/eigen.dml
@@ -40,7 +40,7 @@ numEval = $2;
 D = matrix(1, numEval, 1);
 for ( i in 1:numEval ) {
     Av = A %*% evec[,i];
-    rhs = castAsScalar(eval[i,1]) * evec[,i];
+    rhs = as.scalar(eval[i,1]) * evec[,i];
     diff = sum(Av-rhs);
     D[i,1] = diff;
 }
@@ -49,7 +49,7 @@ for ( i in 1:numEval ) {
 # TODO: dummy if() must be removed
 v = evec[,1];
 Av = A %*% v;
-rhs = castAsScalar(eval[1,1]) * evec[,1];
+rhs = as.scalar(eval[1,1]) * evec[,1];
 diff = sum(Av-rhs);
 
 D = matrix(1,1,1);

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/functions/unary/matrix/qr.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/unary/matrix/qr.dml b/src/test/scripts/functions/unary/matrix/qr.dml
index 184ff80..5454685 100644
--- a/src/test/scripts/functions/unary/matrix/qr.dml
+++ b/src/test/scripts/functions/unary/matrix/qr.dml
@@ -38,7 +38,7 @@ eye = diag(ones);
 Q = eye;
 for( j in n:1 ) {
     v = H[,j];
-    Qj = eye - 2 * (v %*% t(v))/castAsScalar((t(v)%*%v));
+    Qj = eye - 2 * (v %*% t(v))/as.scalar((t(v)%*%v));
     Q = Qj %*% Q;
 }
 


[2/2] incubator-systemml git commit: [SYSTEMML-647] Replace castAsScalar calls

Posted by de...@apache.org.
[SYSTEMML-647] Replace castAsScalar calls

Replace castAsScalar() with as.scalar() in DML.
Replace castAsScalar() with scalar() in PYDML.

Closes #136.
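
For readers skimming the hunks above, the change is the same mechanical substitution in every file: the deprecated castAsScalar() builtin is swapped for its renamed equivalent, with unchanged behavior expected. A minimal standalone DML sketch of the rename follows (illustrative only; the matrix A and the printed value are hypothetical and not taken from the patch):

    A = matrix(7, rows=3, cols=3);       # toy 3x3 matrix filled with 7
    # before this commit: x = castAsScalar(A[1,1]);
    x = as.scalar(A[1,1]);               # extract the 1x1 cell A[1,1] as a scalar
    print("x = " + x);                   # expected: "x = 7.0"
    # in PyDML scripts the corresponding replacement is scalar(A[1,1])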


Project: http://git-wip-us.apache.org/repos/asf/incubator-systemml/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-systemml/commit/2da81457
Tree: http://git-wip-us.apache.org/repos/asf/incubator-systemml/tree/2da81457
Diff: http://git-wip-us.apache.org/repos/asf/incubator-systemml/diff/2da81457

Branch: refs/heads/master
Commit: 2da8145744f52d725e56e883997ef9ad7206a9b3
Parents: 7013910
Author: Deron Eriksson <de...@us.ibm.com>
Authored: Wed May 4 18:10:24 2016 -0700
Committer: Deron Eriksson <de...@us.ibm.com>
Committed: Wed May 4 18:11:05 2016 -0700

----------------------------------------------------------------------
 scripts/algorithms/GLM-predict.dml              |  18 ++--
 scripts/algorithms/GLM.dml                      |   6 +-
 scripts/algorithms/Kmeans-predict.dml           |  36 +++----
 scripts/algorithms/Kmeans.dml                   |   8 +-
 scripts/algorithms/StepGLM.dml                  |   6 +-
 scripts/algorithms/Univar-Stats.dml             |   4 +-
 scripts/algorithms/bivar-stats.dml              |  20 ++--
 scripts/algorithms/l2-svm-predict.dml           |   2 +-
 scripts/algorithms/m-svm.dml                    |   2 +-
 scripts/algorithms/stratstats.dml               |   6 +-
 scripts/datagen/genRandData4ChisquaredTest.dml  |   4 +-
 scripts/datagen/genRandData4FTest.dml           |   6 +-
 .../datagen/genRandData4LinearReg_LTstats.dml   |  10 +-
 scripts/datagen/genRandData4LogReg_LTstats.dml  |  10 +-
 scripts/datagen/genRandData4NMF.dml             |   8 +-
 scripts/datagen/genRandData4NMFBlockwise.dml    |   8 +-
 .../apply-transform/apply-transform.dml         |  38 +++----
 .../apply-transform/apply-transform.pydml       |  38 +++----
 .../applications/arima_box-jenkins/arima.dml    |  20 ++--
 .../applications/arima_box-jenkins/arima.pydml  |  20 ++--
 .../applications/cspline/CsplineCG.pydml        |   2 +-
 .../applications/cspline/CsplineDS.pydml        |   2 +-
 .../scripts/applications/ctableStats/ctci.dml   |   2 +-
 .../applications/ctableStats/stratstats.dml     |   6 +-
 .../applications/ctableStats/wilson_score.dml   |  38 +++----
 .../applications/descriptivestats/OddsRatio.dml |   8 +-
 src/test/scripts/applications/glm/GLM.dml       |   6 +-
 src/test/scripts/applications/glm/GLM.pydml     |   6 +-
 src/test/scripts/applications/id3/id3.dml       |  24 ++---
 src/test/scripts/applications/id3/id3.pydml     |  24 ++---
 .../applications/impute/imputeGaussMCMC.dml     |  54 +++++-----
 .../impute/imputeGaussMCMC.nogradient.dml       |  52 +++++-----
 .../applications/impute/old/imputeGaussMCMC.dml |  42 ++++----
 src/test/scripts/applications/impute/tmp.dml    |   8 +-
 .../impute/wfundInputGenerator1.dml             |   2 +-
 .../impute/wfundInputGenerator2.dml             |  24 ++---
 .../applications/linearLogReg/LinearLogReg.dml  |  34 +++----
 .../linearLogReg/LinearLogReg.pydml             |  34 +++----
 .../linear_regression/LinearRegression.dml      |   2 +-
 src/test/scripts/applications/m-svm/m-svm.dml   |   2 +-
 src/test/scripts/applications/m-svm/m-svm.pydml |   2 +-
 .../applications/mdabivar/MDABivariateStats.dml |  10 +-
 .../mdabivar/MDABivariateStats.pydml            |  10 +-
 .../applications/parfor/parfor_bivariate0.dml   |   8 +-
 .../applications/parfor/parfor_bivariate1.dml   |   8 +-
 .../applications/parfor/parfor_bivariate2.dml   |   8 +-
 .../applications/parfor/parfor_bivariate3.dml   |   8 +-
 .../applications/parfor/parfor_bivariate4.dml   |   8 +-
 .../applications/parfor/parfor_univariate0.dml  |   2 +-
 .../applications/parfor/parfor_univariate1.dml  |   2 +-
 .../applications/parfor/parfor_univariate4.dml  |   2 +-
 .../validation/CV_LogisticRegression.dml        | 100 +++++++++----------
 .../validation/CV_MultiClassSVM.sasha.dml       |  12 +--
 .../validation/LinearLogisticRegression.dml     |  34 +++----
 .../genRandData4LogisticRegression.dml          |   8 +-
 src/test/scripts/functions/gdfo/LinregCG.dml    |   2 +-
 .../functions/jmlc/reuse-glm-predict.dml        |  18 ++--
 .../functions/misc/IPAUnknownRecursion.dml      |   4 +-
 .../scripts/functions/misc/dt_change_4b.dml     |   2 +-
 .../scripts/functions/misc/dt_change_4c.dml     |   2 +-
 .../scripts/functions/misc/dt_change_4f.dml     |   2 +-
 src/test/scripts/functions/parfor/parfor35.dml  |   2 +-
 src/test/scripts/functions/parfor/parfor48b.dml |   2 +-
 src/test/scripts/functions/parfor/parfor6.dml   |   2 +-
 src/test/scripts/functions/parfor/parfor7.dml   |   2 +-
 src/test/scripts/functions/parfor/parfor8.dml   |   2 +-
 src/test/scripts/functions/parfor/parfor9.dml   |   2 +-
 .../functions/parfor/parfor_optimizer2.dml      |  12 +--
 .../parfor/parfor_threadid_recompile1.dml       |   8 +-
 .../parfor/parfor_threadid_recompile2.dml       |   8 +-
 .../functions/recompile/for_recompile.dml       |   2 +-
 .../functions/recompile/if_recompile.dml        |   2 +-
 .../functions/recompile/parfor_recompile.dml    |   2 +-
 .../functions/recompile/while_recompile.dml     |   2 +-
 .../functions/unary/matrix/CastAsScalarTest.dml |   2 +-
 .../scripts/functions/unary/matrix/eigen.dml    |   4 +-
 src/test/scripts/functions/unary/matrix/qr.dml  |   2 +-
 77 files changed, 475 insertions(+), 475 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/scripts/algorithms/GLM-predict.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/GLM-predict.dml b/scripts/algorithms/GLM-predict.dml
index 5e998e3..355928b 100644
--- a/scripts/algorithms/GLM-predict.dml
+++ b/scripts/algorithms/GLM-predict.dml
@@ -312,15 +312,15 @@ if (fileY != " ")
     str = append (str, "DEVIANCE_G2_PVAL,,TRUE," + G2_scaled_pValue);
 
     for (i in 1:ncol(Y)) {
-        str = append (str, "AVG_TOT_Y," + i + ",," + castAsScalar (avg_tot_Y [1, i]));
-        str = append (str, "STDEV_TOT_Y," + i + ",," + castAsScalar (sqrt (var_tot_Y [1, i])));
-        str = append (str, "AVG_RES_Y," + i + ",," + castAsScalar (avg_res_Y [1, i]));
-        str = append (str, "STDEV_RES_Y," + i + ",," + castAsScalar (sqrt (var_res_Y [1, i])));
-        str = append (str, "PRED_STDEV_RES," + i + ",TRUE," + castAsScalar (sqrt (predicted_avg_var_res_Y [1, i])));
-        str = append (str, "PLAIN_R2," + i + ",," + castAsScalar (plain_R2 [1, i]));
-        str = append (str, "ADJUSTED_R2," + i + ",," + castAsScalar (adjust_R2 [1, i]));
-        str = append (str, "PLAIN_R2_NOBIAS," + i + ",," + castAsScalar (plain_R2_nobias [1, i]));
-        str = append (str, "ADJUSTED_R2_NOBIAS," + i + ",," + castAsScalar (adjust_R2_nobias [1, i]));
+        str = append (str, "AVG_TOT_Y," + i + ",," + as.scalar (avg_tot_Y [1, i]));
+        str = append (str, "STDEV_TOT_Y," + i + ",," + as.scalar (sqrt (var_tot_Y [1, i])));
+        str = append (str, "AVG_RES_Y," + i + ",," + as.scalar (avg_res_Y [1, i]));
+        str = append (str, "STDEV_RES_Y," + i + ",," + as.scalar (sqrt (var_res_Y [1, i])));
+        str = append (str, "PRED_STDEV_RES," + i + ",TRUE," + as.scalar (sqrt (predicted_avg_var_res_Y [1, i])));
+        str = append (str, "PLAIN_R2," + i + ",," + as.scalar (plain_R2 [1, i]));
+        str = append (str, "ADJUSTED_R2," + i + ",," + as.scalar (adjust_R2 [1, i]));
+        str = append (str, "PLAIN_R2_NOBIAS," + i + ",," + as.scalar (plain_R2_nobias [1, i]));
+        str = append (str, "ADJUSTED_R2_NOBIAS," + i + ",," + as.scalar (adjust_R2_nobias [1, i]));
     }
     
     if (fileO != " ") {

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/scripts/algorithms/GLM.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/GLM.dml b/scripts/algorithms/GLM.dml
index 32a55f8..16008e6 100644
--- a/scripts/algorithms/GLM.dml
+++ b/scripts/algorithms/GLM.dml
@@ -453,7 +453,7 @@ if (intercept_status == 2) {
 write (beta_out, fileB, format=fmtB);
 
 if (intercept_status == 1 | intercept_status == 2) {
-    intercept_value = castAsScalar (beta_out [num_features, 1]);
+    intercept_value = as.scalar (beta_out [num_features, 1]);
     beta_noicept = beta_out [1 : (num_features - 1), 1];
 } else {
     beta_noicept = beta_out [1 : num_features, 1];
@@ -461,9 +461,9 @@ if (intercept_status == 1 | intercept_status == 2) {
 min_beta = min (beta_noicept);
 max_beta = max (beta_noicept);
 tmp_i_min_beta = rowIndexMin (t(beta_noicept))
-i_min_beta = castAsScalar (tmp_i_min_beta [1, 1]);
+i_min_beta = as.scalar (tmp_i_min_beta [1, 1]);
 tmp_i_max_beta = rowIndexMax (t(beta_noicept))
-i_max_beta = castAsScalar (tmp_i_max_beta [1, 1]);
+i_max_beta = as.scalar (tmp_i_max_beta [1, 1]);
 
 #####  OVER-DISPERSION PART  #####
 

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/scripts/algorithms/Kmeans-predict.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/Kmeans-predict.dml b/scripts/algorithms/Kmeans-predict.dml
index 5bd78bd..3823234 100644
--- a/scripts/algorithms/Kmeans-predict.dml
+++ b/scripts/algorithms/Kmeans-predict.dml
@@ -266,19 +266,19 @@ if (num_records != nrow (prY) | ncol (spY) != 1 | ncol (prY) != 1) {
     
     for (i in 1 : nrow (spY_cids))
     {
-        cid = as.integer (castAsScalar (spY_cids [i, 1]));
-        pct = castAsScalar (rounded_percentages [i, 1]);
+        cid = as.integer (as.scalar (spY_cids [i, 1]));
+        pct = as.scalar (rounded_percentages [i, 1]);
         space_pct = "";  if (pct < 100) {space_pct = " ";}  if (pct < 10) {space_pct = "  ";}
         print ("Category " + cid + 
-            ":  best pred. cluster is " + as.integer (castAsScalar (prY_cids [i, 1])) + 
-            ";  full count = " + as.integer (castAsScalar (full_counts [i, 1])) + 
+            ":  best pred. cluster is " + as.integer (as.scalar (prY_cids [i, 1])) + 
+            ";  full count = " + as.integer (as.scalar (full_counts [i, 1])) + 
             ",  matching count = " + space_pct + pct + "% (" +
-            as.integer (castAsScalar (matching_counts [i, 1])) + ")");
+            as.integer (as.scalar (matching_counts [i, 1])) + ")");
             
-        str = append (str, "SPEC_TO_PRED,"  + cid + "," + castAsScalar (prY_cids [i, 1]));
-        str = append (str, "SPEC_FULL_CT,"  + cid + "," + castAsScalar (full_counts [i, 1]));
-        str = append (str, "SPEC_MATCH_CT," + cid + "," + castAsScalar (matching_counts [i, 1]));
-        str = append (str, "SPEC_MATCH_PC," + cid + "," + castAsScalar (rounded_percentages [i, 1]));
+        str = append (str, "SPEC_TO_PRED,"  + cid + "," + as.scalar (prY_cids [i, 1]));
+        str = append (str, "SPEC_FULL_CT,"  + cid + "," + as.scalar (full_counts [i, 1]));
+        str = append (str, "SPEC_MATCH_CT," + cid + "," + as.scalar (matching_counts [i, 1]));
+        str = append (str, "SPEC_MATCH_PC," + cid + "," + as.scalar (rounded_percentages [i, 1]));
     }
 
     [prY_cids, spY_cids, full_counts, matching_counts, rounded_percentages] =
@@ -292,19 +292,19 @@ if (num_records != nrow (prY) | ncol (spY) != 1 | ncol (prY) != 1) {
     
     for (i in 1 : nrow (prY_cids))
     {
-        cid = as.integer (castAsScalar (prY_cids [i, 1]));
-        pct = castAsScalar (rounded_percentages [i, 1]);
+        cid = as.integer (as.scalar (prY_cids [i, 1]));
+        pct = as.scalar (rounded_percentages [i, 1]);
         space_pct = "";  if (pct < 100) {space_pct = " ";}  if (pct < 10) {space_pct = "  ";}
         print ("Cluster " + cid + 
-            ":  best spec. categ. is " + as.integer (castAsScalar (spY_cids [i, 1])) + 
-            ";  full count = " + as.integer (castAsScalar (full_counts [i, 1])) + 
+            ":  best spec. categ. is " + as.integer (as.scalar (spY_cids [i, 1])) + 
+            ";  full count = " + as.integer (as.scalar (full_counts [i, 1])) + 
             ",  matching count = " + space_pct + pct + "% (" +
-            as.integer (castAsScalar (matching_counts [i, 1])) + ")");
+            as.integer (as.scalar (matching_counts [i, 1])) + ")");
 
-        str = append (str, "PRED_TO_SPEC,"  + cid + "," + castAsScalar (spY_cids [i, 1]));
-        str = append (str, "PRED_FULL_CT,"  + cid + "," + castAsScalar (full_counts [i, 1]));
-        str = append (str, "PRED_MATCH_CT," + cid + "," + castAsScalar (matching_counts [i, 1]));
-        str = append (str, "PRED_MATCH_PC," + cid + "," + castAsScalar (rounded_percentages [i, 1]));
+        str = append (str, "PRED_TO_SPEC,"  + cid + "," + as.scalar (spY_cids [i, 1]));
+        str = append (str, "PRED_FULL_CT,"  + cid + "," + as.scalar (full_counts [i, 1]));
+        str = append (str, "PRED_MATCH_CT," + cid + "," + as.scalar (matching_counts [i, 1]));
+        str = append (str, "PRED_MATCH_PC," + cid + "," + as.scalar (rounded_percentages [i, 1]));
     }
 
     print (" ");

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/scripts/algorithms/Kmeans.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/Kmeans.dml b/scripts/algorithms/Kmeans.dml
index 2887baa..8717473 100644
--- a/scripts/algorithms/Kmeans.dml
+++ b/scripts/algorithms/Kmeans.dml
@@ -190,11 +190,11 @@ termination_bitmap = matrix (0, rows = num_runs, cols = 3);
 termination_bitmap_raw = table (seq (1, num_runs, 1), termination_code);
 termination_bitmap [, 1 : ncol(termination_bitmap_raw)] = termination_bitmap_raw;
 termination_stats = colSums (termination_bitmap);
-print ("Number of successful runs = " + as.integer (castAsScalar (termination_stats [1, 1])));
-print ("Number of incomplete runs = " + as.integer (castAsScalar (termination_stats [1, 2])));
-print ("Number of failed runs (with lost centroids) = " + as.integer (castAsScalar (termination_stats [1, 3])));
+print ("Number of successful runs = " + as.integer (as.scalar (termination_stats [1, 1])));
+print ("Number of incomplete runs = " + as.integer (as.scalar (termination_stats [1, 2])));
+print ("Number of failed runs (with lost centroids) = " + as.integer (as.scalar (termination_stats [1, 3])));
 
-num_successful_runs = castAsScalar (termination_stats [1, 1]);
+num_successful_runs = as.scalar (termination_stats [1, 1]);
 if (num_successful_runs > 0) {
     final_wcss_successful = final_wcss * termination_bitmap [, 1];
     worst_wcss = max (final_wcss_successful);

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/scripts/algorithms/StepGLM.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/StepGLM.dml b/scripts/algorithms/StepGLM.dml
index 10737ff..a8c8820 100644
--- a/scripts/algorithms/StepGLM.dml
+++ b/scripts/algorithms/StepGLM.dml
@@ -498,7 +498,7 @@ glm = function (Matrix[Double] X, Matrix[Double] Y, Int intercept_status, Double
 
 								
                 if (intercept_status == 1 | intercept_status == 2) {
-					intercept_value = castAsScalar (beta_out [num_features, 1]);
+					intercept_value = as.scalar (beta_out [num_features, 1]);
                     beta_noicept = beta_out [1 : (num_features - 1), 1];
                 } else {
 					beta_noicept = beta_out [1 : num_features, 1];
@@ -506,9 +506,9 @@ glm = function (Matrix[Double] X, Matrix[Double] Y, Int intercept_status, Double
                 min_beta = min (beta_noicept);
                 max_beta = max (beta_noicept);
                 tmp_i_min_beta = rowIndexMin (t(beta_noicept))
-                i_min_beta = castAsScalar (tmp_i_min_beta [1, 1]);
+                i_min_beta = as.scalar (tmp_i_min_beta [1, 1]);
                 tmp_i_max_beta = rowIndexMax (t(beta_noicept))
-                i_max_beta = castAsScalar (tmp_i_max_beta [1, 1]);
+                i_max_beta = as.scalar (tmp_i_max_beta [1, 1]);
                         
                 #####  OVER-DISPERSION PART  #####
                       

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/scripts/algorithms/Univar-Stats.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/Univar-Stats.dml b/scripts/algorithms/Univar-Stats.dml
index 404e002..525118c 100644
--- a/scripts/algorithms/Univar-Stats.dml
+++ b/scripts/algorithms/Univar-Stats.dml
@@ -69,7 +69,7 @@ parfor(i in 1:n, check=0) {
 	# project out the i^th column
 	F = A[,i];
 
-	kind = castAsScalar(K[1,i]);
+	kind = as.scalar(K[1,i]);
 
 	if ( kind == 1 ) {
 		#print("[" + i + "] Scale");
@@ -149,7 +149,7 @@ parfor(i in 1:n, check=0) {
 if (consoleOutput == TRUE) {
 	for(i in 1:n) {
 		print("-------------------------------------------------");
-		kind = castAsScalar(K[1,i]);
+		kind = as.scalar(K[1,i]);
 		if (kind == 1) {
 			print("Feature [" + i + "]: Scale");
 			print(" (01) Minimum             | " + as.scalar(baseStats[1,i]));

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/scripts/algorithms/bivar-stats.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/bivar-stats.dml b/scripts/algorithms/bivar-stats.dml
index 99549dc..8f7b6c1 100644
--- a/scripts/algorithms/bivar-stats.dml
+++ b/scripts/algorithms/bivar-stats.dml
@@ -67,13 +67,13 @@ num_nominal_scale_tests = 0
 
 pair2row = matrix(0, rows=numPairs, cols=2)
 for( i in 1:s1size, check=0) {
-    pre_a1 = castAsScalar(S1[1,i]);
-    pre_k1 = castAsScalar(K1[1,i]);
+    pre_a1 = as.scalar(S1[1,i]);
+    pre_k1 = as.scalar(K1[1,i]);
 
     for( j in 1:s2size, check=0) {
         pre_pairID = (i-1)*s2size+j; 
-        pre_a2 = castAsScalar(S2[1,j]);
-        pre_k2 = castAsScalar(K2[1,j]);
+        pre_a2 = as.scalar(S2[1,j]);
+        pre_k2 = as.scalar(K2[1,j]);
 	
 	if (pre_k1 == pre_k2) {
             if (pre_k1 == 1) {
@@ -167,18 +167,18 @@ maxDomain = as.integer(maxDomainSize);
 if(error_flag) stop(debug_str);
 
 parfor( i in 1:s1size, check=0) {
-    a1 = castAsScalar(S1[1,i]);
-    k1 = castAsScalar(K1[1,i]);
+    a1 = as.scalar(S1[1,i]);
+    k1 = as.scalar(K1[1,i]);
     A1 = D[,a1];
 
     parfor( j in 1:s2size, check=0) {
         pairID = (i-1)*s2size+j; 
-        a2 = castAsScalar(S2[1,j]);
-        k2 = castAsScalar(K2[1,j]);
+        a2 = as.scalar(S2[1,j]);
+        k2 = as.scalar(K2[1,j]);
         A2 = D[,a2];
 
-		rowid1 = castAsScalar(pair2row[pairID, 1])
-    	rowid2 = castAsScalar(pair2row[pairID, 2])
+		rowid1 = as.scalar(pair2row[pairID, 1])
+    	rowid2 = as.scalar(pair2row[pairID, 2])
 
         if (k1 == k2) {
             if (k1 == 1) {

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/scripts/algorithms/l2-svm-predict.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/l2-svm-predict.dml b/scripts/algorithms/l2-svm-predict.dml
index a4d6fff..cace79f 100644
--- a/scripts/algorithms/l2-svm-predict.dml
+++ b/scripts/algorithms/l2-svm-predict.dml
@@ -54,7 +54,7 @@ w = w[1:(nrow(w)-4),]
 
 b = 0.0
 if(intercept == 1)
-	b = castAsScalar(w[nrow(w),1])
+	b = as.scalar(w[nrow(w),1])
 
 scores = b + (X %*% w[1:ncol(X),])
 

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/scripts/algorithms/m-svm.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/m-svm.dml b/scripts/algorithms/m-svm.dml
index 4142ac1..4224d26 100644
--- a/scripts/algorithms/m-svm.dml
+++ b/scripts/algorithms/m-svm.dml
@@ -170,7 +170,7 @@ write(w, $model, format=cmdLine_fmt)
 debug_str = "# Class, Iter, Obj"
 for(iter_class in 1:ncol(debug_mat)){
 	for(iter in 1:nrow(debug_mat)){
-		obj = castAsScalar(debug_mat[iter, iter_class])
+		obj = as.scalar(debug_mat[iter, iter_class])
 		if(obj != -1) 
 			debug_str = append(debug_str, iter_class + "," + iter + "," + obj)
 	}

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/scripts/algorithms/stratstats.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/stratstats.dml b/scripts/algorithms/stratstats.dml
index 2b7425d..d380220 100644
--- a/scripts/algorithms/stratstats.dml
+++ b/scripts/algorithms/stratstats.dml
@@ -375,9 +375,9 @@ fStat_tailprob = function (Matrix[double] fStat, Matrix[double] df_1, Matrix[dou
     tailprob = fStat;
     for (i in 1:nrow(fStat)) {
       for (j in 1:ncol(fStat)) {
-        q = castAsScalar (fStat [i, j]);
-        d1 = castAsScalar (df_1 [i, j]);
-        d2 = castAsScalar (df_2 [i, j]);
+        q = as.scalar (fStat [i, j]);
+        d1 = as.scalar (df_1 [i, j]);
+        d2 = as.scalar (df_2 [i, j]);
         if (d1 >= 1 & d2 >= 1 & q >= 0.0) {
             tailprob  [i, j] = pf(target = q, df1 = d1, df2 = d2, lower.tail=FALSE);
         } else {

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/scripts/datagen/genRandData4ChisquaredTest.dml
----------------------------------------------------------------------
diff --git a/scripts/datagen/genRandData4ChisquaredTest.dml b/scripts/datagen/genRandData4ChisquaredTest.dml
index e25adf2..4709843 100644
--- a/scripts/datagen/genRandData4ChisquaredTest.dml
+++ b/scripts/datagen/genRandData4ChisquaredTest.dml
@@ -65,14 +65,14 @@ one = Rand(rows=1, cols=1, min=1.0, max=1.0, pdf="uniform", seed=0)
 data = Rand(rows=numSamples, cols=2, min=0.0, max=0.0, pdf="uniform", seed=0)
 parfor(s in 1:numSamples){
 	r_mat = Rand(rows=1, cols=1, min=0.0, max=1.0, pdf="uniform", seed=0)
-	r = castAsScalar(r_mat)
+	r = as.scalar(r_mat)
 
 	cat1 = -1
 	cat2 = -1
 	continue = 1
 	for(i in 1:numCategories1){
 		for(j in 1:numCategories2){
-			cdf = castAsScalar(oCDF[i,j])
+			cdf = as.scalar(oCDF[i,j])
 			if(continue == 1 & r <= cdf){
 				cat1 = i
 				cat2 = j

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/scripts/datagen/genRandData4FTest.dml
----------------------------------------------------------------------
diff --git a/scripts/datagen/genRandData4FTest.dml b/scripts/datagen/genRandData4FTest.dml
index bdd33b9..9f0e1d6 100644
--- a/scripts/datagen/genRandData4FTest.dml
+++ b/scripts/datagen/genRandData4FTest.dml
@@ -50,7 +50,7 @@ one = Rand(rows=1, cols=1, min=1.0, max=1.0, pdf="uniform")
 copy_start_index = numActualGroups+1
 parfor(i in copy_start_index:numGroups){
 	r = Rand(rows=1, cols=1, min=1.0, max=numActualGroups, pdf="uniform", seed=0)
-	j = castAsScalar(round(r))
+	j = as.scalar(round(r))
 	permut[j,i] = one
 }
 
@@ -77,12 +77,12 @@ for(i in 2:numGroups){
 data = Rand(rows=numSamples, cols=1, min=0.0, max=0.0, pdf="uniform")
 parfor(i in 1:numSamples){
 	r_mat = Rand(rows=1, cols=1, min=0.0, max=1.0, pdf="uniform", seed=0)
-	r1 = castAsScalar(r_mat)
+	r1 = as.scalar(r_mat)
 
 	g = -1
 	continue = 1
 	for(k in 1:numGroups){
-		cdf = castAsScalar(cntCDFs[k,1])
+		cdf = as.scalar(cntCDFs[k,1])
 		if(continue==1 & r1<=cdf){
 			g = k
 			continue=0

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/scripts/datagen/genRandData4LinearReg_LTstats.dml
----------------------------------------------------------------------
diff --git a/scripts/datagen/genRandData4LinearReg_LTstats.dml b/scripts/datagen/genRandData4LinearReg_LTstats.dml
index 0bc187a..e4e8384 100644
--- a/scripts/datagen/genRandData4LinearReg_LTstats.dml
+++ b/scripts/datagen/genRandData4LinearReg_LTstats.dml
@@ -92,13 +92,13 @@ actual_meanLT  = colSums (LT) / numSamples;
 actual_sigmaLT = sqrt (colSums ((LT - ones %*% actual_meanLT)^2) / numSamples);
 
 for (i in 1:(numCategories - 1)) {
-    if (castAsScalar (new_sigmaLT [1, i]) == castAsScalar (sigmaLT [1, i])) {
-        print ("Category " + i + ":  Intercept = " + castAsScalar (b_intercept [1, i])); 
+    if (as.scalar (new_sigmaLT [1, i]) == as.scalar (sigmaLT [1, i])) {
+        print ("Category " + i + ":  Intercept = " + as.scalar (b_intercept [1, i])); 
     } else {
-        print ("Category " + i + ":  Intercept = " + castAsScalar (b_intercept [1, i]) + ",  st.dev.(LT) relaxed from " + castAsScalar (sigmaLT [1, i])); 
+        print ("Category " + i + ":  Intercept = " + as.scalar (b_intercept [1, i]) + ",  st.dev.(LT) relaxed from " + as.scalar (sigmaLT [1, i])); 
     }
-    print ("    Wanted LT mean = " + castAsScalar (meanLT [1, i])        + ",  st.dev. = " + castAsScalar (new_sigmaLT [1, i]));
-    print ("    Actual LT mean = " + castAsScalar (actual_meanLT [1, i]) + ",  st.dev. = " + castAsScalar (actual_sigmaLT [1, i]));
+    print ("    Wanted LT mean = " + as.scalar (meanLT [1, i])        + ",  st.dev. = " + as.scalar (new_sigmaLT [1, i]));
+    print ("    Actual LT mean = " + as.scalar (actual_meanLT [1, i]) + ",  st.dev. = " + as.scalar (actual_sigmaLT [1, i]));
 }
 
 

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/scripts/datagen/genRandData4LogReg_LTstats.dml
----------------------------------------------------------------------
diff --git a/scripts/datagen/genRandData4LogReg_LTstats.dml b/scripts/datagen/genRandData4LogReg_LTstats.dml
index 2ec5aef..1797f4f 100644
--- a/scripts/datagen/genRandData4LogReg_LTstats.dml
+++ b/scripts/datagen/genRandData4LogReg_LTstats.dml
@@ -91,13 +91,13 @@ actual_meanLT  = colSums (LT) / numSamples;
 actual_sigmaLT = sqrt (colSums ((LT - ones %*% actual_meanLT)^2) / numSamples);
 
 for (i in 1:(numCategories - 1)) {
-    if (castAsScalar (new_sigmaLT [1, i]) == castAsScalar (sigmaLT [1, i])) {
-        print ("Category " + i + ":  Intercept = " + castAsScalar (b_intercept [1, i])); 
+    if (as.scalar (new_sigmaLT [1, i]) == as.scalar (sigmaLT [1, i])) {
+        print ("Category " + i + ":  Intercept = " + as.scalar (b_intercept [1, i])); 
     } else {
-        print ("Category " + i + ":  Intercept = " + castAsScalar (b_intercept [1, i]) + ",  st.dev.(LT) relaxed from " + castAsScalar (sigmaLT [1, i])); 
+        print ("Category " + i + ":  Intercept = " + as.scalar (b_intercept [1, i]) + ",  st.dev.(LT) relaxed from " + as.scalar (sigmaLT [1, i])); 
     }
-    print ("    Wanted LT mean = " + castAsScalar (meanLT [1, i])        + ",  st.dev. = " + castAsScalar (new_sigmaLT [1, i]));
-    print ("    Actual LT mean = " + castAsScalar (actual_meanLT [1, i]) + ",  st.dev. = " + castAsScalar (actual_sigmaLT [1, i]));
+    print ("    Wanted LT mean = " + as.scalar (meanLT [1, i])        + ",  st.dev. = " + as.scalar (new_sigmaLT [1, i]));
+    print ("    Actual LT mean = " + as.scalar (actual_meanLT [1, i]) + ",  st.dev. = " + as.scalar (actual_sigmaLT [1, i]));
 }
 
 

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/scripts/datagen/genRandData4NMF.dml
----------------------------------------------------------------------
diff --git a/scripts/datagen/genRandData4NMF.dml b/scripts/datagen/genRandData4NMF.dml
index cf18430..87e3f47 100644
--- a/scripts/datagen/genRandData4NMF.dml
+++ b/scripts/datagen/genRandData4NMF.dml
@@ -80,7 +80,7 @@ parfor(i in 1:numDocuments){
 	r_w = Rand(rows=numWordsPerDoc, cols=1, min=0, max=1, pdf="uniform", seed=0)
 	
 	for(j in 1:numWordsPerDoc){
-		rz = castAsScalar(r_z[j,1])
+		rz = as.scalar(r_z[j,1])
 		continue = 1
 		
 		z = -1
@@ -88,7 +88,7 @@ parfor(i in 1:numDocuments){
 		#z=1	
 		
 		for(k1 in 1:numTopics){
-			prob = castAsScalar(docTopic[1,k1])
+			prob = as.scalar(docTopic[1,k1])
 			if(continue==1 & rz <= prob){
 				z=k1
 				continue=0
@@ -100,7 +100,7 @@ parfor(i in 1:numDocuments){
 			z = numTopics
 		}
 		
-		rw = castAsScalar(r_w[j,1])
+		rw = as.scalar(r_w[j,1])
 		continue = 1
 		
 		w = -1
@@ -108,7 +108,7 @@ parfor(i in 1:numDocuments){
 		#w = 1
 		
 		for(k2 in 1:numFeatures){
-			prob = castAsScalar(topicDistributions[z,k2])
+			prob = as.scalar(topicDistributions[z,k2])
 			if(continue == 1 & rw <= prob){
 				w = k2
 				continue = 0

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/scripts/datagen/genRandData4NMFBlockwise.dml
----------------------------------------------------------------------
diff --git a/scripts/datagen/genRandData4NMFBlockwise.dml b/scripts/datagen/genRandData4NMFBlockwise.dml
index e3fd67f..06b8057 100644
--- a/scripts/datagen/genRandData4NMFBlockwise.dml
+++ b/scripts/datagen/genRandData4NMFBlockwise.dml
@@ -88,7 +88,7 @@ for( k in seq(1,numDocuments,blocksize) )
   	r_w = Rand(rows=numWordsPerDoc, cols=1, min=0, max=1, pdf="uniform", seed=0)
   	
   	for(j in 1:numWordsPerDoc){
-  		rz = castAsScalar(r_z[j,1])
+  		rz = as.scalar(r_z[j,1])
   		continue = 1
   		
   		z = -1
@@ -96,7 +96,7 @@ for( k in seq(1,numDocuments,blocksize) )
   		#z=1	
   		
   		for(k1 in 1:numTopics){
-  			prob = castAsScalar(docTopic[1,k1])
+  			prob = as.scalar(docTopic[1,k1])
   			if(continue==1 & rz <= prob){
   				z=k1
   				continue=0
@@ -108,7 +108,7 @@ for( k in seq(1,numDocuments,blocksize) )
   			z = numTopics
   		}
   		
-  		rw = castAsScalar(r_w[j,1])
+  		rw = as.scalar(r_w[j,1])
   		continue = 1
   		
   		w = -1
@@ -116,7 +116,7 @@ for( k in seq(1,numDocuments,blocksize) )
   		#w = 1
   		
   		for(k2 in 1:numFeatures){
-  			prob = castAsScalar(topicDistributions[z,k2])
+  			prob = as.scalar(topicDistributions[z,k2])
   			if(continue == 1 & rw <= prob){
   				w = k2
   				continue = 0

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/apply-transform/apply-transform.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/apply-transform/apply-transform.dml b/src/test/scripts/applications/apply-transform/apply-transform.dml
index fdd85c7..5110cb0 100644
--- a/src/test/scripts/applications/apply-transform/apply-transform.dml
+++ b/src/test/scripts/applications/apply-transform/apply-transform.dml
@@ -52,7 +52,7 @@ if(cmdLine_missing_value_maps != " "){
 	missing_indicator_mat = original_X[,(last_data_col+1):ncol(original_X)]
 	
 	parfor(i in 1:nrow(missing_val_maps), check=0){
-		attr_index_mv = castAsScalar(missing_val_maps[i,1])
+		attr_index_mv = as.scalar(missing_val_maps[i,1])
 		attrinfo[attr_index_mv,1] = i
 		attrinfo[attr_index_mv,2] = missing_val_maps[i,2]
 	}	
@@ -61,7 +61,7 @@ if(cmdLine_missing_value_maps != " "){
 if(cmdLine_bin_defns != " "){
 	bin_defns = read(cmdLine_bin_defns)
 	parfor(i in 1:nrow(bin_defns), check=0){
-		attr_index_bin = castAsScalar(bin_defns[i,1])
+		attr_index_bin = as.scalar(bin_defns[i,1])
 		attrinfo[attr_index_bin,3] = bin_defns[i,4]
 		attrinfo[attr_index_bin,4] = bin_defns[i,2]
 		attrinfo[attr_index_bin,5] = bin_defns[i,3]
@@ -71,7 +71,7 @@ if(cmdLine_bin_defns != " "){
 if(cmdLine_dummy_code_maps != " "){
 	dummy_code_maps = read(cmdLine_dummy_code_maps)
 	parfor(i in 1:nrow(dummy_code_maps), check=0){
-		attr_index_dc = castAsScalar(dummy_code_maps[i,1])
+		attr_index_dc = as.scalar(dummy_code_maps[i,1])
 		attrinfo[attr_index_dc,6] = dummy_code_maps[i,2]
 		attrinfo[attr_index_dc,7] = dummy_code_maps[i,3]
 	}
@@ -83,31 +83,31 @@ if(cmdLine_dummy_code_maps != " "){
 if(cmdLine_normalization_maps != " "){
 	normalization_map = read(cmdLine_normalization_maps)
 	parfor(i in 1:nrow(normalization_map), check=0){
-		attr_index_normalization = castAsScalar(normalization_map[i,1])
+		attr_index_normalization = as.scalar(normalization_map[i,1])
 		attrinfo[attr_index_normalization,8] = 1
-		attrinfo[attr_index_normalization,9] = castAsScalar(normalization_map[i,2])
-		attrinfo[attr_index_normalization,10] = castAsScalar(normalization_map[i,3])
+		attrinfo[attr_index_normalization,9] = as.scalar(normalization_map[i,2])
+		attrinfo[attr_index_normalization,10] = as.scalar(normalization_map[i,3])
 	}
 }
 
 #write(attrinfo, "binning/attrinfo.mtx", format="csv")
 
-cols_in_transformed_X = castAsScalar(attrinfo[nrow(attrinfo),6])
+cols_in_transformed_X = as.scalar(attrinfo[nrow(attrinfo),6])
 new_X = matrix(0, rows=nrow(X), cols=cols_in_transformed_X)
 log = matrix(0, rows=ncol(X), cols=2)
 parfor(i in 1:ncol(X), check=0){
 	col = X[,i]
 	
-	mv_col_id = castAsScalar(attrinfo[i,1])
-	global_mean = castAsScalar(attrinfo[i,2])
-	num_bins = castAsScalar(attrinfo[i,3])
-	bin_width = castAsScalar(attrinfo[i,4])
-	min_val = castAsScalar(attrinfo[i,5])
-	dummy_coding_beg_col = castAsScalar(attrinfo[i,6])
-	dummy_coding_end_col = castAsScalar(attrinfo[i,7])
-	normalization_needed = castAsScalar(attrinfo[i,8])
-	normalization_mean = castAsScalar(attrinfo[i,9])
-	normalization_std = castAsScalar(attrinfo[i,10])
+	mv_col_id = as.scalar(attrinfo[i,1])
+	global_mean = as.scalar(attrinfo[i,2])
+	num_bins = as.scalar(attrinfo[i,3])
+	bin_width = as.scalar(attrinfo[i,4])
+	min_val = as.scalar(attrinfo[i,5])
+	dummy_coding_beg_col = as.scalar(attrinfo[i,6])
+	dummy_coding_end_col = as.scalar(attrinfo[i,7])
+	normalization_needed = as.scalar(attrinfo[i,8])
+	normalization_mean = as.scalar(attrinfo[i,9])
+	normalization_std = as.scalar(attrinfo[i,10])
 	
 	if(mv_col_id > 0){ 
 		# fill-in with global mean
@@ -150,7 +150,7 @@ write(new_X, $transformed_X, format="text")
 
 s = "Warning Messages"
 for(i in 1:nrow(log)){
-	if(castAsScalar(log[i,1]) == 1)
-		s = append(s, "Unseen value in column " + i + " (" + castAsScalar(log[i,2]) + ")")
+	if(as.scalar(log[i,1]) == 1)
+		s = append(s, "Unseen value in column " + i + " (" + as.scalar(log[i,2]) + ")")
 }
 write(s, $Log)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/apply-transform/apply-transform.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/apply-transform/apply-transform.pydml b/src/test/scripts/applications/apply-transform/apply-transform.pydml
index f6c40dd..cc95e85 100644
--- a/src/test/scripts/applications/apply-transform/apply-transform.pydml
+++ b/src/test/scripts/applications/apply-transform/apply-transform.pydml
@@ -52,14 +52,14 @@ if(cmdLine_missing_value_maps != " "):
     missing_indicator_mat = original_X[,(last_data_col+1):ncol(original_X)]
     
     parfor(i in 1:nrow(missing_val_maps), check=0):
-        attr_index_mv = castAsScalar(missing_val_maps[i,1])
+        attr_index_mv = scalar(missing_val_maps[i,1])
         attrinfo[attr_index_mv,1] = i
         attrinfo[attr_index_mv,2] = missing_val_maps[i,2]
     
 if(cmdLine_bin_defns != " "):
     bin_defns = read(cmdLine_bin_defns)
     parfor(i in 1:nrow(bin_defns), check=0):
-        attr_index_bin = castAsScalar(bin_defns[i,1])
+        attr_index_bin = scalar(bin_defns[i,1])
         attrinfo[attr_index_bin,3] = bin_defns[i,4]
         attrinfo[attr_index_bin,4] = bin_defns[i,2]
         attrinfo[attr_index_bin,5] = bin_defns[i,3]
@@ -67,7 +67,7 @@ if(cmdLine_bin_defns != " "):
 if(cmdLine_dummy_code_maps != " "):
     dummy_code_maps = read(cmdLine_dummy_code_maps)
     parfor(i in 1:nrow(dummy_code_maps), check=0):
-        attr_index_dc = castAsScalar(dummy_code_maps[i,1])
+        attr_index_dc = scalar(dummy_code_maps[i,1])
         attrinfo[attr_index_dc,6] = dummy_code_maps[i,2]
         attrinfo[attr_index_dc,7] = dummy_code_maps[i,3]
 else:
@@ -77,29 +77,29 @@ else:
 if(cmdLine_normalization_maps != " "):
     normalization_map = read(cmdLine_normalization_maps)
     parfor(i in 1:nrow(normalization_map), check=0):
-        attr_index_normalization = castAsScalar(normalization_map[i,1])
+        attr_index_normalization = scalar(normalization_map[i,1])
         attrinfo[attr_index_normalization,8] = 1
-        attrinfo[attr_index_normalization,9] = castAsScalar(normalization_map[i,2])
-        attrinfo[attr_index_normalization,10] = castAsScalar(normalization_map[i,3])
+        attrinfo[attr_index_normalization,9] = scalar(normalization_map[i,2])
+        attrinfo[attr_index_normalization,10] = scalar(normalization_map[i,3])
 
 #write(attrinfo, "binning/attrinfo.mtx", format="csv")
 
-cols_in_transformed_X = castAsScalar(attrinfo[nrow(attrinfo),6])
+cols_in_transformed_X = scalar(attrinfo[nrow(attrinfo),6])
 new_X = full(0, rows=nrow(X), cols=cols_in_transformed_X)
 log = full(0, rows=ncol(X), cols=2)
 parfor(i in 1:ncol(X), check=0):
     col = X[,i]
     
-    mv_col_id = castAsScalar(attrinfo[i,1])
-    global_mean = castAsScalar(attrinfo[i,2])
-    num_bins = castAsScalar(attrinfo[i,3])
-    bin_width = castAsScalar(attrinfo[i,4])
-    min_val = castAsScalar(attrinfo[i,5])
-    dummy_coding_beg_col = castAsScalar(attrinfo[i,6])
-    dummy_coding_end_col = castAsScalar(attrinfo[i,7])
-    normalization_needed = castAsScalar(attrinfo[i,8])
-    normalization_mean = castAsScalar(attrinfo[i,9])
-    normalization_std = castAsScalar(attrinfo[i,10])
+    mv_col_id = scalar(attrinfo[i,1])
+    global_mean = scalar(attrinfo[i,2])
+    num_bins = scalar(attrinfo[i,3])
+    bin_width = scalar(attrinfo[i,4])
+    min_val = scalar(attrinfo[i,5])
+    dummy_coding_beg_col = scalar(attrinfo[i,6])
+    dummy_coding_end_col = scalar(attrinfo[i,7])
+    normalization_needed = scalar(attrinfo[i,8])
+    normalization_mean = scalar(attrinfo[i,9])
+    normalization_std = scalar(attrinfo[i,10])
     
     if(mv_col_id > 0):
         # fill-in with global mean
@@ -140,7 +140,7 @@ save(new_X, $transformed_X, format="text")
 
 s = "Warning Messages"
 for(i in 1:nrow(log)):
-    if(castAsScalar(log[i,1]) == 1):
-        s = append(s, "Unseen value in column " + i + " (" + castAsScalar(log[i,2]) + ")")
+    if(scalar(log[i,1]) == 1):
+        s = append(s, "Unseen value in column " + i + " (" + scalar(log[i,2]) + ")")
 
 save(s, $Log)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/arima_box-jenkins/arima.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/arima_box-jenkins/arima.dml b/src/test/scripts/applications/arima_box-jenkins/arima.dml
index 73052e0..e21b75e 100644
--- a/src/test/scripts/applications/arima_box-jenkins/arima.dml
+++ b/src/test/scripts/applications/arima_box-jenkins/arima.dml
@@ -43,14 +43,14 @@ arima_css = function(Matrix[Double] w, Matrix[Double] X, Integer pIn, Integer P,
 		ma_ind_ns = P+pIn+i7
 		err_ind_ns = i7
 		ones_ns = Rand(rows=nrow(R)-err_ind_ns, cols=1, min=1, max=1)
-		d_ns = ones_ns * castAsScalar(w[ma_ind_ns,1])
+		d_ns = ones_ns * as.scalar(w[ma_ind_ns,1])
 		R[1+err_ind_ns:nrow(R),1:ncol(R)-err_ind_ns] = R[1+err_ind_ns:nrow(R),1:ncol(R)-err_ind_ns] + diag(d_ns)
 	}
 	for(i8 in 1:Q){
 		ma_ind_s = P+pIn+qIn+i8
 		err_ind_s = s*i8
 		ones_s = Rand(rows=nrow(R)-err_ind_s, cols=1, min=1, max=1)
-		d_s = ones_s * castAsScalar(w[ma_ind_s,1])
+		d_s = ones_s * as.scalar(w[ma_ind_s,1])
 		R[1+err_ind_s:nrow(R),1:ncol(R)-err_ind_s] = R[1+err_ind_s:nrow(R),1:ncol(R)-err_ind_s] + diag(d_s)
 	}
 	
@@ -91,7 +91,7 @@ arima_css = function(Matrix[Double] w, Matrix[Double] X, Integer pIn, Integer P,
   		}
   		while(iter < max_iter & continue == 1){
   			q = Z%*%p
-  			alpha = norm_r2 / castAsScalar(t(p) %*% q)
+  			alpha = norm_r2 / as.scalar(t(p) %*% q)
   			y_hat = y_hat + alpha * p
   			old_norm_r2 = norm_r2
   			r = r + alpha * q
@@ -203,26 +203,26 @@ parfor(i3 in 1:ncol(simplex)){
 }
 num_func_invoc = num_func_invoc + ncol(simplex)
 
-tol = 1.5 * 10^(-8) * castAsScalar(objvals[1,1])
+tol = 1.5 * 10^(-8) * as.scalar(objvals[1,1])
 
 continue = 1
 while(continue == 1 & num_func_invoc <= max_func_invoc) {
 	best_index = 1
 	worst_index = 1
 	for(i in 2:ncol(objvals)){
-		this = castAsScalar(objvals[1,i])
-		that = castAsScalar(objvals[1,best_index])
+		this = as.scalar(objvals[1,i])
+		that = as.scalar(objvals[1,best_index])
   		if(that > this){
     		best_index = i
   		}
-  		that = castAsScalar(objvals[1,worst_index])
+  		that = as.scalar(objvals[1,worst_index])
   		if(that < this){
     		worst_index = i
   		}
 	}
 	
-	best_obj_val = castAsScalar(objvals[1,best_index])
-	worst_obj_val = castAsScalar(objvals[1,worst_index])
+	best_obj_val = as.scalar(objvals[1,best_index])
+	worst_obj_val = as.scalar(objvals[1,worst_index])
 	if(worst_obj_val <= best_obj_val + tol){
 		continue = 0
 	}
@@ -257,7 +257,7 @@ while(continue == 1 & num_func_invoc <= max_func_invoc) {
 		obj_x_c_in = arima_css(x_c_in, Z, p, P, q, Q, s, useJacobi)
 		num_func_invoc = num_func_invoc + 1
 		
-		if(obj_x_c_in < castAsScalar(objvals[1,worst_index])){
+		if(obj_x_c_in < as.scalar(objvals[1,worst_index])){
 			simplex[,worst_index] = x_c_in
 			objvals[1,worst_index] = obj_x_c_in
 		}else{

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/arima_box-jenkins/arima.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/arima_box-jenkins/arima.pydml b/src/test/scripts/applications/arima_box-jenkins/arima.pydml
index 9b3387c..1df70d1 100644
--- a/src/test/scripts/applications/arima_box-jenkins/arima.pydml
+++ b/src/test/scripts/applications/arima_box-jenkins/arima.pydml
@@ -43,13 +43,13 @@ def arima_css(w:matrix[float], X:matrix[float], pIn: int, P: int, qIn: int, Q:in
         ma_ind_ns = P+pIn+i7
         err_ind_ns = i7
         ones_ns = Rand(rows=nrow(R)-err_ind_ns, cols=1, min=1, max=1)
-        d_ns = ones_ns * castAsScalar(w[ma_ind_ns,1])
+        d_ns = ones_ns * scalar(w[ma_ind_ns,1])
         R[1+err_ind_ns:nrow(R),1:ncol(R)-err_ind_ns] = R[1+err_ind_ns:nrow(R),1:ncol(R)-err_ind_ns] + diag(d_ns)
     for(i8 in 1:Q):
         ma_ind_s = P+pIn+qIn+i8
         err_ind_s = s*i8
         ones_s = Rand(rows=nrow(R)-err_ind_s, cols=1, min=1, max=1)
-        d_s = ones_s * castAsScalar(w[ma_ind_s,1])
+        d_s = ones_s * scalar(w[ma_ind_s,1])
         R[1+err_ind_s:nrow(R),1:ncol(R)-err_ind_s] = R[1+err_ind_s:nrow(R),1:ncol(R)-err_ind_s] + diag(d_s)
     
     #checking for strict diagonal dominance
@@ -86,7 +86,7 @@ def arima_css(w:matrix[float], X:matrix[float], pIn: int, P: int, qIn: int, Q:in
         while(iter < max_iter & continue == 1):
             q = dot(Z, p)
             transpose_p = transpose(p)
-            alpha = norm_r2 / castAsScalar(dot(transpose_p, q))
+            alpha = norm_r2 / scalar(dot(transpose_p, q))
             y_hat = y_hat + alpha * p
             old_norm_r2 = norm_r2
             r = r + alpha * q
@@ -187,23 +187,23 @@ parfor(i3 in 1:ncol(simplex)):
 
 num_func_invoc = num_func_invoc + ncol(simplex)
 
-tol = 1.5 * (10**-8) * castAsScalar(objvals[1,1])
+tol = 1.5 * (10**-8) * scalar(objvals[1,1])
 
 continue = 1
 while(continue == 1 & num_func_invoc <= max_func_invoc):
     best_index = 1
     worst_index = 1
     for(i in 2:ncol(objvals)):
-        this = castAsScalar(objvals[1,i])
-        that = castAsScalar(objvals[1,best_index])
+        this = scalar(objvals[1,i])
+        that = scalar(objvals[1,best_index])
         if(that > this):
             best_index = i
-        that = castAsScalar(objvals[1,worst_index])
+        that = scalar(objvals[1,worst_index])
         if(that < this):
             worst_index = i
     
-    best_obj_val = castAsScalar(objvals[1,best_index])
-    worst_obj_val = castAsScalar(objvals[1,worst_index])
+    best_obj_val = scalar(objvals[1,best_index])
+    worst_obj_val = scalar(objvals[1,worst_index])
     if(worst_obj_val <= best_obj_val + tol):
         continue = 0
     
@@ -235,7 +235,7 @@ while(continue == 1 & num_func_invoc <= max_func_invoc):
         obj_x_c_in = arima_css(x_c_in, Z, p, P, q, Q, s, useJacobi)
         num_func_invoc = num_func_invoc + 1
         
-        if(obj_x_c_in < castAsScalar(objvals[1,worst_index])):
+        if(obj_x_c_in < scalar(objvals[1,worst_index])):
             simplex[,worst_index] = x_c_in
             objvals[1,worst_index] = obj_x_c_in
         else:

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/cspline/CsplineCG.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/cspline/CsplineCG.pydml b/src/test/scripts/applications/cspline/CsplineCG.pydml
index 1c8daad..29c55a1 100644
--- a/src/test/scripts/applications/cspline/CsplineCG.pydml
+++ b/src/test/scripts/applications/cspline/CsplineCG.pydml
@@ -155,7 +155,7 @@ def interpSpline(x: float, X: matrix[float], Y: matrix[float], K: matrix[float])
     
     qm = (1-t)*Y[i-1,1] + t*Y[i,1] + t*(1-t)*(a*(1-t)+b*t)
     
-    q = castAsScalar(qm)
+    q = scalar(qm)
 
 #solve Ax = b
 #   for CG our formulation is

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/cspline/CsplineDS.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/cspline/CsplineDS.pydml b/src/test/scripts/applications/cspline/CsplineDS.pydml
index 2b865b3..40847a7 100644
--- a/src/test/scripts/applications/cspline/CsplineDS.pydml
+++ b/src/test/scripts/applications/cspline/CsplineDS.pydml
@@ -134,7 +134,7 @@ def interpSpline(x: float, X: matrix[float], Y: matrix[float], K: matrix[float])
     
     qm = (1-t)*Y[i-1,1] + t*Y[i,1] + t*(1-t)*(a*(1-t)+b*t)
     
-    q = castAsScalar(qm)
+    q = scalar(qm)
 
 #
 # trunc the matrix by the specified amount in the specified direction.

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/ctableStats/ctci.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/ctableStats/ctci.dml b/src/test/scripts/applications/ctableStats/ctci.dml
index 4aa3ae3..f545138 100644
--- a/src/test/scripts/applications/ctableStats/ctci.dml
+++ b/src/test/scripts/applications/ctableStats/ctci.dml
@@ -112,7 +112,7 @@ for (iLabel in 1:numLabels)
         
         print ("    (partition & label) / (all label) ratios...");
         
-        cntThisLabel = zeros + castAsScalar (cntLabels [iLabel, 1]);
+        cntThisLabel = zeros + as.scalar (cntLabels [iLabel, 1]);
         [ratio2, left_conf_wilson2, right_conf_wilson2] = 
             wilson_confidence (cntThisLabel, cntPartitionsWithLabel);
         [left_conf_exact2] = binomQuantile (cntThisLabel, cntPartitionsWithLabel_minus_1, big_alpha);

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/ctableStats/stratstats.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/ctableStats/stratstats.dml b/src/test/scripts/applications/ctableStats/stratstats.dml
index 7eb1858..5d190e7 100644
--- a/src/test/scripts/applications/ctableStats/stratstats.dml
+++ b/src/test/scripts/applications/ctableStats/stratstats.dml
@@ -317,9 +317,9 @@ fStat_tailprob = function (Matrix[double] fStat, Matrix[double] df_1, Matrix[dou
     tailprob = fStat;
     for (i in 1:nrow(fStat)) {
       for (j in 1:ncol(fStat)) {
-        q = castAsScalar (fStat [i, j]);
-        d1 = castAsScalar (df_1 [i, j]);
-        d2 = castAsScalar (df_2 [i, j]);
+        q = as.scalar (fStat [i, j]);
+        d1 = as.scalar (df_1 [i, j]);
+        d2 = as.scalar (df_2 [i, j]);
         if (d1 >= 1 & d2 >= 1 & q >= 0.0) {
             tailprob  [i, j] = pf (target = q, df1 = d1, df2 = d2, lower.tail=FALSE);
         } else {

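The loop above evaluates the F-distribution upper-tail probability cell by cell after casting each cell with as.scalar; a minimal DML sketch (hypothetical, not part of this patch) of one such evaluation:

    # hypothetical DML example: upper-tail probability of an F statistic
    fStat = matrix(0.0, rows=1, cols=1);
    fStat[1,1] = 2.5;
    q = as.scalar(fStat[1,1]);
    p = pf(target=q, df1=3, df2=10, lower.tail=FALSE);
    print("P[F(3,10) > " + q + "] = " + p);
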
http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/ctableStats/wilson_score.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/ctableStats/wilson_score.dml b/src/test/scripts/applications/ctableStats/wilson_score.dml
index 27d0899..51e31b1 100644
--- a/src/test/scripts/applications/ctableStats/wilson_score.dml
+++ b/src/test/scripts/applications/ctableStats/wilson_score.dml
@@ -37,7 +37,7 @@ source ("Binomial.dml");
 # test_m = Rand (rows = 1, cols = 1, min = 0, max = 0);
 # test_p = Rand (rows = 1, cols = 1, min = 0.00421, max = 0.00421);
 # [alpha] = binomProb (test_n, test_m, test_p);
-# print ("TEST:  Prob [Binom (" + castAsScalar (test_n) + ", " + castAsScalar (test_p) + ") <= " + castAsScalar (test_m) + "]  =  " + castAsScalar (alpha));
+# print ("TEST:  Prob [Binom (" + as.scalar (test_n) + ", " + as.scalar (test_p) + ") <= " + as.scalar (test_m) + "]  =  " + as.scalar (alpha));
 
 print ("BEGIN WILSON SCORE SCRIPT");
 print ("Reading X...");
@@ -87,39 +87,39 @@ result [, 15] = r_m_n_exact;
 print ("M / sum(M)  RESULTS:  Wilson, Exact");
 
 for (i in 1:num_rows) {
-    p1  = castAsScalar (round (result [i,  1] * 100000) / 1000);
-    lw1 = castAsScalar (round (result [i,  2] * 100000) / 1000);
-    rw1 = castAsScalar (round (result [i,  3] * 100000) / 1000);
-    le1 = castAsScalar (round (result [i,  4] * 100000) / 1000);
-    re1 = castAsScalar (round (result [i,  5] * 100000) / 1000);
+    p1  = as.scalar (round (result [i,  1] * 100000) / 1000);
+    lw1 = as.scalar (round (result [i,  2] * 100000) / 1000);
+    rw1 = as.scalar (round (result [i,  3] * 100000) / 1000);
+    le1 = as.scalar (round (result [i,  4] * 100000) / 1000);
+    re1 = as.scalar (round (result [i,  5] * 100000) / 1000);
     print ("Row " + i + ":   "
-        + castAsScalar (M [i, 1]) + "/" + castAsScalar (sum_M [i, 1]) + " = " 
+        + as.scalar (M [i, 1]) + "/" + as.scalar (sum_M [i, 1]) + " = " 
         + p1 + "%  [" + lw1 + "%, " + rw1 + "%]   [" + le1 + "%, " + re1 + "%]");
 }
 
 print ("N / sum(N)  RESULTS:  Wilson, Exact");
 
 for (i in 1:num_rows) {
-    p2  = castAsScalar (round (result [i,  6] * 100000) / 1000);
-    lw2 = castAsScalar (round (result [i,  7] * 100000) / 1000);
-    rw2 = castAsScalar (round (result [i,  8] * 100000) / 1000);
-    le2 = castAsScalar (round (result [i,  9] * 100000) / 1000);
-    re2 = castAsScalar (round (result [i, 10] * 100000) / 1000);
+    p2  = as.scalar (round (result [i,  6] * 100000) / 1000);
+    lw2 = as.scalar (round (result [i,  7] * 100000) / 1000);
+    rw2 = as.scalar (round (result [i,  8] * 100000) / 1000);
+    le2 = as.scalar (round (result [i,  9] * 100000) / 1000);
+    re2 = as.scalar (round (result [i, 10] * 100000) / 1000);
     print ("Row " + i + ":   "
-        + castAsScalar (N [i, 1]) + "/" + castAsScalar (sum_N [i, 1]) + " = " 
+        + as.scalar (N [i, 1]) + "/" + as.scalar (sum_N [i, 1]) + " = " 
         + p2 + "%  [" + lw2 + "%, " + rw2 + "%]   [" + le2 + "%, " + re2 + "%]   ");
 }
 
 print ("M / N  RESULTS:  Wilson, Exact");
 
 for (i in 1:num_rows) {
-    p3  = castAsScalar (round (result [i, 11] * 100000) / 1000);
-    lw3 = castAsScalar (round (result [i, 12] * 100000) / 1000);
-    rw3 = castAsScalar (round (result [i, 13] * 100000) / 1000);
-    le3 = castAsScalar (round (result [i, 14] * 100000) / 1000);
-    re3 = castAsScalar (round (result [i, 15] * 100000) / 1000);
+    p3  = as.scalar (round (result [i, 11] * 100000) / 1000);
+    lw3 = as.scalar (round (result [i, 12] * 100000) / 1000);
+    rw3 = as.scalar (round (result [i, 13] * 100000) / 1000);
+    le3 = as.scalar (round (result [i, 14] * 100000) / 1000);
+    re3 = as.scalar (round (result [i, 15] * 100000) / 1000);
     print ("Row " + i + ":   "
-        + castAsScalar (M [i, 1]) + "/" + castAsScalar (    N [i, 1]) + " = " 
+        + as.scalar (M [i, 1]) + "/" + as.scalar (    N [i, 1]) + " = " 
         + p3 + "%  [" + lw3 + "%, " + rw3 + "%]   [" + le3 + "%, " + re3 + "%]   ");
 }
 

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/descriptivestats/OddsRatio.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/descriptivestats/OddsRatio.dml b/src/test/scripts/applications/descriptivestats/OddsRatio.dml
index ae52e03..3d8c84b 100644
--- a/src/test/scripts/applications/descriptivestats/OddsRatio.dml
+++ b/src/test/scripts/applications/descriptivestats/OddsRatio.dml
@@ -61,10 +61,10 @@ else {
 
 # Given a 2x2 contingency table, it computes oddsRatio and the corresponding confidence interval
 pair_corr = function(Matrix[Double] A) return (Double oddsRatio, Double left_conf, Double right_conf, Double sd, Double chisquared, Double pvalue, Double crv, Double sigma_away, Double df) {
-	a11 = castAsScalar(A[1,1]);
-	a12 = castAsScalar(A[1,2]);
-	a21 = castAsScalar(A[2,1]);
-	a22 = castAsScalar(A[2,2]);
+	a11 = as.scalar(A[1,1]);
+	a12 = as.scalar(A[1,2]);
+	a21 = as.scalar(A[2,1]);
+	a22 = as.scalar(A[2,2]);
 
 	sd = sqrt(1/a11 + 1/a12 + 1/a21 + 1/a22);
 	oddsRatio = (a11*a22)/(a12*a21);

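A minimal DML sketch (hypothetical, not part of this patch) of the as.scalar idiom in pair_corr above: each cell of the 2x2 contingency table is cast to a scalar before the odds ratio is computed.

    # hypothetical DML example: cells of a 2x2 table as scalars
    A = matrix(0.0, rows=2, cols=2);
    A[1,1] = 10;
    A[1,2] = 20;
    A[2,1] = 30;
    A[2,2] = 40;
    oddsRatio = (as.scalar(A[1,1]) * as.scalar(A[2,2])) / (as.scalar(A[1,2]) * as.scalar(A[2,1]));
    print("oddsRatio = " + oddsRatio);
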
http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/glm/GLM.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/glm/GLM.dml b/src/test/scripts/applications/glm/GLM.dml
index 32a55f8..16008e6 100644
--- a/src/test/scripts/applications/glm/GLM.dml
+++ b/src/test/scripts/applications/glm/GLM.dml
@@ -453,7 +453,7 @@ if (intercept_status == 2) {
 write (beta_out, fileB, format=fmtB);
 
 if (intercept_status == 1 | intercept_status == 2) {
-    intercept_value = castAsScalar (beta_out [num_features, 1]);
+    intercept_value = as.scalar (beta_out [num_features, 1]);
     beta_noicept = beta_out [1 : (num_features - 1), 1];
 } else {
     beta_noicept = beta_out [1 : num_features, 1];
@@ -461,9 +461,9 @@ if (intercept_status == 1 | intercept_status == 2) {
 min_beta = min (beta_noicept);
 max_beta = max (beta_noicept);
 tmp_i_min_beta = rowIndexMin (t(beta_noicept))
-i_min_beta = castAsScalar (tmp_i_min_beta [1, 1]);
+i_min_beta = as.scalar (tmp_i_min_beta [1, 1]);
 tmp_i_max_beta = rowIndexMax (t(beta_noicept))
-i_max_beta = castAsScalar (tmp_i_max_beta [1, 1]);
+i_max_beta = as.scalar (tmp_i_max_beta [1, 1]);
 
 #####  OVER-DISPERSION PART  #####
 

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/glm/GLM.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/glm/GLM.pydml b/src/test/scripts/applications/glm/GLM.pydml
index cb63302..e737a13 100644
--- a/src/test/scripts/applications/glm/GLM.pydml
+++ b/src/test/scripts/applications/glm/GLM.pydml
@@ -435,7 +435,7 @@ if (is_supported == 1):
         save (beta_out, fileB, format=fmtB)
         
         if (intercept_status == 1 | intercept_status == 2):
-            intercept_value = castAsScalar (beta_out [num_features, 1])
+            intercept_value = scalar (beta_out [num_features, 1])
             beta_noicept = beta_out [1 : (num_features - 1), 1]
         else:
             beta_noicept = beta_out [1 : num_features, 1]
@@ -443,9 +443,9 @@ if (is_supported == 1):
         min_beta = min (beta_noicept)
         max_beta = max (beta_noicept)
         tmp_i_min_beta = rowIndexMin (transpose(beta_noicept))
-        i_min_beta = castAsScalar (tmp_i_min_beta [1, 1])
+        i_min_beta = scalar (tmp_i_min_beta [1, 1])
         tmp_i_max_beta = rowIndexMax (transpose(beta_noicept))
-        i_max_beta = castAsScalar (tmp_i_max_beta [1, 1])
+        i_max_beta = scalar (tmp_i_max_beta [1, 1])
         
         #####  OVER-DISPERSION PART  #####
         

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/id3/id3.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/id3/id3.dml b/src/test/scripts/applications/id3/id3.dml
index e033ca2..a127fc8 100644
--- a/src/test/scripts/applications/id3/id3.dml
+++ b/src/test/scripts/applications/id3/id3.dml
@@ -107,7 +107,7 @@ id3_learn = function(Matrix[Double] X, Matrix[Double] y, Matrix[Double] X_subset
 	num_samples = sum(X_subset)
 	
 	print("num non zero labels: " + num_non_zero_labels)
-	mpl = castAsScalar(most_popular_label)
+	mpl = as.scalar(most_popular_label)
 	print("most popular label: " + mpl)
 	print("num remaining attrs: " + num_remaining_attrs)
 	
@@ -135,14 +135,14 @@ id3_learn = function(Matrix[Double] X, Matrix[Double] y, Matrix[Double] X_subset
 		sz = nrow(attributes)
 		gains = matrix(0, rows=sz, cols=1)
 		for(i in 1:nrow(attributes)){
-			if(castAsScalar(attributes[i,1]) == 1){
+			if(as.scalar(attributes[i,1]) == 1){
 				attr_vals = X[,i]
 				attr_domain = aggregate(target=X_subset, groups=attr_vals, fn="sum")
 
 				hxt_vector = matrix(0, rows=nrow(attr_domain), cols=1)
 				
         for(j in 1:nrow(attr_domain), check=0){
-					if(castAsScalar(attr_domain[j,1]) != 0){
+					if(as.scalar(attr_domain[j,1]) != 0){
 						val = j
 						Tj = X_subset * ppred(X[,i], val, "==")
 						
@@ -168,8 +168,8 @@ id3_learn = function(Matrix[Double] X, Matrix[Double] y, Matrix[Double] X_subset
 		max_gain = 0
 		for(i4 in 1:nrow(gains)){
 			#print("best attr " + best_attr + " max gain " + max_gain)
-			if(castAsScalar(attributes[i4,1]) == 1){
-				g = castAsScalar(gains[i4,1])
+			if(as.scalar(attributes[i4,1]) == 1){
+				g = as.scalar(gains[i4,1])
 				if(best_attr == -1 | max_gain <= g){
 					max_gain = g
 					best_attr = i4
@@ -212,7 +212,7 @@ id3_learn = function(Matrix[Double] X, Matrix[Double] y, Matrix[Double] X_subset
 				start_pt = 1+(i1-1)*max_sz_subtree
 				tempNodeStore[,start_pt:(start_pt+nrow(nodesi)-1)] = t(nodesi)
 				numSubtreeNodes[i1,1] = nrow(nodesi)
-				if(nrow(edgesi)!=1 | ncol(edgesi)!=1 | castAsScalar(edgesi[1,1])!=-1){
+				if(nrow(edgesi)!=1 | ncol(edgesi)!=1 | as.scalar(edgesi[1,1])!=-1){
 					tempEdgeStore[,start_pt:(start_pt+nrow(edgesi)-1)] = t(edgesi)
 					numSubtreeEdges[i1,1] = nrow(edgesi)
 				}else{
@@ -239,7 +239,7 @@ id3_learn = function(Matrix[Double] X, Matrix[Double] y, Matrix[Double] X_subset
 		edges = matrix(1, rows=sz, cols=3)
 		numEdges = 0
 		for(i6 in 1:nrow(attr_domain)){
-			num_nodesi = castAsScalar(numSubtreeNodes[i6,1])
+			num_nodesi = as.scalar(numSubtreeNodes[i6,1])
 			if(num_nodesi > 0){
 				edges[numEdges+1,2] = i6
 				numEdges = numEdges + 1
@@ -248,13 +248,13 @@ id3_learn = function(Matrix[Double] X, Matrix[Double] y, Matrix[Double] X_subset
 		
 		nonEmptyAttri = 0
 		for(i7 in 1:nrow(attr_domain)){
-			numNodesInSubtree = castAsScalar(numSubtreeNodes[i7,1])
+			numNodesInSubtree = as.scalar(numSubtreeNodes[i7,1])
 		
 			if(numNodesInSubtree > 0){
 				start_pt1 = 1 + (i7-1)*max_sz_subtree
 				nodes[numNodes+1:numNodes+numNodesInSubtree,] = t(tempNodeStore[,start_pt1:(start_pt1+numNodesInSubtree-1)])
 			
-				numEdgesInSubtree = castAsScalar(numSubtreeEdges[i7,1])
+				numEdgesInSubtree = as.scalar(numSubtreeEdges[i7,1])
 			
 				if(numEdgesInSubtree!=0){
 					edgesi1 = t(tempEdgeStore[,start_pt1:(start_pt1+numEdgesInSubtree-1)])
@@ -298,9 +298,9 @@ y = y + labelCorrection + 0
 nodes[,2] = nodes[,2] - labelCorrection * ppred(nodes[,1], -1, "==")
 for(i3 in 1:nrow(edges)){
 #parfor(i3 in 1:nrow(edges)){
-	e_parent = castAsScalar(edges[i3,1])
-	parent_feature = castAsScalar(nodes[e_parent,1])
-	correction = castAsScalar(featureCorrections[1,parent_feature])
+	e_parent = as.scalar(edges[i3,1])
+	parent_feature = as.scalar(nodes[e_parent,1])
+	correction = as.scalar(featureCorrections[1,parent_feature])
 	edges[i3,2] = edges[i3,2] - correction
 }
 

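The id3 script above builds per-value masks with ppred and group-wise sums with aggregate, extracting counts via as.scalar; a minimal DML sketch (hypothetical, not part of this patch) of that pattern:

    # hypothetical DML example: predicate mask and group-wise sum
    X = matrix(0.0, rows=4, cols=1);
    X[2,1] = 3;
    X[4,1] = 3;
    mask = ppred(X, 3, "==");                                   # 1 where X == 3, else 0
    print("count of value 3: " + sum(mask));
    groups = matrix(1.0, rows=4, cols=1);                       # group ids in {1, 2}
    groups[3,1] = 2;
    groups[4,1] = 2;
    counts = aggregate(target=mask, groups=groups, fn="sum");   # per-group sums
    print("group 1 count: " + as.scalar(counts[1,1]));
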
http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/id3/id3.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/id3/id3.pydml b/src/test/scripts/applications/id3/id3.pydml
index a59e901..17a10e4 100644
--- a/src/test/scripts/applications/id3/id3.pydml
+++ b/src/test/scripts/applications/id3/id3.pydml
@@ -107,7 +107,7 @@ def id3_learn(X:matrix[float], y:matrix[float], X_subset:matrix[float], attribut
     num_samples = sum(X_subset)
     
     print("num non zero labels: " + num_non_zero_labels)
-    mpl = castAsScalar(most_popular_label)
+    mpl = scalar(most_popular_label)
     print("most popular label: " + mpl)
     print("num remaining attrs: " + num_remaining_attrs)
     
@@ -135,14 +135,14 @@ def id3_learn(X:matrix[float], y:matrix[float], X_subset:matrix[float], attribut
         sz = nrow(attributes)
         gains = full(0, rows=sz, cols=1)
         for(i in 1:nrow(attributes)):
-            if(castAsScalar(attributes[i,1]) == 1):
+            if(scalar(attributes[i,1]) == 1):
                 attr_vals = X[,i]
                 attr_domain = aggregate(target=X_subset, groups=attr_vals, fn="sum")
                 
                 hxt_vector = full(0, rows=nrow(attr_domain), cols=1)
                 
                 for(j in 1:nrow(attr_domain), check=0):
-                    if(castAsScalar(attr_domain[j,1]) != 0):
+                    if(scalar(attr_domain[j,1]) != 0):
                         val = j
                         Tj = X_subset * ppred(X[,i], val, "==")
                         
@@ -164,8 +164,8 @@ def id3_learn(X:matrix[float], y:matrix[float], X_subset:matrix[float], attribut
         max_gain = 0
         for(i4 in 1:nrow(gains)):
             #print("best attr " + best_attr + " max gain " + max_gain)
-            if(castAsScalar(attributes[i4,1]) == 1):
-                g = castAsScalar(gains[i4,1])
+            if(scalar(attributes[i4,1]) == 1):
+                g = scalar(gains[i4,1])
                 if(best_attr == -1 | max_gain <= g):
                     max_gain = g
                     best_attr = i4
@@ -203,7 +203,7 @@ def id3_learn(X:matrix[float], y:matrix[float], X_subset:matrix[float], attribut
                 start_pt = 1+(i1-1)*max_sz_subtree
                 tempNodeStore[,start_pt:(start_pt+nrow(nodesi)-1)] = t(nodesi)
                 numSubtreeNodes[i1,1] = nrow(nodesi)
-                if(nrow(edgesi)!=1 | ncol(edgesi)!=1 | castAsScalar(edgesi[1,1])!=-1):
+                if(nrow(edgesi)!=1 | ncol(edgesi)!=1 | scalar(edgesi[1,1])!=-1):
                     tempEdgeStore[,start_pt:(start_pt+nrow(edgesi)-1)] = t(edgesi)
                     numSubtreeEdges[i1,1] = nrow(edgesi)
                 else:
@@ -227,20 +227,20 @@ def id3_learn(X:matrix[float], y:matrix[float], X_subset:matrix[float], attribut
         edges = full(1, rows=sz, cols=3)
         numEdges = 0
         for(i6 in 1:nrow(attr_domain)):
-            num_nodesi = castAsScalar(numSubtreeNodes[i6,1])
+            num_nodesi = scalar(numSubtreeNodes[i6,1])
             if(num_nodesi > 0):
                 edges[numEdges+1,2] = i6
                 numEdges = numEdges + 1
         
         nonEmptyAttri = 0
         for(i7 in 1:nrow(attr_domain)):
-            numNodesInSubtree = castAsScalar(numSubtreeNodes[i7,1])
+            numNodesInSubtree = scalar(numSubtreeNodes[i7,1])
             
             if(numNodesInSubtree > 0):
                 start_pt1 = 1 + (i7-1)*max_sz_subtree
                 nodes[numNodes+1:numNodes+numNodesInSubtree,] = transpose(tempNodeStore[,start_pt1:(start_pt1+numNodesInSubtree-1)])
                 
-                numEdgesInSubtree = castAsScalar(numSubtreeEdges[i7,1])
+                numEdgesInSubtree = scalar(numSubtreeEdges[i7,1])
                 
                 if(numEdgesInSubtree!=0):
                     edgesi1 = transpose(tempEdgeStore[,start_pt1:(start_pt1+numEdgesInSubtree-1)])
@@ -279,9 +279,9 @@ y = y + labelCorrection + 0
 nodes[,2] = nodes[,2] - labelCorrection * ppred(nodes[,1], -1, "==")
 for(i3 in 1:nrow(edges)):
 #parfor(i3 in 1:nrow(edges)):
-    e_parent = castAsScalar(edges[i3,1])
-    parent_feature = castAsScalar(nodes[e_parent,1])
-    correction = castAsScalar(featureCorrections[1,parent_feature])
+    e_parent = scalar(edges[i3,1])
+    parent_feature = scalar(nodes[e_parent,1])
+    correction = scalar(featureCorrections[1,parent_feature])
     edges[i3,2] = edges[i3,2] - correction
 
 save(nodes, $3, format="text")

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/impute/imputeGaussMCMC.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/impute/imputeGaussMCMC.dml b/src/test/scripts/applications/impute/imputeGaussMCMC.dml
index 21ecaee..6575d3b 100644
--- a/src/test/scripts/applications/impute/imputeGaussMCMC.dml
+++ b/src/test/scripts/applications/impute/imputeGaussMCMC.dml
@@ -179,7 +179,7 @@ while (is_opt_converged == 0)
         }
         q = t(gradient_change_p);
         
-        alpha = norm_r2 / castAsScalar (t(p) %*% q);
+        alpha = norm_r2 / as.scalar (t(p) %*% q);
         shift_vector_change = alpha * p;
         shift_vector = shift_vector + shift_vector_change;
         old_norm_r2 = norm_r2;
@@ -286,7 +286,7 @@ while (is_enough_gradient_descent == 0)
         q_frees  = t(gradientInFrees_eps_p  - gradientInFrees)  / cg_eps;
         q_params = t(gradientInParams_eps_p - gradientInParams) / cg_eps;
         
-        alpha = norm_r2 / castAsScalar (t(p_frees) %*% q_frees + t(p_params) %*% q_params);
+        alpha = norm_r2 / as.scalar (t(p_frees) %*% q_frees + t(p_params) %*% q_params);
 
         shift_frees  = shift_frees  + alpha * p_frees;
         shift_params = shift_params + alpha * p_params;
@@ -475,8 +475,8 @@ left_swap  = round (0.5 + dim_sample * rnd);
 rnd = Rand (rows = num_swaps, cols = 1, min = 0.0, max = 1.0);
 right_swap = round (0.5 + dim_sample * rnd);
 for (swap_i in 1:num_swaps) {
-    l = castAsScalar (left_swap  [swap_i, 1]);
-    r = castAsScalar (right_swap [swap_i, 1]);
+    l = as.scalar (left_swap  [swap_i, 1]);
+    r = as.scalar (right_swap [swap_i, 1]);
     if (l != r) {
         tmp_row = SampleOrder [l, ];
         SampleOrder [l, ] = SampleOrder [r, ];
@@ -560,7 +560,7 @@ while ((iter < max_num_iter) & (num_of_observed_reports < max_num_observed_itera
     
     # Create a normally distributed random sample
     
-    dim_half_sample = castAsScalar (round (dim_sample / 2 + 0.1 + zero));
+    dim_half_sample = as.scalar (round (dim_sample / 2 + 0.1 + zero));
     rnd1 = Rand (rows = dim_half_sample, cols = 1, min = 0.0, max = 1.0);
     rnd2 = Rand (rows = dim_half_sample, cols = 1, min = 0.0, max = 1.0);
     rnd_normal_1 = sqrt (- 2.0 * log (rnd1)) * sin (2 * pi * rnd2);
@@ -583,7 +583,7 @@ while ((iter < max_num_iter) & (num_of_observed_reports < max_num_observed_itera
     {
         # Generate the sample unit-vector and updaters
         
-        if (castAsScalar (isVar [1, idx]) > 0.5) {
+        if (as.scalar (isVar [1, idx]) > 0.5) {
             freeVars_updater = SampleOrder [1 : num_frees, idx];
             regresValues_updater = RegresValueMap %*% CReps %*% freeVars_updater;
             bilinear_updater_vector = regresValues_updater * regresParams;
@@ -607,18 +607,18 @@ while ((iter < max_num_iter) & (num_of_observed_reports < max_num_observed_itera
             
         mean_shift  = - coeff_b / (2.0 * coeff_a);
         sigma_shift = 1.0 / sqrt (2.0 * coeff_a);
-        shift = mean_shift + sigma_shift * castAsScalar (rnd_normal [idx, 1]);
+        shift = mean_shift + sigma_shift * as.scalar (rnd_normal [idx, 1]);
             
 # BEGIN DEBUG INSERT
 # mmm = 1;
-# if (castAsScalar (isVar [1, idx]) > 0.5 &          # IT IS A FREE VARIABLE, NOT A PARAMETER
-#     castAsScalar (freeVars_updater [mmm, 1]) > 0)  # IT IS mmm-TH FREE VARIABLE
+# if (as.scalar (isVar [1, idx]) > 0.5 &          # IT IS A FREE VARIABLE, NOT A PARAMETER
+#     as.scalar (freeVars_updater [mmm, 1]) > 0)  # IT IS mmm-TH FREE VARIABLE
 # {
 # #   print ("freeVars[" + mmm + "]:  q_minus_1 = " + q_minus_1 + ",   q_plus_1 = " + q_plus_1 + ",   coeff_a = " + coeff_a + ",   coeff_b = " + coeff_b);
 #     print ("freeVars[" + mmm + "]:  q_minus_1 = " + q_minus_1 + ",   q_plus_1 = " + q_plus_1 + ",   mean_shift = " + mean_shift + ",   sigma_shift = " + sigma_shift + ",   shift = " + shift);
 # }
-# if (castAsScalar (isVar [1, idx]) <= 0.5 &       # IT IS A PARAMETER, NOT A FREE VARIABLE
-#     castAsScalar (params_updater [mmm, 1]) > 0)  # IT IS mmm-TH PARAMETER
+# if (as.scalar (isVar [1, idx]) <= 0.5 &       # IT IS A PARAMETER, NOT A FREE VARIABLE
+#     as.scalar (params_updater [mmm, 1]) > 0)  # IT IS mmm-TH PARAMETER
 # {
 # #   print ("  params[" + mmm + "]:  q_minus_1 = " + q_minus_1 + ",   q_plus_1 = " + q_plus_1 + ",   coeff_a = " + coeff_a + ",   coeff_b = " + coeff_b);
 #     print ("  params[" + mmm + "]:  q_minus_1 = " + q_minus_1 + ",   q_plus_1 = " + q_plus_1 + ",   mean_shift = " + mean_shift + ",   sigma_shift = " + sigma_shift + ",   shift = " + shift);
@@ -628,7 +628,7 @@ while ((iter < max_num_iter) & (num_of_observed_reports < max_num_observed_itera
         # Perform the updates
 
         bilinear_form = bilinear_form + shift * bilinear_updater;
-        if (castAsScalar (isVar [1, idx]) > 0.5) {
+        if (as.scalar (isVar [1, idx]) > 0.5) {
             freeVars = freeVars + shift * freeVars_updater;
             regresValues = regresValues + shift * regresValues_updater;
         } else {
@@ -653,21 +653,21 @@ while ((iter < max_num_iter) & (num_of_observed_reports < max_num_observed_itera
         sum_of_observed_losses = sum_of_observed_losses + bilinear_form_value;
     }
 
-# v1 =castAsScalar(round(10000*reports[1 + (num_terms - 1) * num_attrs, 1])/10000);
-# v2 =castAsScalar(round(10000*reports[2 + (num_terms - 1) * num_attrs, 1])/10000);
-# v3 =castAsScalar(round(10000*reports[3 + (num_terms - 1) * num_attrs, 1])/10000);
-# v4 =castAsScalar(round(10000*reports[4 + (num_terms - 1) * num_attrs, 1])/10000);
-# w1 =castAsScalar(round(10000*reports_matrix[ 1,num_terms])/10000);
-# w2 =castAsScalar(round(10000*reports_matrix[ 2,num_terms])/10000);
-# w3 =castAsScalar(round(10000*reports_matrix[ 3,num_terms])/10000);
-# w4 =castAsScalar(round(10000*reports_matrix[ 4,num_terms])/10000);
-
-# v5 =castAsScalar(round(reports_matrix[ 5,num_terms]));
-# v8 =castAsScalar(round(reports_matrix[ 8,num_terms]));
-# v9 =castAsScalar(round(reports_matrix[ 9,num_terms]));
-# v10=castAsScalar(round(reports_matrix[10,num_terms]));
-# v16=castAsScalar(round(reports_matrix[16,num_terms]));
-# v19=castAsScalar(round(reports_matrix[19,num_terms]));
+# v1 =as.scalar(round(10000*reports[1 + (num_terms - 1) * num_attrs, 1])/10000);
+# v2 =as.scalar(round(10000*reports[2 + (num_terms - 1) * num_attrs, 1])/10000);
+# v3 =as.scalar(round(10000*reports[3 + (num_terms - 1) * num_attrs, 1])/10000);
+# v4 =as.scalar(round(10000*reports[4 + (num_terms - 1) * num_attrs, 1])/10000);
+# w1 =as.scalar(round(10000*reports_matrix[ 1,num_terms])/10000);
+# w2 =as.scalar(round(10000*reports_matrix[ 2,num_terms])/10000);
+# w3 =as.scalar(round(10000*reports_matrix[ 3,num_terms])/10000);
+# w4 =as.scalar(round(10000*reports_matrix[ 4,num_terms])/10000);
+
+# v5 =as.scalar(round(reports_matrix[ 5,num_terms]));
+# v8 =as.scalar(round(reports_matrix[ 8,num_terms]));
+# v9 =as.scalar(round(reports_matrix[ 9,num_terms]));
+# v10=as.scalar(round(reports_matrix[10,num_terms]));
+# v16=as.scalar(round(reports_matrix[16,num_terms]));
+# v19=as.scalar(round(reports_matrix[19,num_terms]));
 
 #print (" Sample = 1:" + v1 + ", 2:" + v2 + ", 3:" + v3 + ", 4:" + v4);
 ## + ", 5:" + v5 + ", 8:" + v8 + ", 9:" + v9 + ", 10:" + v10 + ", 16:" + v16 + ", 19:" + v19);

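The sampler above generates standard-normal draws with a Box-Muller transform and then rescales one draw per coordinate via as.scalar; a minimal DML sketch (hypothetical, not part of this patch) of that step:

    # hypothetical DML example: Box-Muller normals and one rescaled draw
    pi = 3.141592653589793;
    rnd1 = Rand(rows=5, cols=1, min=0.0, max=1.0);
    rnd2 = Rand(rows=5, cols=1, min=0.0, max=1.0);
    rnd_normal = sqrt(-2.0 * log(rnd1)) * sin(2 * pi * rnd2);   # as in the script (log(0) not guarded)
    shift = 0.0 + 2.0 * as.scalar(rnd_normal[1,1]);             # mean 0.0, sigma 2.0
    print("shift = " + shift);
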
http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/impute/imputeGaussMCMC.nogradient.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/impute/imputeGaussMCMC.nogradient.dml b/src/test/scripts/applications/impute/imputeGaussMCMC.nogradient.dml
index 00210c5..897fc21 100644
--- a/src/test/scripts/applications/impute/imputeGaussMCMC.nogradient.dml
+++ b/src/test/scripts/applications/impute/imputeGaussMCMC.nogradient.dml
@@ -181,7 +181,7 @@ while (is_opt_converged == 0)
             q [i, 1] = (quadratic_plus_both - quadratic_plus_1 - quadratic_plus_p + bilinear_form_value) + q [i, 1];
         }
         
-        alpha = norm_r2 / castAsScalar (t(p) %*% q);
+        alpha = norm_r2 / as.scalar (t(p) %*% q);
         shift_vector = shift_vector + alpha * p;
         old_norm_r2 = norm_r2;
         residual = residual + alpha * q;
@@ -238,8 +238,8 @@ left_swap  = round (0.5 + dim_sample * rnd);
 rnd = Rand (rows = num_swaps, cols = 1, min = 0.0, max = 1.0);
 right_swap = round (0.5 + dim_sample * rnd);
 for (swap_i in 1:num_swaps) {
-    l = castAsScalar (left_swap  [swap_i, 1]);
-    r = castAsScalar (right_swap [swap_i, 1]);
+    l = as.scalar (left_swap  [swap_i, 1]);
+    r = as.scalar (right_swap [swap_i, 1]);
     if (l != r) {
         tmp_row = SampleOrder [l, ];
         SampleOrder [l, ] = SampleOrder [r, ];
@@ -324,7 +324,7 @@ while ((iter < max_num_iter) & (num_of_observed_reports < max_num_observed_itera
     
     # Create a normally distributed random sample
     
-    dim_half_sample = castAsScalar (round (dim_sample / 2 + 0.1 + zero));
+    dim_half_sample = as.scalar (round (dim_sample / 2 + 0.1 + zero));
     rnd1 = Rand (rows = dim_half_sample, cols = 1, min = 0.0, max = 1.0);
     rnd2 = Rand (rows = dim_half_sample, cols = 1, min = 0.0, max = 1.0);
     rnd_normal_1 = sqrt (- 2.0 * log (rnd1)) * sin (2 * pi * rnd2);
@@ -347,7 +347,7 @@ while ((iter < max_num_iter) & (num_of_observed_reports < max_num_observed_itera
     {
         # Generate the sample unit-vector and updaters
         
-        if (castAsScalar (isVar [1, idx]) > 0.5) {
+        if (as.scalar (isVar [1, idx]) > 0.5) {
             freeVars_updater = SampleOrder [1 : num_frees, idx];
             regresValues_updater = RegresValueMap %*% CReps %*% freeVars_updater;
             bilinear_updater_vector = regresValues_updater * regresParams;
@@ -372,18 +372,18 @@ while ((iter < max_num_iter) & (num_of_observed_reports < max_num_observed_itera
             
         mean_shift  = - coeff_b / (2.0 * coeff_a);
         sigma_shift = 1.0 / sqrt (2.0 * coeff_a);
-        shift = mean_shift + sigma_shift * castAsScalar (rnd_normal [idx, 1]);
+        shift = mean_shift + sigma_shift * as.scalar (rnd_normal [idx, 1]);
             
 # BEGIN DEBUG INSERT
 # mmm = 1;
-# if (castAsScalar (isVar [1, idx]) > 0.5 &          # IT IS A FREE VARIABLE, NOT A PARAMETER
-#     castAsScalar (freeVars_updater [mmm, 1]) > 0)  # IT IS mmm-TH FREE VARIABLE
+# if (as.scalar (isVar [1, idx]) > 0.5 &          # IT IS A FREE VARIABLE, NOT A PARAMETER
+#     as.scalar (freeVars_updater [mmm, 1]) > 0)  # IT IS mmm-TH FREE VARIABLE
 # {
 # #   print ("freeVars[" + mmm + "]:  q_minus_1 = " + q_minus_1 + ",   q_plus_1 = " + q_plus_1 + ",   coeff_a = " + coeff_a + ",   coeff_b = " + coeff_b);
 #     print ("freeVars[" + mmm + "]:  q_minus_1 = " + q_minus_1 + ",   q_plus_1 = " + q_plus_1 + ",   mean_shift = " + mean_shift + ",   sigma_shift = " + sigma_shift + ",   shift = " + shift);
 # }
-# if (castAsScalar (isVar [1, idx]) <= 0.5 &       # IT IS A PARAMETER, NOT A FREE VARIABLE
-#     castAsScalar (params_updater [mmm, 1]) > 0)  # IT IS mmm-TH PARAMETER
+# if (as.scalar (isVar [1, idx]) <= 0.5 &       # IT IS A PARAMETER, NOT A FREE VARIABLE
+#     as.scalar (params_updater [mmm, 1]) > 0)  # IT IS mmm-TH PARAMETER
 # {
 # #   print ("  params[" + mmm + "]:  q_minus_1 = " + q_minus_1 + ",   q_plus_1 = " + q_plus_1 + ",   coeff_a = " + coeff_a + ",   coeff_b = " + coeff_b);
 #     print ("  params[" + mmm + "]:  q_minus_1 = " + q_minus_1 + ",   q_plus_1 = " + q_plus_1 + ",   mean_shift = " + mean_shift + ",   sigma_shift = " + sigma_shift + ",   shift = " + shift);
@@ -393,7 +393,7 @@ while ((iter < max_num_iter) & (num_of_observed_reports < max_num_observed_itera
         # Perform the updates
 
         bilinear_form = bilinear_form + shift * bilinear_updater;
-        if (castAsScalar (isVar [1, idx]) > 0.5) {
+        if (as.scalar (isVar [1, idx]) > 0.5) {
             freeVars = freeVars + shift * freeVars_updater;
             regresValues = regresValues + shift * regresValues_updater;
         } else {
@@ -419,21 +419,21 @@ while ((iter < max_num_iter) & (num_of_observed_reports < max_num_observed_itera
         sum_of_observed_losses = sum_of_observed_losses + bilinear_form_value;
     }
 
-# v1 =castAsScalar(round(10000*reports[1 + (num_terms - 1) * num_attrs, 1])/10000);
-# v2 =castAsScalar(round(10000*reports[2 + (num_terms - 1) * num_attrs, 1])/10000);
-# v3 =castAsScalar(round(10000*reports[3 + (num_terms - 1) * num_attrs, 1])/10000);
-# v4 =castAsScalar(round(10000*reports[4 + (num_terms - 1) * num_attrs, 1])/10000);
-# w1 =castAsScalar(round(10000*reports_matrix[ 1,num_terms])/10000);
-# w2 =castAsScalar(round(10000*reports_matrix[ 2,num_terms])/10000);
-# w3 =castAsScalar(round(10000*reports_matrix[ 3,num_terms])/10000);
-# w4 =castAsScalar(round(10000*reports_matrix[ 4,num_terms])/10000);
-
-# v5 =castAsScalar(round(reports_matrix[ 5,num_terms]));
-# v8 =castAsScalar(round(reports_matrix[ 8,num_terms]));
-# v9 =castAsScalar(round(reports_matrix[ 9,num_terms]));
-# v10=castAsScalar(round(reports_matrix[10,num_terms]));
-# v16=castAsScalar(round(reports_matrix[16,num_terms]));
-# v19=castAsScalar(round(reports_matrix[19,num_terms]));
+# v1 =as.scalar(round(10000*reports[1 + (num_terms - 1) * num_attrs, 1])/10000);
+# v2 =as.scalar(round(10000*reports[2 + (num_terms - 1) * num_attrs, 1])/10000);
+# v3 =as.scalar(round(10000*reports[3 + (num_terms - 1) * num_attrs, 1])/10000);
+# v4 =as.scalar(round(10000*reports[4 + (num_terms - 1) * num_attrs, 1])/10000);
+# w1 =as.scalar(round(10000*reports_matrix[ 1,num_terms])/10000);
+# w2 =as.scalar(round(10000*reports_matrix[ 2,num_terms])/10000);
+# w3 =as.scalar(round(10000*reports_matrix[ 3,num_terms])/10000);
+# w4 =as.scalar(round(10000*reports_matrix[ 4,num_terms])/10000);
+
+# v5 =as.scalar(round(reports_matrix[ 5,num_terms]));
+# v8 =as.scalar(round(reports_matrix[ 8,num_terms]));
+# v9 =as.scalar(round(reports_matrix[ 9,num_terms]));
+# v10=as.scalar(round(reports_matrix[10,num_terms]));
+# v16=as.scalar(round(reports_matrix[16,num_terms]));
+# v19=as.scalar(round(reports_matrix[19,num_terms]));
 
 #print (" Sample = 1:" + v1 + ", 2:" + v2 + ", 3:" + v3 + ", 4:" + v4);
 ## + ", 5:" + v5 + ", 8:" + v8 + ", 9:" + v9 + ", 10:" + v10 + ", 16:" + v16 + ", 19:" + v19);

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/impute/old/imputeGaussMCMC.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/impute/old/imputeGaussMCMC.dml b/src/test/scripts/applications/impute/old/imputeGaussMCMC.dml
index 77bd21c..7f9a875 100644
--- a/src/test/scripts/applications/impute/old/imputeGaussMCMC.dml
+++ b/src/test/scripts/applications/impute/old/imputeGaussMCMC.dml
@@ -173,7 +173,7 @@ while (is_opt_converged == 0)
             q [i, 1] = (quadratic_plus_both - quadratic_plus_1 - quadratic_plus_p + bilinear_form_value) + q [i, 1];
         }
         
-        alpha = norm_r2 / castAsScalar (t(p) %*% q);
+        alpha = norm_r2 / as.scalar (t(p) %*% q);
         shift_vector = shift_vector + alpha * p;
         old_norm_r2 = norm_r2;
         residual = residual + alpha * q;
@@ -228,8 +228,8 @@ left_swap  = round (0.5 + dim_sample * rnd);
 rnd = Rand (rows = num_swaps, cols = 1, min = 0.0, max = 1.0);
 right_swap = round (0.5 + dim_sample * rnd);
 for (swap_i in 1:num_swaps) {
-    l = castAsScalar (left_swap  [swap_i, 1]);
-    r = castAsScalar (right_swap [swap_i, 1]);
+    l = as.scalar (left_swap  [swap_i, 1]);
+    r = as.scalar (right_swap [swap_i, 1]);
     if (l != r) {
         tmp_row = SampleOrder [l, ];
         SampleOrder [l, ] = SampleOrder [r, ];
@@ -265,7 +265,7 @@ for (iter in 1:num_iter)
     
     # Create a normally distributed random sample
     
-    dim_half_sample = castAsScalar (round (dim_sample / 2 + 0.1 + zero));
+    dim_half_sample = as.scalar (round (dim_sample / 2 + 0.1 + zero));
     rnd1 = Rand (rows = dim_half_sample, cols = 1, min = 0.0, max = 1.0);
     rnd2 = Rand (rows = dim_half_sample, cols = 1, min = 0.0, max = 1.0);
     rnd_normal_1 = sqrt (- 2.0 * log (rnd1)) * sin (2 * pi * rnd2);
@@ -288,7 +288,7 @@ for (iter in 1:num_iter)
     {
         # Generate the sample unit-vector and updaters
         
-        if (castAsScalar (isVar [1, idx]) > 0.5) {
+        if (as.scalar (isVar [1, idx]) > 0.5) {
             freeVars_updater = SampleOrder [1 : num_frees, idx];
             regresValues_updater = RegresValueMap %*% CReps %*% freeVars_updater;
             bilinear_updater_vector = regresValues_updater * regresParams;
@@ -309,11 +309,11 @@ for (iter in 1:num_iter)
 
 # BEGIN DEBUG INSERT
 # mmm = 1;
-# if (castAsScalar (isVar [1, idx]) > 0.5) {
-# for (iii in 2:num_frees) {if (castAsScalar (freeVars_updater [iii, 1] - freeVars_updater [mmm, 1]) > 0) {mmm = iii;}}
+# if (as.scalar (isVar [1, idx]) > 0.5) {
+# for (iii in 2:num_frees) {if (as.scalar (freeVars_updater [iii, 1] - freeVars_updater [mmm, 1]) > 0) {mmm = iii;}}
 # print ("freeVars[" + mmm + "]:  q_minus_1 = " + q_minus_1 + ",   q_plus_1 = " + q_plus_1 + ",   coeff_a = " + coeff_a);
 # } else {
-# for (iii in 2:num_params) {if (castAsScalar (params_updater [iii, 1] - params_updater [mmm, 1]) > 0) {mmm = iii;}}
+# for (iii in 2:num_params) {if (as.scalar (params_updater [iii, 1] - params_updater [mmm, 1]) > 0) {mmm = iii;}}
 # print ("  params[" + mmm + "]:  q_minus_1 = " + q_minus_1 + ",   q_plus_1 = " + q_plus_1 + ",   coeff_a = " + coeff_a);
 # }
 # END DEBUG INSERT
@@ -323,12 +323,12 @@ for (iter in 1:num_iter)
             
         mean_shift  = - coeff_b / (2.0 * coeff_a);
         sigma_shift = 1.0 / sqrt (2.0 * coeff_a);
-        shift = mean_shift + sigma_shift * castAsScalar (rnd_normal [idx, 1]);
+        shift = mean_shift + sigma_shift * as.scalar (rnd_normal [idx, 1]);
             
         # Perform the updates
 
         bilinear_form = bilinear_form + shift * bilinear_updater;
-        if (castAsScalar (isVar [1, idx]) > 0.5) {
+        if (as.scalar (isVar [1, idx]) > 0.5) {
             freeVars = freeVars + shift * freeVars_updater;
             regresValues = regresValues + shift * regresValues_updater;
         } else {
@@ -353,16 +353,16 @@ for (iter in 1:num_iter)
     }
 
 
-v1 =castAsScalar(round(reports_matrix[ 1,num_terms]));
-v2 =castAsScalar(round(reports_matrix[ 2,num_terms]));
-v3 =castAsScalar(round(reports_matrix[ 3,num_terms]));
-v4 =castAsScalar(round(reports_matrix[ 4,num_terms]));
-v5 =castAsScalar(round(reports_matrix[ 5,num_terms]));
-v8 =castAsScalar(round(reports_matrix[ 8,num_terms]));
-v9 =castAsScalar(round(reports_matrix[ 9,num_terms]));
-v10=castAsScalar(round(reports_matrix[10,num_terms]));
-v16=castAsScalar(round(reports_matrix[16,num_terms]));
-v19=castAsScalar(round(reports_matrix[19,num_terms]));
+v1 =as.scalar(round(reports_matrix[ 1,num_terms]));
+v2 =as.scalar(round(reports_matrix[ 2,num_terms]));
+v3 =as.scalar(round(reports_matrix[ 3,num_terms]));
+v4 =as.scalar(round(reports_matrix[ 4,num_terms]));
+v5 =as.scalar(round(reports_matrix[ 5,num_terms]));
+v8 =as.scalar(round(reports_matrix[ 8,num_terms]));
+v9 =as.scalar(round(reports_matrix[ 9,num_terms]));
+v10=as.scalar(round(reports_matrix[10,num_terms]));
+v16=as.scalar(round(reports_matrix[16,num_terms]));
+v19=as.scalar(round(reports_matrix[19,num_terms]));
 print (
 " Sample = 1:" + v1 + ", 2:" + v2 + ", 3:" + v3 + ", 4:" + v4 + ", 5:" + v5 +
 ", 8:" + v8 + ", 9:" + v9 + ", 10:" + v10 + ", 16:" + v16 + ", 19:" + v19);
@@ -402,7 +402,7 @@ matricize = function (Matrix[double] v, int n_rows) return (Matrix[double] M)
 {
     zero = matrix (0.0, rows = 1, cols = 1);
     n = nrow (v);
-    n_cols = castAsScalar (round (zero + (n / n_rows)));
+    n_cols = as.scalar (round (zero + (n / n_rows)));
     if (n_cols * n_rows < n) {
         n_cols = n_cols + 1;
     }

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/impute/tmp.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/impute/tmp.dml b/src/test/scripts/applications/impute/tmp.dml
index 9e0417e..c852cce 100644
--- a/src/test/scripts/applications/impute/tmp.dml
+++ b/src/test/scripts/applications/impute/tmp.dml
@@ -26,7 +26,7 @@ blahblah = 0.0 / 0.0; # -0.00099999999;
 print (blahblah);
 x = matrix (0.0, rows = 55, cols = 1);
 x [55, 1] = blahblah;
-print (castAsScalar (x [55, 1]));
+print (as.scalar (x [55, 1]));
 for (i in 1:9) {
     x [i, 1] = -0.001 * i;
 }
@@ -36,9 +36,9 @@ for (i in 1:5) {
 y = atan_temporary (x);
 z = tan (y);
 for (i in 1:nrow(x)) {
-    [x_m, x_e] = round_to_print (castAsScalar (x[i,1]));
-    [a_m, a_e] = round_to_print (castAsScalar (y[i,1]));
-    [t_m, t_e] = round_to_print (castAsScalar (z[i,1]));
+    [x_m, x_e] = round_to_print (as.scalar (x[i,1]));
+    [a_m, a_e] = round_to_print (as.scalar (y[i,1]));
+    [t_m, t_e] = round_to_print (as.scalar (z[i,1]));
     print ("x = " + x_m + "E" + x_e + ";  atan(x) = " + a_m + "E" + a_e + ";  tan(atan(x)) = " + t_m + "E" + t_e);
 }
 

http://git-wip-us.apache.org/repos/asf/incubator-systemml/blob/2da81457/src/test/scripts/applications/impute/wfundInputGenerator1.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/impute/wfundInputGenerator1.dml b/src/test/scripts/applications/impute/wfundInputGenerator1.dml
index 8457fbd..5f7824b 100644
--- a/src/test/scripts/applications/impute/wfundInputGenerator1.dml
+++ b/src/test/scripts/applications/impute/wfundInputGenerator1.dml
@@ -380,7 +380,7 @@ if (is_GROUP_4_ENABLED == 1) {
 for (t in (num_known_terms + num_predicted_terms + 1) : num_terms)
 {
     for (i in 1 : num_attrs) {
-        if (castAsScalar (disabled_known_values [i, t - (num_known_terms + num_predicted_terms)]) == 0.0)
+        if (as.scalar (disabled_known_values [i, t - (num_known_terms + num_predicted_terms)]) == 0.0)
         {
             reg_index = ((t-1) * num_attrs - 1 + i) * num_factors;
             RegresCoeffDefault [reg_index + 1, 1] = 1.0 + zero; # Default coefficient = 1.0