Repository: systemml
Updated Branches:
  refs/heads/master 4a822a22c -> 0aea2b531


[SYSTEMML-2078] Use new builtin constants in various algorithms/tests

Project: http://git-wip-us.apache.org/repos/asf/systemml/repo
Commit: http://git-wip-us.apache.org/repos/asf/systemml/commit/0aea2b53
Tree: http://git-wip-us.apache.org/repos/asf/systemml/tree/0aea2b53
Diff: http://git-wip-us.apache.org/repos/asf/systemml/diff/0aea2b53

Branch: refs/heads/master
Commit: 0aea2b5315b4300494c7653287afdc96f5626a2e
Parents: 4a822a2
Author: Matthias Boehm <[email protected]>
Authored: Sun Apr 1 18:01:23 2018 -0700
Committer: Matthias Boehm <[email protected]>
Committed: Sun Apr 1 18:01:23 2018 -0700

----------------------------------------------------------------------
 scripts/algorithms/GLM-predict.dml              | 32 +++++------
 scripts/algorithms/GLM.dml                      | 54 +++++++++---------
 scripts/algorithms/KM.dml                       | 22 ++++----
 scripts/algorithms/Kmeans.dml                   |  2 +-
 scripts/algorithms/LinearRegCG.dml              | 10 ++--
 scripts/algorithms/LinearRegDS.dml              | 10 ++--
 scripts/algorithms/StepGLM.dml                  | 56 +++++++++----------
 scripts/algorithms/StepLinearRegDS.dml          | 10 ++--
 scripts/algorithms/decision-tree.dml            | 14 ++---
 scripts/algorithms/random-forest.dml            | 16 +++---
 scripts/algorithms/stratstats.dml               |  8 +--
 scripts/staging/knn.dml                         |  2 +-
 src/test/scripts/applications/glm/GLM.pydml     | 58 ++++++++++----------
 .../applications/impute/imputeGaussMCMC.dml     |  4 +-
 .../impute/imputeGaussMCMC.nogradient.dml       |  2 +-
 .../applications/impute/old/imputeGaussMCMC.dml |  2 +-
 src/test/scripts/applications/impute/tmp.dml    |  4 +-
 .../functions/codegenalg/Algorithm_GLM.R        | 58 ++++++++++----------
 .../functions/codegenalg/Algorithm_LinregCG.R   | 10 ++--
 .../functions/jmlc/reuse-glm-predict.dml        | 32 +++++------
 .../functions/misc/ZeroMatrix_Aggregates.R      |  4 +-
 .../functions/misc/ZeroMatrix_Aggregates.dml    | 10 ++--
 22 files changed, 210 insertions(+), 210 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/scripts/algorithms/GLM-predict.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/GLM-predict.dml 
b/scripts/algorithms/GLM-predict.dml
index 49a593c..1cb571a 100644
--- a/scripts/algorithms/GLM-predict.dml
+++ b/scripts/algorithms/GLM-predict.dml
@@ -149,20 +149,20 @@ if (fileY != " ")
     
     # Statistics To Compute:
     
-    Z_logl               = 0.0 / 0.0;
-    Z_logl_pValue        = 0.0 / 0.0;
-    X2_pearson           = 0.0 / 0.0;
+    Z_logl               = NaN;
+    Z_logl_pValue        = NaN;
+    X2_pearson           = NaN;
     df_pearson           = -1;
-    G2_deviance          = 0.0 / 0.0;
+    G2_deviance          = NaN;
     df_deviance          = -1;
-    X2_pearson_pValue    = 0.0 / 0.0;
-    G2_deviance_pValue   = 0.0 / 0.0;
-    Z_logl_scaled        = 0.0 / 0.0;
-    Z_logl_scaled_pValue = 0.0 / 0.0;
-    X2_scaled            = 0.0 / 0.0;
-    X2_scaled_pValue     = 0.0 / 0.0;
-    G2_scaled            = 0.0 / 0.0;
-    G2_scaled_pValue     = 0.0 / 0.0;
+    X2_pearson_pValue    = NaN;
+    G2_deviance_pValue   = NaN;
+    Z_logl_scaled        = NaN;
+    Z_logl_scaled_pValue = NaN;
+    X2_scaled            = NaN;
+    X2_scaled_pValue     = NaN;
+    G2_scaled            = NaN;
+    G2_scaled_pValue     = NaN;
     
     # set Y_counts to avoid 'Initialization of Y_counts depends on if-else 
execution' warning
     Y_counts = matrix(0.0, rows=1, cols=1);
@@ -391,8 +391,8 @@ glm_means_and_vars =
             y_prob [, 1] = (1 - is_too_small) * (1.0 - y_prob [, 2]) + 
is_too_small * elt * (1.0 - elt / 2);
         } else if (link_type == 5)                      { # Binomial.cauchit
             atan_linear_terms = atan (linear_terms);
-            y_prob [, 1] = 0.5 + atan_linear_terms / 
3.1415926535897932384626433832795;
-            y_prob [, 2] = 0.5 - atan_linear_terms / 
3.1415926535897932384626433832795;
+            y_prob [, 1] = 0.5 + atan_linear_terms / PI;
+            y_prob [, 2] = 0.5 - atan_linear_terms / PI;
         }
         means = y_prob;
         ones_ctg = matrix (1, rows = 2, cols = 1);
@@ -417,8 +417,8 @@ glm_partial_loglikelihood_for_power_dist_and_link =   # 
Assumes: dist_type == 1
     num_records = nrow (Y);
     if (var_power == 1.0) { # Poisson
         if (link_power == 0)  { # Poisson.log
-            is_natural_parameter_log_zero = (linear_terms == -1.0/0.0);
-            natural_parameters = replace (target = linear_terms, pattern = 
-1.0/0.0, replacement = 0);
+            is_natural_parameter_log_zero = (linear_terms == -INF);
+            natural_parameters = replace (target = linear_terms, pattern = 
-INF, replacement = 0);
             b_cumulant = exp (linear_terms);
         } else {                  # Poisson.power_nonlog
             is_natural_parameter_log_zero = (linear_terms == 0);

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/scripts/algorithms/GLM.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/GLM.dml b/scripts/algorithms/GLM.dml
index 8bd822f..825dc9f 100644
--- a/scripts/algorithms/GLM.dml
+++ b/scripts/algorithms/GLM.dml
@@ -159,15 +159,15 @@ eps = as.double (eps);
 # Default values for output statistics:
 
 termination_code     = 0;
-min_beta             = 0.0 / 0.0;
-i_min_beta           = 0.0 / 0.0;
-max_beta             = 0.0 / 0.0;
-i_max_beta           = 0.0 / 0.0;
-intercept_value      = 0.0 / 0.0;
-dispersion           = 0.0 / 0.0;
-estimated_dispersion = 0.0 / 0.0;
-deviance_nodisp      = 0.0 / 0.0;
-deviance             = 0.0 / 0.0;
+min_beta             = NaN;
+i_min_beta           = NaN;
+max_beta             = NaN;
+i_max_beta           = NaN;
+intercept_value      = NaN;
+dispersion           = NaN;
+estimated_dispersion = NaN;
+deviance_nodisp      = NaN;
+deviance             = NaN;
 
 print("BEGIN GLM SCRIPT");
 print("Reading X...");
@@ -471,7 +471,7 @@ all_linear_terms = X %*% ssX_beta;
 [g_Y, w] = glm_dist (all_linear_terms, Y, distribution_type, 
variance_as_power_of_the_mean, link_type, link_as_power_of_the_mean);
     
 pearson_residual_sq = g_Y ^ 2 / w;
-pearson_residual_sq = replace (target = pearson_residual_sq, pattern = 
0.0/0.0, replacement = 0);
+pearson_residual_sq = replace (target = pearson_residual_sq, pattern = NaN, 
replacement = 0);
 # pearson_residual_sq = (y_residual ^ 2) / y_var;
 
 if (num_records > num_features) {
@@ -638,7 +638,7 @@ return (Matrix[double] beta, double saturated_log_l, int 
isNaN)
                     - log (- log (0.5)) * (is_zero_y_corr + is_one_y_corr)
                     + is_one_y_corr / (1.0 - is_one_y_corr) - is_zero_y_corr / 
(1.0 - is_zero_y_corr);
             } else if (link_type == 5)                  { # Binomial.cauchit
-                linear_terms = tan ((y_corr - 0.5) * 
3.1415926535897932384626433832795)
+                linear_terms = tan ((y_corr - 0.5) * PI)
                     + is_one_y_corr / (1.0 - is_one_y_corr) - is_zero_y_corr / 
(1.0 - is_zero_y_corr);
         }   }      
     }
@@ -741,9 +741,9 @@ glm_dist = function (Matrix[double] linear_terms, 
Matrix[double] Y,
                 w   =  rowSums (Y) * vec1 / link_power ^ 2;
             }
         } else {
-            is_LT_infinite = cbind(linear_terms==1.0/0.0, 
linear_terms==-1.0/0.0);
-            finite_linear_terms = replace (target =        linear_terms, 
pattern =  1.0/0.0, replacement = 0);
-            finite_linear_terms = replace (target = finite_linear_terms, 
pattern = -1.0/0.0, replacement = 0);
+            is_LT_infinite = cbind(linear_terms==INF, linear_terms==-INF);
+            finite_linear_terms = replace (target =        linear_terms, 
pattern =  INF, replacement = 0);
+            finite_linear_terms = replace (target = finite_linear_terms, 
pattern = -INF, replacement = 0);
             if (link_type == 2)                           { # Binomial.logit
                 Y_prob = cbind(exp(finite_linear_terms), ones_r);
                 Y_prob = Y_prob / rowSums (Y_prob);
@@ -771,11 +771,11 @@ glm_dist = function (Matrix[double] linear_terms, 
Matrix[double] Y,
                 g_Y =  (rowSums (Y) * the_exp_exp - Y [, 2]) / the_exp_ratio;
                 w   =  the_exp_exp * the_exp * rowSums (Y) / the_exp_ratio;
             } else if (link_type == 5)                  { # Binomial.cauchit
-                Y_prob = 0.5 + (atan (finite_linear_terms) %*% p_one_m_one) / 
3.1415926535897932384626433832795;
+                Y_prob = 0.5 + (atan (finite_linear_terms) %*% p_one_m_one) / 
PI;
                 Y_prob = Y_prob * (1.0 - rowSums (is_LT_infinite)) + 
is_LT_infinite;
                 y_residual = Y [, 1] * Y_prob [, 2] - Y [, 2] * Y_prob [, 1];
                 var_function = rowSums (Y) * Y_prob [, 1] * Y_prob [, 2];
-                link_gradient_normalized = (1 + linear_terms ^ 2) * 
3.1415926535897932384626433832795;
+                link_gradient_normalized = (1 + linear_terms ^ 2) * PI;
                 g_Y =  rowSums (Y) * y_residual / (var_function * 
link_gradient_normalized);
                 w   = (rowSums (Y) ^ 2) / (var_function * 
link_gradient_normalized ^ 2);
             }   
@@ -800,8 +800,8 @@ glm_log_likelihood_part = function (Matrix[double] 
linear_terms, Matrix[double]
         is_natural_parameter_log_zero = zeros_r;
         if          (var_power == 1.0 & link_power == 0)  { # Poisson.log
             b_cumulant = exp (linear_terms);
-            is_natural_parameter_log_zero = (linear_terms == -1.0/0.0);
-            natural_parameters = replace (target = linear_terms, pattern = 
-1.0/0.0, replacement = 0);
+            is_natural_parameter_log_zero = (linear_terms == -INF);
+            natural_parameters = replace (target = linear_terms, pattern = 
-INF, replacement = 0);
         } else if (var_power == 1.0 & link_power == 1.0)  { # Poisson.id
             if (sum (linear_terms < 0) == 0)  {
                 b_cumulant = linear_terms;
@@ -878,14 +878,14 @@ glm_log_likelihood_part = function (Matrix[double] 
linear_terms, Matrix[double]
             }
         }
         if (sum (is_natural_parameter_log_zero * abs (Y)) > 0) {
-            log_l = -1.0 / 0.0;
+            log_l = -INF;
             isNaN = 1;
         }
         if (isNaN == 0)
         {
             log_l = sum (Y * natural_parameters - b_cumulant);
             if (log_l != log_l | (log_l == log_l + 1.0 & log_l == log_l * 
2.0)) {
-                log_l = -1.0 / 0.0;
+                log_l = -INF;
                 isNaN = 1;
     }   }   }
     
@@ -901,12 +901,12 @@ glm_log_likelihood_part = function (Matrix[double] 
linear_terms, Matrix[double]
                     isNaN = 1;
                 }
             } else {
-                log_l = -1.0 / 0.0;
+                log_l = -INF;
                 isNaN = 1;
     }   }   }
     
     if (isNaN == 1) {
-        log_l = - 1.0 / 0.0; 
+        log_l = - INF; 
     }
 }
 
@@ -938,9 +938,9 @@ binomial_probability_two_column =
             isNaN = 1;
         }
     } else {              # Binomial.non_power
-        is_LT_infinite = cbind(linear_terms==1.0/0.0, linear_terms==-1.0/0.0);
-        finite_linear_terms = replace (target =        linear_terms, pattern = 
 1.0/0.0, replacement = 0);
-        finite_linear_terms = replace (target = finite_linear_terms, pattern = 
-1.0/0.0, replacement = 0);
+        is_LT_infinite = cbind(linear_terms==INF, linear_terms==-INF);
+        finite_linear_terms = replace (target =        linear_terms, pattern = 
 INF, replacement = 0);
+        finite_linear_terms = replace (target = finite_linear_terms, pattern = 
-INF, replacement = 0);
         if (link_type == 2)             { # Binomial.logit
             Y_prob = cbind(exp(finite_linear_terms), ones_r);
             Y_prob = Y_prob / rowSums (Y_prob);
@@ -961,7 +961,7 @@ binomial_probability_two_column =
             Y_prob [, 1] = (1 - is_too_small) * (1 - the_exp_exp) + 
is_too_small * the_exp * (1 - the_exp / 2);
             Y_prob [, 2] = the_exp_exp;
         } else if (link_type == 5)    { # Binomial.cauchit
-            Y_prob = 0.5 + cbind(atan(finite_linear_terms), 
-atan(finite_linear_terms)) / 3.1415926535897932384626433832795;
+            Y_prob = 0.5 + cbind(atan(finite_linear_terms), 
-atan(finite_linear_terms)) / PI;
         } else {
             isNaN = 1;
         }
@@ -1109,7 +1109,7 @@ return (double mantissa, int eee)
 {
     mantissa = 1.0;
     eee = 0;
-    positive_infinity = 1.0 / 0.0;
+    positive_infinity = INF;
     x = abs (x_to_truncate);
     if (x != x / 2.0) {
         log_ten = log (10.0);

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/scripts/algorithms/KM.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/KM.dml b/scripts/algorithms/KM.dml
index 8a4b1f7..7659244 100644
--- a/scripts/algorithms/KM.dml
+++ b/scripts/algorithms/KM.dml
@@ -186,11 +186,11 @@ if (n_group_cols > 0) {
        Idx[2:num_records,] = rowMaxs (X[1:(num_records - 1),3:(2 + 
n_group_cols)] != X[2:num_records,3:(2 + n_group_cols)]);
        num_groups = sum (Idx);
 
-       XG = replace (target = XG, pattern = 0, replacement = "Infinity");
+       XG = replace (target = XG, pattern = 0, replacement = INF);
        XG = XG * Idx;
-       XG = replace (target = XG, pattern = "NaN", replacement = 0);   
+       XG = replace (target = XG, pattern = NaN, replacement = 0);     
        G_cols = removeEmpty (target = XG, margin = "rows"); 
-       G_cols = replace (target = G_cols, pattern = "Infinity", replacement = 
0);      
+       G_cols = replace (target = G_cols, pattern = INF, replacement = 0);     
 
        A = removeEmpty (target = diag (Idx), margin = "cols");
        if (ncol (A) > 1) {
@@ -224,9 +224,9 @@ if (n_stratum_cols > 0) {
 
        XS = replace (target = XS, pattern = 0, replacement = "Infinity");
        XS = XS * Idx;
-       XS = replace (target = XS, pattern = "NaN", replacement = 0);   
+       XS = replace (target = XS, pattern = NaN, replacement = 0);     
        S_cols = removeEmpty (target = XS, margin = "rows"); 
-       S_cols = replace (target = S_cols, pattern = "Infinity", replacement = 
0);      
+       S_cols = replace (target = S_cols, pattern = INF, replacement = 0);     
 
        SB = removeEmpty (target = seq (1,num_records), margin = "rows", select 
= Idx); # indices of stratum boundaries 
        A = removeEmpty (target = diag (Idx), margin = "cols");
@@ -317,7 +317,7 @@ parfor (s in 1:num_strata, check = 0) {
        time_stratum = table (seq (1, nrow (Idx1), 1), Idx1) %*% T; # distinct 
timestamps both censored and uncensored per stratum 
        time_stratum_has_zero = sum (time_stratum == 0) > 0;
        if (time_stratum_has_zero) {
-               time_stratum =  replace (target = time_stratum, pattern = 0, 
replacement = "Infinity");
+               time_stratum =  replace (target = time_stratum, pattern = 0, 
replacement = INF);
        }
        n_time_all1 = nrow (n_event_stratum);  # no. of distinct timestamps 
both censored and uncensored per stratum
        n_event_all_stratum_agg = matrix (0, rows = n_time_all1, cols = 1); 
@@ -349,9 +349,9 @@ parfor (s in 1:num_strata, check = 0) {
                        Idx1 = cumsum (n_event_all); 
                        event_occurred = (n_event > 0);
                        if (time_stratum_has_zero) {
-                               time = replace (target = time_stratum * 
event_occurred, pattern = "NaN", replacement = 0);
+                               time = replace (target = time_stratum * 
event_occurred, pattern = NaN, replacement = 0);
                                time = removeEmpty (target = time, margin = 
"rows");
-                               time = replace (target = time, pattern = 
"Infinity", replacement = 0);
+                               time = replace (target = time, pattern = INF, 
replacement = 0);
                        } else {
                                time = removeEmpty (target = time_stratum * 
event_occurred, margin = "rows");
                        }
@@ -518,12 +518,12 @@ parfor (s in 1:num_strata, check = 0) {
                                }
                                if (test_type == "log-rank") {
                                        V1 = n_risk * n_event_stratum * 
(n_risk_stratum - n_event_stratum) / (n_risk_stratum * (n_risk_stratum - 1));
-                                       V1 = replace (target = V1, pattern = 
"NaN", replacement = 0);
+                                       V1 = replace (target = V1, pattern = 
NaN, replacement = 0);
                                        V2 = I_i1i2 - (n_risk_i2j / 
n_risk_stratum);
                                        V[(i1 + 1),(i2 + 1)] = sum (V1 * V2);
                                } else { ### test_type == "wilcoxon"
                                        V1 = (n_risk_stratum ^ 2) * (n_risk * 
n_event_stratum) * (n_risk_stratum - n_event_stratum) / (n_risk_stratum * 
(n_risk_stratum - 1));
-                                       V1 = replace (target = V1, pattern = 
"NaN", replacement = 0);
+                                       V1 = replace (target = V1, pattern = 
NaN, replacement = 0);
                                        V2 = I_i1i2 - (n_risk_i2j / 
n_risk_stratum);
                                        V[(i1 + 1),(i2 + 1)] = sum (V1 * V2) / 
(range ^ 2);
                                }
@@ -582,7 +582,7 @@ if (n_stratum_cols > 0) {
 M_cols = removeEmpty (target = M_cols, margin = "rows");
 tab_M = table (seq (1, nrow (M_cols)), M_cols, nrow (M_cols), nrow (M));
 M = tab_M %*% M;
-M = replace (target = M, pattern = "Infinity", replacement = "NaN");
+M = replace (target = M, pattern = INF, replacement = NaN);
 
 # pull out non-empty rows from TEST
 if (n_group_cols > 0 & n_stratum_cols > 0) {

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/scripts/algorithms/Kmeans.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/Kmeans.dml b/scripts/algorithms/Kmeans.dml
index 99ee264..640cc13 100644
--- a/scripts/algorithms/Kmeans.dml
+++ b/scripts/algorithms/Kmeans.dml
@@ -135,7 +135,7 @@ parfor (run_index in 1 : num_runs, check = 0)
     C_old = C;
     iter_count = 0;
     term_code = 0;
-    wcss = 1.0/0.0; #INF
+    wcss = INF;
 
     while (term_code == 0)
     {

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/scripts/algorithms/LinearRegCG.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/LinearRegCG.dml 
b/scripts/algorithms/LinearRegCG.dml
index 656d261..45d7667 100644
--- a/scripts/algorithms/LinearRegCG.dml
+++ b/scripts/algorithms/LinearRegCG.dml
@@ -224,8 +224,8 @@ ss_res = sum (y_residual ^ 2);
 ss_avg_res = ss_res - n * avg_res ^ 2;
 
 R2 = 1 - ss_res / ss_avg_tot;
-dispersion = ifelse(n > m_ext, ss_res / (n - m_ext), 0.0/0.0);
-adjusted_R2 = ifelse(n > m_ext, 1 - dispersion / (ss_avg_tot / (n - 1)), 
0.0/0.0);
+dispersion = ifelse(n > m_ext, ss_res / (n - m_ext), NaN);
+adjusted_R2 = ifelse(n > m_ext, 1 - dispersion / (ss_avg_tot / (n - 1)), NaN);
 
 R2_nobias = 1 - ss_avg_res / ss_avg_tot;
 deg_freedom = n - m - 1;
@@ -233,13 +233,13 @@ if (deg_freedom > 0) {
     var_res = ss_avg_res / deg_freedom;
     adjusted_R2_nobias = 1 - var_res / (ss_avg_tot / (n - 1));
 } else {
-    var_res = 0.0 / 0.0;
-    adjusted_R2_nobias = 0.0 / 0.0;
+    var_res = NaN;
+    adjusted_R2_nobias = NaN;
     print ("Warning: zero or negative number of degrees of freedom.");
 }
 
 R2_vs_0 = 1 - ss_res / ss_tot;
-adjusted_R2_vs_0 = ifelse(n > m, 1 - (ss_res / (n - m)) / (ss_tot / n), 
0.0/0.0);
+adjusted_R2_vs_0 = ifelse(n > m, 1 - (ss_res / (n - m)) / (ss_tot / n), NaN);
 
 str = "AVG_TOT_Y," + avg_tot;                                    #  Average of 
the response value Y
 str = append (str, "STDEV_TOT_Y," + sqrt (var_tot));             #  Standard 
Deviation of the response value Y

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/scripts/algorithms/LinearRegDS.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/LinearRegDS.dml 
b/scripts/algorithms/LinearRegDS.dml
index 3e6a6e3..e6ca736 100644
--- a/scripts/algorithms/LinearRegDS.dml
+++ b/scripts/algorithms/LinearRegDS.dml
@@ -166,8 +166,8 @@ ss_res = sum (y_residual ^ 2);
 ss_avg_res = ss_res - n * avg_res ^ 2;
 
 R2 = 1 - ss_res / ss_avg_tot;
-dispersion = ifelse(n > m_ext, ss_res / (n - m_ext), 0.0/0.0);
-adjusted_R2 = ifelse(n > m_ext, 1 - dispersion / (ss_avg_tot / (n - 1)), 
0.0/0.0);
+dispersion = ifelse(n > m_ext, ss_res / (n - m_ext), NaN);
+adjusted_R2 = ifelse(n > m_ext, 1 - dispersion / (ss_avg_tot / (n - 1)), NaN);
 
 R2_nobias = 1 - ss_avg_res / ss_avg_tot;
 deg_freedom = n - m - 1;
@@ -175,13 +175,13 @@ if (deg_freedom > 0) {
     var_res = ss_avg_res / deg_freedom;
     adjusted_R2_nobias = 1 - var_res / (ss_avg_tot / (n - 1));
 } else {
-    var_res = 0.0 / 0.0;
-    adjusted_R2_nobias = 0.0 / 0.0;
+    var_res = NaN;
+    adjusted_R2_nobias = NaN;
     print ("Warning: zero or negative number of degrees of freedom.");
 }
 
 R2_vs_0 = 1 - ss_res / ss_tot;
-adjusted_R2_vs_0 = ifelse(n > m, 1 - (ss_res / (n - m)) / (ss_tot / n), 
0.0/0.0);
+adjusted_R2_vs_0 = ifelse(n > m, 1 - (ss_res / (n - m)) / (ss_tot / n), NaN);
 
 str = "AVG_TOT_Y," + avg_tot;                                    #  Average of 
the response value Y
 str = append (str, "STDEV_TOT_Y," + sqrt (var_tot));             #  Standard 
Deviation of the response value Y

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/scripts/algorithms/StepGLM.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/StepGLM.dml b/scripts/algorithms/StepGLM.dml
index e598cb8..34ed672 100644
--- a/scripts/algorithms/StepGLM.dml
+++ b/scripts/algorithms/StepGLM.dml
@@ -242,15 +242,15 @@ glm = function (Matrix[Double] X, Matrix[Double] Y, Int 
intercept_status, Double
        # Default values for output statistics:
        regularization = 0.0;
        termination_code     = 0.0;
-       min_beta             = 0.0 / 0.0;
-       i_min_beta           = 0.0 / 0.0;
-       max_beta             = 0.0 / 0.0;
-       i_max_beta           = 0.0 / 0.0;
-       intercept_value      = 0.0 / 0.0;
-       dispersion           = 0.0 / 0.0;
-       estimated_dispersion = 0.0 / 0.0;
-       deviance_nodisp      = 0.0 / 0.0;
-       deviance             = 0.0 / 0.0;                                 
+       min_beta             = NaN;
+       i_min_beta           = NaN;
+       max_beta             = NaN;
+       i_max_beta           = NaN;
+       intercept_value      = NaN;
+       dispersion           = NaN;
+       estimated_dispersion = NaN;
+       deviance_nodisp      = NaN;
+       deviance             = NaN;
                   
        #####   INITIALIZE THE PARAMETERS   #####
                   
@@ -516,7 +516,7 @@ glm = function (Matrix[Double] X, Matrix[Double] Y, Int 
intercept_status, Double
                                [g_Y, w] = glm_dist (all_linear_terms, Y, 
distribution_type, variance_as_power_of_the_mean, link_type, 
link_as_power_of_the_mean);
                         
                 pearson_residual_sq = g_Y ^ 2 / w;
-                pearson_residual_sq = replace (target = pearson_residual_sq, 
pattern = 0.0/0.0, replacement = 0);
+                pearson_residual_sq = replace (target = pearson_residual_sq, 
pattern = NaN, replacement = 0);
                 # pearson_residual_sq = (y_residual ^ 2) / y_var;
                         
                 if (num_records > num_features) {
@@ -702,7 +702,7 @@ glm_initialize = function (Matrix[double] X, Matrix[double] 
Y, int dist_type, do
           - log (- log (0.5)) * (is_zero_y_corr + is_one_y_corr)
           + is_one_y_corr / (1.0 - is_one_y_corr) - is_zero_y_corr / (1.0 - 
is_zero_y_corr);
         } else { if (link_type == 5)                  { # Binomial.cauchit
-          linear_terms = tan ((y_corr - 0.5) * 
3.1415926535897932384626433832795)
+          linear_terms = tan ((y_corr - 0.5) * PI)
           + is_one_y_corr / (1.0 - is_one_y_corr) - is_zero_y_corr / (1.0 - 
is_zero_y_corr);
         }}  }}}}}
     }
@@ -815,11 +815,11 @@ glm_dist = function (Matrix[double] linear_terms, 
Matrix[double] Y,
           w   =  rowSums (Y) * vec1 / link_power ^ 2;
         }
       } else {
-        is_LT_pos_infinite = (linear_terms == 1.0/0.0);
-        is_LT_neg_infinite = (linear_terms == -1.0/0.0);
+        is_LT_pos_infinite = (linear_terms == INF);
+        is_LT_neg_infinite = (linear_terms == -INF);
         is_LT_infinite = is_LT_pos_infinite %*% one_zero + is_LT_neg_infinite 
%*% zero_one;
-        finite_linear_terms = replace (target =        linear_terms, pattern = 
 1.0/0.0, replacement = 0);
-        finite_linear_terms = replace (target = finite_linear_terms, pattern = 
-1.0/0.0, replacement = 0);
+        finite_linear_terms = replace (target =        linear_terms, pattern = 
 INF, replacement = 0);
+        finite_linear_terms = replace (target = finite_linear_terms, pattern = 
-INF, replacement = 0);
         if (link_type == 2)                           { # Binomial.logit
           Y_prob = exp (finite_linear_terms) %*% one_zero + ones_r %*% 
zero_one;
           Y_prob = Y_prob / (rowSums (Y_prob) %*% ones_2);
@@ -847,11 +847,11 @@ glm_dist = function (Matrix[double] linear_terms, 
Matrix[double] Y,
           g_Y =  (rowSums (Y) * the_exp_exp - Y [, 2]) / the_exp_ratio;
           w   =  the_exp_exp * the_exp * rowSums (Y) / the_exp_ratio;
         } else { if (link_type == 5)                  { # Binomial.cauchit
-          Y_prob = 0.5 + (atan (finite_linear_terms) %*% p_one_m_one) / 
3.1415926535897932384626433832795;
+          Y_prob = 0.5 + (atan (finite_linear_terms) %*% p_one_m_one) / PI;
           Y_prob = Y_prob * ((1.0 - rowSums (is_LT_infinite)) %*% ones_2) + 
is_LT_infinite;
           y_residual = Y [, 1] * Y_prob [, 2] - Y [, 2] * Y_prob [, 1];
           var_function = rowSums (Y) * Y_prob [, 1] * Y_prob [, 2];
-          link_gradient_normalized = (1 + linear_terms ^ 2) * 
3.1415926535897932384626433832795;
+          link_gradient_normalized = (1 + linear_terms ^ 2) * PI;
           g_Y =  rowSums (Y) * y_residual / (var_function * 
link_gradient_normalized);
           w   = (rowSums (Y) ^ 2) / (var_function * link_gradient_normalized ^ 
2);
         }}}}   
@@ -876,8 +876,8 @@ glm_log_likelihood_part = function (Matrix[double] 
linear_terms, Matrix[double]
       is_natural_parameter_log_zero = zeros_r;
       if          (var_power == 1.0 & link_power == 0)  { # Poisson.log
         b_cumulant = exp (linear_terms);
-        is_natural_parameter_log_zero = (linear_terms == -1.0/0.0);
-        natural_parameters = replace (target = linear_terms, pattern = 
-1.0/0.0, replacement = 0);
+        is_natural_parameter_log_zero = (linear_terms == -INF);
+        natural_parameters = replace (target = linear_terms, pattern = -INF, 
replacement = 0);
       } else { if (var_power == 1.0 & link_power == 1.0)  { # Poisson.id
         if (sum (linear_terms < 0) == 0)  {
           b_cumulant = linear_terms;
@@ -953,14 +953,14 @@ glm_log_likelihood_part = function (Matrix[double] 
linear_terms, Matrix[double]
         }}}}
       }}}}} }}}}}
       if (sum (is_natural_parameter_log_zero * abs (Y)) > 0) {
-        log_l = -1.0 / 0.0;
+        log_l = -INF;
         isNaN = 1;
       }
       if (isNaN == 0)
       {
         log_l = sum (Y * natural_parameters - b_cumulant);
         if (log_l != log_l | (log_l == log_l + 1.0 & log_l == log_l * 2.0)) {
-          log_l = -1.0 / 0.0;
+          log_l = -INF;
           isNaN = 1;
         }   }   }
     
@@ -977,12 +977,12 @@ glm_log_likelihood_part = function (Matrix[double] 
linear_terms, Matrix[double]
             isNaN = 1;
           }
         } else {
-          log_l = -1.0 / 0.0;
+          log_l = -INF;
           isNaN = 1;
         }   }   }
     
     if (isNaN == 1) {
-      log_l = - 1.0 / 0.0; 
+      log_l = - INF; 
     }
   }
 
@@ -1024,11 +1024,11 @@ binomial_probability_two_column =
           } else {isNaN = 1;}
         }}
       } else {              # Binomial.non_power
-        is_LT_pos_infinite = (linear_terms == 1.0/0.0);
-        is_LT_neg_infinite = (linear_terms == -1.0/0.0);
+        is_LT_pos_infinite = (linear_terms == INF);
+        is_LT_neg_infinite = (linear_terms == -INF);
         is_LT_infinite = is_LT_pos_infinite %*% one_zero + is_LT_neg_infinite 
%*% zero_one;
-        finite_linear_terms = replace (target =        linear_terms, pattern = 
 1.0/0.0, replacement = 0);
-        finite_linear_terms = replace (target = finite_linear_terms, pattern = 
-1.0/0.0, replacement = 0);
+        finite_linear_terms = replace (target =        linear_terms, pattern = 
 INF, replacement = 0);
+        finite_linear_terms = replace (target = finite_linear_terms, pattern = 
-INF, replacement = 0);
         if (link_type == 2)             { # Binomial.logit
           Y_prob = exp (finite_linear_terms) %*% one_zero + ones_r %*% 
zero_one;
           Y_prob = Y_prob / (rowSums (Y_prob) %*% ones_2);
@@ -1049,7 +1049,7 @@ binomial_probability_two_column =
           Y_prob [, 1] = (1 - is_too_small) * (1 - the_exp_exp) + is_too_small 
* the_exp * (1 - the_exp / 2);
           Y_prob [, 2] = the_exp_exp;
         } else { if (link_type == 5)    { # Binomial.cauchit
-          Y_prob = 0.5 + (atan (finite_linear_terms) %*% p_one_m_one) / 
3.1415926535897932384626433832795;
+          Y_prob = 0.5 + (atan (finite_linear_terms) %*% p_one_m_one) / PI;
         } else {
           isNaN = 1;
         }}}}

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/scripts/algorithms/StepLinearRegDS.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/StepLinearRegDS.dml 
b/scripts/algorithms/StepLinearRegDS.dml
index a86bcbb..54624dc 100644
--- a/scripts/algorithms/StepLinearRegDS.dml
+++ b/scripts/algorithms/StepLinearRegDS.dml
@@ -328,8 +328,8 @@ linear_regression = function (Matrix[Double] X, 
Matrix[Double] y, Double m_orig,
         dispersion  = ss_res / (n - m_ext);
         adjusted_R2 = 1 - dispersion / (ss_avg_tot / (n - 1));
       } else {
-        dispersion  = 0.0 / 0.0;
-        adjusted_R2 = 0.0 / 0.0;
+        dispersion  = NaN;
+        adjusted_R2 = NaN;
       }
 
       R2_nobias = 1 - ss_avg_res / ss_avg_tot;
@@ -338,8 +338,8 @@ linear_regression = function (Matrix[Double] X, 
Matrix[Double] y, Double m_orig,
         var_res = ss_avg_res / deg_freedom;
         adjusted_R2_nobias = 1 - var_res / (ss_avg_tot / (n - 1));
       } else {
-        var_res = 0.0 / 0.0;
-        adjusted_R2_nobias = 0.0 / 0.0;
+        var_res = NaN;
+        adjusted_R2_nobias = NaN;
         print ("Warning: zero or negative number of degrees of freedom.");
       }
 
@@ -347,7 +347,7 @@ linear_regression = function (Matrix[Double] X, 
Matrix[Double] y, Double m_orig,
       if (n > m) {
         adjusted_R2_vs_0 = 1 - (ss_res / (n - m)) / (ss_tot / n);
       } else {
-        adjusted_R2_vs_0 = 0.0 / 0.0;
+        adjusted_R2_vs_0 = NaN;
       }
 
       str = "AVG_TOT_Y," + avg_tot;                                    #  
Average of the response value Y

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/scripts/algorithms/decision-tree.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/decision-tree.dml 
b/scripts/algorithms/decision-tree.dml
index 57728f8..f8d3ba0 100644
--- a/scripts/algorithms/decision-tree.dml
+++ b/scripts/algorithms/decision-tree.dml
@@ -296,7 +296,7 @@ while ((num_cur_nodes_large + num_cur_nodes_small) > 0 & 
level < depth) {
                        
                        I_gain_scale = cur_impurity - ( ( label_sum_left / 
label_sum_overall ) * impurity_left_scale + ( label_sum_right / 
label_sum_overall ) * impurity_right_scale); 
                
-                       I_gain_scale = replace (target = I_gain_scale, pattern 
= "NaN", replacement = 0);       
+                       I_gain_scale = replace (target = I_gain_scale, pattern 
= NaN, replacement = 0); 
                        
                        # determine best feature to split on and the split value
                        best_scale_gain = max (I_gain_scale);
@@ -341,7 +341,7 @@ while ((num_cur_nodes_large + num_cur_nodes_small) > 0 & 
level < depth) {
                                        label_dist = replace (target = 
label_dist, pattern = 0, replacement = 1);
                                        log_label_dist = log (label_dist); # / 
log(2)
                                        impurity = - rowSums (label_dist * 
log_label_dist); 
-                                       impurity = replace (target = impurity, 
pattern = "NaN", replacement = 1/0); 
+                                       impurity = replace (target = impurity, 
pattern = NaN, replacement = 1/0); 
                                } else { # imp == "Gini"
                                        impurity = rowSums (label_dist * (1 - 
label_dist));                             
                                }
@@ -370,7 +370,7 @@ while ((num_cur_nodes_large + num_cur_nodes_small) > 0 & 
level < depth) {
                                label_counts_right = - label_counts_left + 
label_counts_overall;
                                label_sum_right = rowSums (label_counts_right);
                                label_dist_right = label_counts_right / 
label_sum_right;
-                               label_dist_right = replace (target = 
label_dist_right, pattern = "NaN", replacement = 1);
+                               label_dist_right = replace (target = 
label_dist_right, pattern = NaN, replacement = 1);
                                if (imp == "entropy") {
                                        label_dist_right = replace (target = 
label_dist_right, pattern = 0, replacement = 1);
                                        log_label_dist_right = log 
(label_dist_right); # / log (2)
@@ -709,7 +709,7 @@ while ((num_cur_nodes_large + num_cur_nodes_small) > 0 & 
level < depth) {
                                        }
                                        I_gain_scale = cur_impurity - ( ( 
label_sum_left / label_sum_overall ) * impurity_left_scale + ( label_sum_right 
/ label_sum_overall ) * impurity_right_scale); # I_gain -> I_gain_scale
                        
-                                       I_gain_scale = replace (target = 
I_gain_scale, pattern = "NaN", replacement = 0);               
+                                       I_gain_scale = replace (target = 
I_gain_scale, pattern = NaN, replacement = 0);         
                        
                                        # determine best feature to split on 
and the split value
                                        best_scale_gain = max (I_gain_scale);
@@ -754,7 +754,7 @@ while ((num_cur_nodes_large + num_cur_nodes_small) > 0 & 
level < depth) {
                                                        label_dist = replace 
(target = label_dist, pattern = 0, replacement = 1);
                                                        log_label_dist = log 
(label_dist); # / log(2)
                                                        impurity = - rowSums 
(label_dist * log_label_dist); 
-                                                       impurity = replace 
(target = impurity, pattern = "NaN", replacement = 1/0); 
+                                                       impurity = replace 
(target = impurity, pattern = NaN, replacement = 1/0); 
                                                } else { # imp == "Gini"
                                                        impurity = rowSums 
(label_dist * (1 - label_dist)); 
                                                }
@@ -770,7 +770,7 @@ while ((num_cur_nodes_large + num_cur_nodes_small) > 0 & 
level < depth) {
                        
                                                label_sum_left = rowSums 
(label_counts_left);
                                                label_dist_left = 
label_counts_left / label_sum_left;
-                                               label_dist_left = replace 
(target = label_dist_left, pattern = "NaN", replacement = 1);
+                                               label_dist_left = replace 
(target = label_dist_left, pattern = NaN, replacement = 1);
                                                if (imp == "entropy") {
                                                        label_dist_left = 
replace (target = label_dist_left, pattern = 0, replacement = 1);
                                                        log_label_dist_left = 
log (label_dist_left); # / log(2)
@@ -782,7 +782,7 @@ while ((num_cur_nodes_large + num_cur_nodes_small) > 0 & 
level < depth) {
                                                label_counts_right = - 
label_counts_left + label_counts_overall;
                                                label_sum_right = rowSums 
(label_counts_right);
                                                label_dist_right = 
label_counts_right / label_sum_right;
-                                               label_dist_right = replace 
(target = label_dist_right, pattern = "NaN", replacement = 1);
+                                               label_dist_right = replace 
(target = label_dist_right, pattern = NaN, replacement = 1);
                                                if (imp == "entropy") {
                                                        label_dist_right = 
replace (target = label_dist_right, pattern = 0, replacement = 1);
                                                        log_label_dist_right = 
log (label_dist_right); # / log (2)

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/scripts/algorithms/random-forest.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/random-forest.dml 
b/scripts/algorithms/random-forest.dml
index 3cdb034..90908d8 100644
--- a/scripts/algorithms/random-forest.dml
+++ b/scripts/algorithms/random-forest.dml
@@ -334,7 +334,7 @@ while ((num_cur_nodes_large + num_cur_nodes_small) > 0 & 
level < depth) {
                        
                        I_gain_scale = cur_impurity - ( ( label_sum_left / 
label_sum_overall ) * impurity_left_scale + ( label_sum_right / 
label_sum_overall ) * impurity_right_scale); 
                
-                       I_gain_scale = replace (target = I_gain_scale, pattern 
= "NaN", replacement = 0);       
+                       I_gain_scale = replace (target = I_gain_scale, pattern 
= NaN, replacement = 0); 
                        
                        # determine best feature to split on and the split value
                        feature_start_ind = matrix (0, rows = 1, cols = 
num_scale_features);
@@ -406,7 +406,7 @@ while ((num_cur_nodes_large + num_cur_nodes_small) > 0 & 
level < depth) {
                                        label_dist = replace (target = 
label_dist, pattern = 0, replacement = 1);
                                        log_label_dist = log (label_dist); # / 
log(2)
                                        impurity = - rowSums (label_dist * 
log_label_dist); 
-                                       impurity = replace (target = impurity, 
pattern = "NaN", replacement = 1/0); 
+                                       impurity = replace (target = impurity, 
pattern = NaN, replacement = 1/0); 
                                } else { # imp == "Gini"
                                        impurity = rowSums (label_dist * (1 - 
label_dist));                             
                                }
@@ -423,7 +423,7 @@ while ((num_cur_nodes_large + num_cur_nodes_small) > 0 & 
level < depth) {
                        
                                label_sum_left = rowSums (label_counts_left);
                                label_dist_left = label_counts_left / 
label_sum_left;
-                               label_dist_left = replace (target = 
label_dist_left, pattern = "NaN", replacement = 1);
+                               label_dist_left = replace (target = 
label_dist_left, pattern = NaN, replacement = 1);
                                if (imp == "entropy") {
                                        label_dist_left = replace (target = 
label_dist_left, pattern = 0, replacement = 1);
                                        log_label_dist_left = log 
(label_dist_left); # / log(2)
@@ -435,7 +435,7 @@ while ((num_cur_nodes_large + num_cur_nodes_small) > 0 & 
level < depth) {
                                label_counts_right = - label_counts_left + 
label_counts_overall;
                                label_sum_right = rowSums (label_counts_right);
                                label_dist_right = label_counts_right / 
label_sum_right;
-                               label_dist_right = replace (target = 
label_dist_right, pattern = "NaN", replacement = 1);
+                               label_dist_right = replace (target = 
label_dist_right, pattern = NaN, replacement = 1);
                                if (imp == "entropy") {
                                        label_dist_right = replace (target = 
label_dist_right, pattern = 0, replacement = 1);
                                        log_label_dist_right = log 
(label_dist_right); # / log (2)
@@ -812,7 +812,7 @@ while ((num_cur_nodes_large + num_cur_nodes_small) > 0 & 
level < depth) {
                                        }
                                        I_gain_scale = cur_impurity - ( ( 
label_sum_left / label_sum_overall ) * impurity_left_scale + ( label_sum_right 
/ label_sum_overall ) * impurity_right_scale); 
                        
-                                       I_gain_scale = replace (target = 
I_gain_scale, pattern = "NaN", replacement = 0);               
+                                       I_gain_scale = replace (target = 
I_gain_scale, pattern = NaN, replacement = 0);         
                        
                                        # determine best feature to split on 
and the split value
                                        feature_start_ind = matrix (0, rows = 
1, cols = num_scale_features);
@@ -884,7 +884,7 @@ while ((num_cur_nodes_large + num_cur_nodes_small) > 0 & 
level < depth) {
                                                        label_dist = replace 
(target = label_dist, pattern = 0, replacement = 1);
                                                        log_label_dist = log 
(label_dist); # / log(2)
                                                        impurity = - rowSums 
(label_dist * log_label_dist); 
-                                                       impurity = replace 
(target = impurity, pattern = "NaN", replacement = 1/0); 
+                                                       impurity = replace 
(target = impurity, pattern = NaN, replacement = 1/0); 
                                                } else { # imp == "Gini"
                                                        impurity = rowSums 
(label_dist * (1 - label_dist));                             
                                                }
@@ -901,7 +901,7 @@ while ((num_cur_nodes_large + num_cur_nodes_small) > 0 & 
level < depth) {
                        
                                                label_sum_left = rowSums 
(label_counts_left);
                                                label_dist_left = 
label_counts_left / label_sum_left;
-                                               label_dist_left = replace 
(target = label_dist_left, pattern = "NaN", replacement = 1);
+                                               label_dist_left = replace 
(target = label_dist_left, pattern = NaN, replacement = 1);
                                                if (imp == "entropy") {
                                                        label_dist_left = 
replace (target = label_dist_left, pattern = 0, replacement = 1);
                                                        log_label_dist_left = 
log (label_dist_left); # / log(2)
@@ -913,7 +913,7 @@ while ((num_cur_nodes_large + num_cur_nodes_small) > 0 & 
level < depth) {
                                                label_counts_right = - 
label_counts_left + label_counts_overall;
                                                label_sum_right = rowSums 
(label_counts_right);
                                                label_dist_right = 
label_counts_right / label_sum_right;
-                                               label_dist_right = replace 
(target = label_dist_right, pattern = "NaN", replacement = 1);
+                                               label_dist_right = replace 
(target = label_dist_right, pattern = NaN, replacement = 1);
                                                if (imp == "entropy") {
                                                        label_dist_right = 
replace (target = label_dist_right, pattern = 0, replacement = 1);
                                                        log_label_dist_right = 
log (label_dist_right); # / log (2)

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/scripts/algorithms/stratstats.dml
----------------------------------------------------------------------
diff --git a/scripts/algorithms/stratstats.dml 
b/scripts/algorithms/stratstats.dml
index 3745fab..1757ec5 100644
--- a/scripts/algorithms/stratstats.dml
+++ b/scripts/algorithms/stratstats.dml
@@ -138,8 +138,8 @@ num_attrs_XY = num_attrs_X * num_attrs_Y;
 
 print ("Preparing the covariates...");
 
-XnoNaNs = replace (target = XwithNaNs, pattern = 0.0/0.0, replacement = 0);
-YnoNaNs = replace (target = YwithNaNs, pattern = 0.0/0.0, replacement = 0);
+XnoNaNs = replace (target = XwithNaNs, pattern = NaN, replacement = 0);
+YnoNaNs = replace (target = YwithNaNs, pattern = NaN, replacement = 0);
 XNaNmask = (XwithNaNs == XwithNaNs);
 YNaNmask = (YwithNaNs == YwithNaNs);
 one_to_num_attrs_X = seq (1, num_attrs_X, 1);
@@ -160,7 +160,7 @@ Y_mask = YNaNmask %*% ProjY;
 
 print ("Preparing the strata...");
 
-SnoNaNs = replace (target = SwithNaNs, pattern = 0.0/0.0, replacement = 0);
+SnoNaNs = replace (target = SwithNaNs, pattern = NaN, replacement = 0);
 S = round (SnoNaNs) * (SnoNaNs > 0);
 Proj_good_stratumID = diag (S > 0);
 Proj_good_stratumID = removeEmpty (target = Proj_good_stratumID, margin = 
"rows");
@@ -391,6 +391,6 @@ sqrt_failsafe = function (Matrix[double] input_A) return 
(Matrix[double] output_
     mask_A = (input_A >= 0);
     prep_A = input_A * mask_A;
     mask_A = mask_A * (prep_A == prep_A);
-    prep_A = replace (target = prep_A, pattern = 0.0/0.0, replacement = 0);
+    prep_A = replace (target = prep_A, pattern = NaN, replacement = 0);
     output_A = sqrt (prep_A) / mask_A;
 }

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/scripts/staging/knn.dml
----------------------------------------------------------------------
diff --git a/scripts/staging/knn.dml b/scripts/staging/knn.dml
index 42ac767..bb638fe 100644
--- a/scripts/staging/knn.dml
+++ b/scripts/staging/knn.dml
@@ -559,7 +559,7 @@ return(
     }
     d_max_err_value = ( max( in_m_data_target ) - min( in_m_data_target ) ) * 
100;
     b_continue_main_loop = TRUE;    #level 1 while loop flag
-    d_min_LOOCV = 1.0/0.0;
+    d_min_LOOCV = INF;
     while( b_continue_main_loop ){
         m_feature_selected_flag = m_main_selected_flag;
         m_this_model_selected_flag = TRUE;

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/src/test/scripts/applications/glm/GLM.pydml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/glm/GLM.pydml 
b/src/test/scripts/applications/glm/GLM.pydml
index 1875f33..ce7e02a 100644
--- a/src/test/scripts/applications/glm/GLM.pydml
+++ b/src/test/scripts/applications/glm/GLM.pydml
@@ -160,15 +160,15 @@ eps = eps + 0.0
 # Default values for output statistics:
 
 termination_code     = 0
-min_beta             = 0.0 / 0.0
-i_min_beta           = 0.0 / 0.0
-max_beta             = 0.0 / 0.0
-i_max_beta           = 0.0 / 0.0
-intercept_value      = 0.0 / 0.0
-dispersion           = 0.0 / 0.0
-estimated_dispersion = 0.0 / 0.0
-deviance_nodisp      = 0.0 / 0.0
-deviance             = 0.0 / 0.0
+min_beta             = NaN
+i_min_beta           = NaN
+max_beta             = NaN
+i_max_beta           = NaN
+intercept_value      = NaN
+dispersion           = NaN
+estimated_dispersion = NaN
+deviance_nodisp      = NaN
+deviance             = NaN
 
 print("BEGIN GLM SCRIPT")
 print("Reading X...")
@@ -453,7 +453,7 @@ if (is_supported == 1):
         [g_Y, w] = glm_dist (all_linear_terms, Y, distribution_type, 
variance_as_power_of_the_mean, link_type, link_as_power_of_the_mean)
         
         pearson_residual_sq = g_Y ** 2 / w
-        pearson_residual_sq = replace (target = pearson_residual_sq, pattern = 
0.0/0.0, replacement = 0)
+        pearson_residual_sq = replace (target = pearson_residual_sq, pattern = 
NaN, replacement = 0)
         # pearson_residual_sq = (y_residual ** 2) / y_var
         
         if (num_records > num_features):
@@ -729,7 +729,7 @@ def glm_initialize(X: matrix[float], Y: matrix[float], 
dist_type: int, var_power
                                 linear_terms = log (- log (1.0 - y_corr)) - 
log (- log (0.5)) * (is_zero_y_corr + is_one_y_corr) + is_one_y_corr / (1.0 - 
is_one_y_corr) - is_zero_y_corr / (1.0 - is_zero_y_corr)
                             else:
                                 if (link_type == 5): # Binomial.cauchit
-                                    linear_terms = tan ((y_corr - 0.5) * 
3.1415926535897932384626433832795) + is_one_y_corr / (1.0 - is_one_y_corr) - 
is_zero_y_corr / (1.0 - is_zero_y_corr)
+                                    linear_terms = tan ((y_corr - 0.5) * PI) + 
is_one_y_corr / (1.0 - is_one_y_corr) - is_zero_y_corr / (1.0 - is_zero_y_corr)
                                 
                             
                         
@@ -852,11 +852,11 @@ def glm_dist(linear_terms: matrix[float], Y: 
matrix[float], dist_type: int, var_
                 w   =  rowSums (Y) * vec1 / link_power ** 2
             
         else:
-            is_LT_pos_infinite = (linear_terms == (1.0/0.0))
-            is_LT_neg_infinite = (linear_terms == (-1.0/0.0))
+            is_LT_pos_infinite = (linear_terms == INF)
+            is_LT_neg_infinite = (linear_terms == -INF)
             is_LT_infinite = dot(is_LT_pos_infinite, one_zero) + 
dot(is_LT_neg_infinite, zero_one)
-            finite_linear_terms = replace (target =        linear_terms, 
pattern =  1.0/0.0, replacement = 0)
-            finite_linear_terms = replace (target = finite_linear_terms, 
pattern = -1.0/0.0, replacement = 0)
+            finite_linear_terms = replace (target =        linear_terms, 
pattern =  INF, replacement = 0)
+            finite_linear_terms = replace (target = finite_linear_terms, 
pattern = -INF, replacement = 0)
             if (link_type == 2): # Binomial.logit
                 Y_prob = dot(exp (finite_linear_terms), one_zero) + 
dot(ones_r, zero_one)
                 Y_prob = Y_prob / (dot(rowSums (Y_prob), ones_2))
@@ -887,11 +887,11 @@ def glm_dist(linear_terms: matrix[float], Y: 
matrix[float], dist_type: int, var_
                         w   =  the_exp_exp * the_exp * rowSums (Y) / 
the_exp_ratio
                     else:
                         if (link_type == 5): # Binomial.cauchit
-                            Y_prob = 0.5 + (dot(atan (finite_linear_terms), 
p_one_m_one)) / 3.1415926535897932384626433832795
+                            Y_prob = 0.5 + (dot(atan (finite_linear_terms), 
p_one_m_one)) / PI
                             Y_prob = Y_prob * (dot((1.0 - rowSums 
(is_LT_infinite)), ones_2)) + is_LT_infinite
                             y_residual = Y [, 0] * Y_prob [, 1] - Y [, 1] * 
Y_prob [, 0]
                             var_function = rowSums (Y) * Y_prob [, 0] * Y_prob 
[, 1]
-                            link_gradient_normalized = (1 + linear_terms ** 2) 
* 3.1415926535897932384626433832795
+                            link_gradient_normalized = (1 + linear_terms ** 2) 
* PI
                             g_Y =  rowSums (Y) * y_residual / (var_function * 
link_gradient_normalized)
                             w   = (rowSums (Y) ** 2) / (var_function * 
link_gradient_normalized ** 2)
                         
@@ -915,8 +915,8 @@ def glm_log_likelihood_part(linear_terms: matrix[float], Y: 
matrix[float],
         is_natural_parameter_log_zero = zeros_r
         if (var_power == 1.0 & link_power == 0.0): # Poisson.log
             b_cumulant = exp (linear_terms)
-            is_natural_parameter_log_zero = (linear_terms == (-1.0/0.0))
-            natural_parameters = replace (target = linear_terms, pattern = 
-1.0/0.0, replacement = 0)
+            is_natural_parameter_log_zero = (linear_terms == -INF)
+            natural_parameters = replace (target = linear_terms, pattern = 
-INF, replacement = 0)
         else:
             if (var_power == 1.0 & link_power == 1.0): # Poisson.id
                 if (sum (linear_terms < 0.0) == 0):
@@ -1040,12 +1040,12 @@ def glm_log_likelihood_part(linear_terms: 
matrix[float], Y: matrix[float],
             
         
         if (sum (is_natural_parameter_log_zero * abs (Y)) > 0.0):
-            log_l = -1.0 / 0.0
+            log_l = -INF
             isNaN = 1
         if (isNaN == 0):
             log_l = sum (Y * natural_parameters - b_cumulant)
             if (log_l != log_l | (log_l == log_l + 1.0 & log_l == log_l * 
2.0)):
-                log_l = -1.0 / 0.0
+                log_l = -INF
                 isNaN = 1
     # end if (dist_type == 1 & link_type == 1): # POWER DISTRIBUTION
     
@@ -1059,11 +1059,11 @@ def glm_log_likelihood_part(linear_terms: 
matrix[float], Y: matrix[float],
                 if (log_l != log_l | (log_l == log_l + 1.0 & log_l == log_l * 
2.0)):
                     isNaN = 1
             else:
-                log_l = -1.0 / 0.0
+                log_l = -INF
                 isNaN = 1
     
     if (isNaN == 1):
-        log_l = - 1.0 / 0.0
+        log_l = - INF
 
 
 def binomial_probability_two_column(linear_terms: matrix[float], link_type: 
int, link_power: float) -> (Y_prob: matrix[float], isNaN: int):
@@ -1100,11 +1100,11 @@ def binomial_probability_two_column(linear_terms: 
matrix[float], link_type: int,
                 else:
                     isNaN = 1
     else: # Binomial.non_power
-        is_LT_pos_infinite = (linear_terms == (1.0/0.0))
-        is_LT_neg_infinite = (linear_terms == (-1.0/0.0))
+        is_LT_pos_infinite = (linear_terms == INF)
+        is_LT_neg_infinite = (linear_terms == -INF)
         is_LT_infinite = dot(is_LT_pos_infinite, one_zero) + 
dot(is_LT_neg_infinite, zero_one)
-        finite_linear_terms = replace (target =        linear_terms, pattern = 
 1.0/0.0, replacement = 0)
-        finite_linear_terms = replace (target = finite_linear_terms, pattern = 
-1.0/0.0, replacement = 0)
+        finite_linear_terms = replace (target =        linear_terms, pattern = 
 INF, replacement = 0)
+        finite_linear_terms = replace (target = finite_linear_terms, pattern = 
-INF, replacement = 0)
         if (link_type == 2): # Binomial.logit
             Y_prob = dot(exp (finite_linear_terms), one_zero) + dot(ones_r, 
zero_one)
             Y_prob = Y_prob / (dot(rowSums (Y_prob), ones_2))
@@ -1128,7 +1128,7 @@ def binomial_probability_two_column(linear_terms: 
matrix[float], link_type: int,
                     Y_prob [, 1] = the_exp_exp
                 else:
                     if (link_type == 5): # Binomial.cauchit
-                        Y_prob = 0.5 + (dot(atan (finite_linear_terms), 
p_one_m_one)) / 3.1415926535897932384626433832795
+                        Y_prob = 0.5 + (dot(atan (finite_linear_terms), 
p_one_m_one)) / PI
                     else:
                         isNaN = 1
         Y_prob = Y_prob * (dot((1.0 - rowSums (is_LT_infinite)), ones_2)) + 
is_LT_infinite
@@ -1254,7 +1254,7 @@ def straightenX(X: matrix[float], eps: float, 
max_iter_CG: int) -> (w: matrix[fl
 def round_to_print(x_to_truncate: float) -> (mantissa: float, eee: int):
     mantissa = 1.0
     eee = 0
-    positive_infinity = 1.0 / 0.0
+    positive_infinity = INF
     x = abs (x_to_truncate)
     if (x != x / 2.0):
         log_ten = log (10.0)

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/src/test/scripts/applications/impute/imputeGaussMCMC.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/impute/imputeGaussMCMC.dml 
b/src/test/scripts/applications/impute/imputeGaussMCMC.dml
index 6575d3b..edd1df2 100644
--- a/src/test/scripts/applications/impute/imputeGaussMCMC.dml
+++ b/src/test/scripts/applications/impute/imputeGaussMCMC.dml
@@ -386,7 +386,7 @@ while (is_enough_gradient_descent == 0)
         if (acos_argument >= 0.0) {
             coeff_theta = acos_x;
         } else {
-            coeff_theta = 3.1415926535897932384626433832795 - acos_x;
+            coeff_theta = PI - acos_x;
         }
         
         root_1 = - coeff_aa / 3.0 - 2.0 * sqrt (coeff_Q) * cos (coeff_theta / 
3.0);
@@ -484,7 +484,7 @@ for (swap_i in 1:num_swaps) {
     }
 }
 
-pi = 3.1415926535897932384626433832795;
+pi = PI;
 zero = matrix (0.0, rows = 1, cols = 1);
 
 isVar = colSums (SampleOrder [1 : num_frees, ]);

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/src/test/scripts/applications/impute/imputeGaussMCMC.nogradient.dml
----------------------------------------------------------------------
diff --git 
a/src/test/scripts/applications/impute/imputeGaussMCMC.nogradient.dml 
b/src/test/scripts/applications/impute/imputeGaussMCMC.nogradient.dml
index 897fc21..483b37f 100644
--- a/src/test/scripts/applications/impute/imputeGaussMCMC.nogradient.dml
+++ b/src/test/scripts/applications/impute/imputeGaussMCMC.nogradient.dml
@@ -247,7 +247,7 @@ for (swap_i in 1:num_swaps) {
     }
 }
 
-pi = 3.1415926535897932384626433832795;
+pi = PI;
 zero = matrix (0.0, rows = 1, cols = 1);
 
 isVar = colSums (SampleOrder [1 : num_frees, ]);

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/src/test/scripts/applications/impute/old/imputeGaussMCMC.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/impute/old/imputeGaussMCMC.dml 
b/src/test/scripts/applications/impute/old/imputeGaussMCMC.dml
index 7f9a875..58059b9 100644
--- a/src/test/scripts/applications/impute/old/imputeGaussMCMC.dml
+++ b/src/test/scripts/applications/impute/old/imputeGaussMCMC.dml
@@ -242,7 +242,7 @@ sum_of_observed_reports = matrix (0.0, rows = num_attrs, 
cols = num_terms);
 sum_of_observed_params = matrix (0.0, rows = num_params, cols = 1);
 
 num_of_observed_reports = 0;
-pi = 3.1415926535897932384626433832795;
+pi = PI;
 zero = matrix (0.0, rows = 1, cols = 1);
 
 # Starting MCMC iterations

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/src/test/scripts/applications/impute/tmp.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/applications/impute/tmp.dml 
b/src/test/scripts/applications/impute/tmp.dml
index 605ab3e..2e7acbe 100644
--- a/src/test/scripts/applications/impute/tmp.dml
+++ b/src/test/scripts/applications/impute/tmp.dml
@@ -22,7 +22,7 @@
 setwd ("test/scripts/applications/glm");
 source ("Misc.dml");
 
-blahblah = 0.0 / 0.0; # -0.00099999999;
+blahblah = NaN; # -0.00099999999;
 print (blahblah);
 x = matrix (0.0, rows = 55, cols = 1);
 x [55, 1] = blahblah;
@@ -73,7 +73,7 @@ coeff_d =  3.14 * (2 * (-3) * (-1.7));
         if (acos_argument >= 0.0) {
             coeff_theta = acos_x;
         } else {
-            coeff_theta = 3.1415926535897932384626433832795 - acos_x;
+            coeff_theta = PI - acos_x;
         }
         
         root_1 = - coeff_aa / 3.0 - 2.0 * sqrt (coeff_Q) * cos (coeff_theta / 
3.0);

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/src/test/scripts/functions/codegenalg/Algorithm_GLM.R
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/codegenalg/Algorithm_GLM.R 
b/src/test/scripts/functions/codegenalg/Algorithm_GLM.R
index a1fd302..8c14960 100644
--- a/src/test/scripts/functions/codegenalg/Algorithm_GLM.R
+++ b/src/test/scripts/functions/codegenalg/Algorithm_GLM.R
@@ -151,7 +151,7 @@ glm_initialize <- function (X, Y, dist_type, var_power, 
link_type, link_power, i
                     - log (- log (0.5)) * (is_zero_y_corr + is_one_y_corr)
                     + is_one_y_corr / (1.0 - is_one_y_corr) - is_zero_y_corr / 
(1.0 - is_zero_y_corr);
             } else { if (link_type == 5)                  { # Binomial.cauchit
-                linear_terms = tan ((y_corr - 0.5) * 
3.1415926535897932384626433832795)
+                linear_terms = tan ((y_corr - 0.5) * PI)
                     + is_one_y_corr / (1.0 - is_one_y_corr) - is_zero_y_corr / 
(1.0 - is_zero_y_corr);
         }}  }}}}}
     }
@@ -259,11 +259,11 @@ glm_dist <- function (linear_terms, Y,
                 w   =  rowSums (Y) * vec1 / link_power ^ 2;
             }
         } else {
-            is_LT_pos_infinite = (linear_terms ==  1.0/0.0);
-            is_LT_neg_infinite = (linear_terms == -1.0/0.0);
+            is_LT_pos_infinite = (linear_terms ==  INF);
+            is_LT_neg_infinite = (linear_terms == -INF);
             is_LT_infinite = is_LT_pos_infinite %*% one_zero + 
is_LT_neg_infinite %*% zero_one;
-            finite_linear_terms = replace (target =        linear_terms, 
pattern =  1.0/0.0, replacement = 0);
-            finite_linear_terms = replace (target = finite_linear_terms, 
pattern = -1.0/0.0, replacement = 0);
+            finite_linear_terms = replace (target =        linear_terms, 
pattern =  INF, replacement = 0);
+            finite_linear_terms = replace (target = finite_linear_terms, 
pattern = -INF, replacement = 0);
             if (link_type == 2)                           { # Binomial.logit
                 Y_prob = exp (finite_linear_terms) %*% one_zero + ones_r %*% 
zero_one;
                 Y_prob = Y_prob / (rowSums (Y_prob) %*% ones_2);
@@ -291,11 +291,11 @@ glm_dist <- function (linear_terms, Y,
                 g_Y =  (rowSums (Y) * the_exp_exp - Y [, 2]) / the_exp_ratio;
                 w   =  the_exp_exp * the_exp * rowSums (Y) / the_exp_ratio;
             } else { if (link_type == 5)                  { # Binomial.cauchit
-                Y_prob = 0.5 + (atan (finite_linear_terms) %*% p_one_m_one) / 
3.1415926535897932384626433832795;
+                Y_prob = 0.5 + (atan (finite_linear_terms) %*% p_one_m_one) / 
PI;
                 Y_prob = Y_prob * ((1.0 - rowSums (is_LT_infinite)) %*% 
ones_2) + is_LT_infinite;
                 y_residual = Y [, 1] * Y_prob [, 2] - Y [, 2] * Y_prob [, 1];
                 var_function = rowSums (Y) * Y_prob [, 1] * Y_prob [, 2];
-                link_gradient_normalized = (1 + linear_terms ^ 2) * 
3.1415926535897932384626433832795;
+                link_gradient_normalized = (1 + linear_terms ^ 2) * PI;
                 g_Y =  rowSums (Y) * y_residual / (var_function * 
link_gradient_normalized);
                 w   = (rowSums (Y) ^ 2) / (var_function * 
link_gradient_normalized ^ 2);
             }}}}   
@@ -321,8 +321,8 @@ glm_log_likelihood_part <- function (linear_terms, Y,
         is_natural_parameter_log_zero = zeros_r;
         if          (var_power == 1.0 & link_power == 0.0)  { # Poisson.log
             b_cumulant = exp (linear_terms);
-            is_natural_parameter_log_zero = (linear_terms == (-1.0/0.0));
-            natural_parameters = replace (target = linear_terms, pattern = 
-1.0/0.0, replacement = 0);
+            is_natural_parameter_log_zero = (linear_terms == (-INF));
+            natural_parameters = replace (target = linear_terms, pattern = 
-INF, replacement = 0);
         } else { if (var_power == 1.0 & link_power == 1.0)  { # Poisson.id
             if (sum ((linear_terms < 0.0)) == 0)  {
                 b_cumulant = linear_terms;
@@ -398,14 +398,14 @@ glm_log_likelihood_part <- function (linear_terms, Y,
             }}}}
         }}}}} }}}}}
         if (sum (is_natural_parameter_log_zero * abs (Y)) > 0.0) {
-            log_l = -1.0 / 0.0;
+            log_l = -INF;
             isNaN = 1;
         }
         if (isNaN == 0)
         {
             log_l = sum (Y * natural_parameters - b_cumulant);
             if (log_l != log_l | (log_l == log_l + 1.0 & log_l == log_l * 
2.0)) {
-                log_l = -1.0 / 0.0;
+                log_l = -INF;
                 isNaN = 1;
     }   }   }
     
@@ -424,12 +424,12 @@ glm_log_likelihood_part <- function (linear_terms, Y,
                     isNaN = 1;
                 }
             } else {
-                log_l = -1.0 / 0.0;
+                log_l = -INF;
                 isNaN = 1;
     }   }   }
     
     if (isNaN == 1) {
-        log_l = - 1.0 / 0.0; 
+        log_l = - INF; 
     }
 }
 
@@ -469,11 +469,11 @@ binomial_probability_two_column <- function 
(linear_terms, link_type, link_power
             } else {isNaN = 1;}
         }}
     } else {              # Binomial.non_power
-        is_LT_pos_infinite = (linear_terms ==  (1.0/0.0));
-        is_LT_neg_infinite = (linear_terms == (-1.0/0.0));
+        is_LT_pos_infinite = (linear_terms ==  INF);
+        is_LT_neg_infinite = (linear_terms == -INF);
         is_LT_infinite = is_LT_pos_infinite %*% one_zero + is_LT_neg_infinite 
%*% zero_one;
-        finite_linear_terms = replace (target =        linear_terms, pattern = 
 1.0/0.0, replacement = 0);
-        finite_linear_terms = replace (target = finite_linear_terms, pattern = 
-1.0/0.0, replacement = 0);
+        finite_linear_terms = replace (target =        linear_terms, pattern = 
 INF, replacement = 0);
+        finite_linear_terms = replace (target = finite_linear_terms, pattern = 
-INF, replacement = 0);
         if (link_type == 2)             { # Binomial.logit
             Y_prob = exp (finite_linear_terms) %*% one_zero + ones_r %*% 
zero_one;
             Y_prob = Y_prob / (rowSums (Y_prob) %*% ones_2);
@@ -494,7 +494,7 @@ binomial_probability_two_column <- function (linear_terms, 
link_type, link_power
             Y_prob [, 1] = (1 - is_too_small) * (1 - the_exp_exp) + 
is_too_small * the_exp * (1 - the_exp / 2);
             Y_prob [, 2] = the_exp_exp;
         } else { if (link_type == 5)    { # Binomial.cauchit
-            Y_prob = 0.5 + (atan (finite_linear_terms) %*% p_one_m_one) / 
3.1415926535897932384626433832795;
+            Y_prob = 0.5 + (atan (finite_linear_terms) %*% p_one_m_one) / PI;
         } else {
             isNaN = 1;
         }}}}
@@ -651,7 +651,7 @@ round_to_print <- function (x_to_truncate)
 {
     mantissa = 1.0;
     eee = 0;
-    positive_infinity = 1.0 / 0.0;
+    positive_infinity = INF;
     x = abs (x_to_truncate);
     if (x != x / 2.0) {
         log_ten = log (10.0);
@@ -721,15 +721,15 @@ eps = as.double (eps);
 # Default values for output statistics:
 
 termination_code     = 0;
-min_beta             = 0.0 / 0.0;
-i_min_beta           = 0.0 / 0.0;
-max_beta             = 0.0 / 0.0;
-i_max_beta           = 0.0 / 0.0;
-intercept_value      = 0.0 / 0.0;
-dispersion           = 0.0 / 0.0;
-estimated_dispersion = 0.0 / 0.0;
-deviance_nodisp      = 0.0 / 0.0;
-deviance             = 0.0 / 0.0;
+min_beta             = NaN;
+i_min_beta           = NaN;
+max_beta             = NaN;
+i_max_beta           = NaN;
+intercept_value      = NaN;
+dispersion           = NaN;
+estimated_dispersion = NaN;
+deviance_nodisp      = NaN;
+deviance             = NaN;
 
 print("BEGIN GLM SCRIPT");
 
@@ -1050,7 +1050,7 @@ g_Y = tmp3[1]
 w = tmp3[2];    
     
 pearson_residual_sq = g_Y ^ 2 / w;
-pearson_residual_sq = replace (target = pearson_residual_sq, pattern = 
0.0/0.0, replacement = 0);
+pearson_residual_sq = replace (target = pearson_residual_sq, pattern = NaN, 
replacement = 0);
 # pearson_residual_sq = (y_residual ^ 2) / y_var;
 
 if (num_records > num_features) {

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/src/test/scripts/functions/codegenalg/Algorithm_LinregCG.R
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/codegenalg/Algorithm_LinregCG.R 
b/src/test/scripts/functions/codegenalg/Algorithm_LinregCG.R
index 83d1f1f..81ed544 100644
--- a/src/test/scripts/functions/codegenalg/Algorithm_LinregCG.R
+++ b/src/test/scripts/functions/codegenalg/Algorithm_LinregCG.R
@@ -129,8 +129,8 @@ if (n > m_ext) {
     dispersion  = ss_res / (n - m_ext);
     adjusted_R2 = 1 - dispersion / (ss_avg_tot / (n - 1));
 } else {
-    dispersion  = 0.0 / 0.0;
-    adjusted_R2 = 0.0 / 0.0;
+    dispersion  = NaN;
+    adjusted_R2 = NaN;
 }
 
 plain_R2_nobias = 1 - ss_avg_res / ss_avg_tot;
@@ -139,15 +139,15 @@ if (deg_freedom > 0) {
     var_res = ss_avg_res / deg_freedom;
     adjusted_R2_nobias = 1 - var_res / (ss_avg_tot / (n - 1));
 } else {
-    var_res = 0.0 / 0.0;
-    adjusted_R2_nobias = 0.0 / 0.0;
+    var_res = NaN;
+    adjusted_R2_nobias = NaN;
 }
 
 plain_R2_vs_0 = 1 - ss_res / ss_tot;
 if (n > m) {
     adjusted_R2_vs_0 = 1 - (ss_res / (n - m)) / (ss_tot / n);
 } else {
-    adjusted_R2_vs_0 = 0.0 / 0.0;
+    adjusted_R2_vs_0 = NaN;
 }
 
 if (intercept_status == 2) {

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/src/test/scripts/functions/jmlc/reuse-glm-predict.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/jmlc/reuse-glm-predict.dml 
b/src/test/scripts/functions/jmlc/reuse-glm-predict.dml
index f50d399..468464a 100644
--- a/src/test/scripts/functions/jmlc/reuse-glm-predict.dml
+++ b/src/test/scripts/functions/jmlc/reuse-glm-predict.dml
@@ -88,20 +88,20 @@ if (fileY != " ")
     
     # Statistics To Compute:
     
-    Z_logl               = 0.0 / 0.0;
-    Z_logl_pValue        = 0.0 / 0.0;
-    X2_pearson           = 0.0 / 0.0;
+    Z_logl               = NaN;
+    Z_logl_pValue        = NaN;
+    X2_pearson           = NaN;
     df_pearson           = -1;
-    G2_deviance          = 0.0 / 0.0;
+    G2_deviance          = NaN;
     df_deviance          = -1;
-    X2_pearson_pValue    = 0.0 / 0.0;
-    G2_deviance_pValue   = 0.0 / 0.0;
-    Z_logl_scaled        = 0.0 / 0.0;
-    Z_logl_scaled_pValue = 0.0 / 0.0;
-    X2_scaled            = 0.0 / 0.0;
-    X2_scaled_pValue     = 0.0 / 0.0;
-    G2_scaled            = 0.0 / 0.0;
-    G2_scaled_pValue     = 0.0 / 0.0;
+    X2_pearson_pValue    = NaN;
+    G2_deviance_pValue   = NaN;
+    Z_logl_scaled        = NaN;
+    Z_logl_scaled_pValue = NaN;
+    X2_scaled            = NaN;
+    X2_scaled_pValue     = NaN;
+    G2_scaled            = NaN;
+    G2_scaled_pValue     = NaN;
     
     # set Y_counts to avoid 'Initialization of Y_counts depends on if-else 
execution' warning
     Y_counts = matrix(0.0, rows=1, cols=1);
@@ -330,8 +330,8 @@ glm_means_and_vars =
             y_prob [, 1] = (1 - is_too_small) * (1.0 - y_prob [, 2]) + 
is_too_small * elt * (1.0 - elt / 2);
         } else { if (link_type == 5)                      { # Binomial.cauchit
             atan_linear_terms = atan (linear_terms);
-            y_prob [, 1] = 0.5 + atan_linear_terms / 
3.1415926535897932384626433832795;
-            y_prob [, 2] = 0.5 - atan_linear_terms / 
3.1415926535897932384626433832795;
+            y_prob [, 1] = 0.5 + atan_linear_terms / PI;
+            y_prob [, 2] = 0.5 - atan_linear_terms / PI;
         }}}}}}
         means = y_prob;
         ones_ctg = matrix (1, rows = 2, cols = 1);
@@ -356,8 +356,8 @@ glm_partial_loglikelihood_for_power_dist_and_link =   # 
Assumes: dist_type == 1
     num_records = nrow (Y);
     if (var_power == 1.0) { # Poisson
         if (link_power == 0.0)  { # Poisson.log
-            is_natural_parameter_log_zero = (linear_terms == (-1.0/0.0));
-            natural_parameters = replace (target = linear_terms, pattern = 
-1.0/0.0, replacement = 0);
+            is_natural_parameter_log_zero = (linear_terms == -INF);
+            natural_parameters = replace (target = linear_terms, pattern = 
-INF, replacement = 0);
             b_cumulant = exp (linear_terms);
         } else {                  # Poisson.power_nonlog
             is_natural_parameter_log_zero = (linear_terms == 0.0);

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/src/test/scripts/functions/misc/ZeroMatrix_Aggregates.R
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/misc/ZeroMatrix_Aggregates.R 
b/src/test/scripts/functions/misc/ZeroMatrix_Aggregates.R
index 2fdf920..128002d 100644
--- a/src/test/scripts/functions/misc/ZeroMatrix_Aggregates.R
+++ b/src/test/scripts/functions/misc/ZeroMatrix_Aggregates.R
@@ -28,8 +28,8 @@ n = as.integer(args[1]);
 X = matrix(0, n, 0);
 R = rbind(rbind(rbind(rbind(
   as.matrix(sum(X)==0),
-  as.matrix(min(X)==(1.0/0.0))),
-  as.matrix(max(X)==(-1.0/0.0))),
+  as.matrix(min(X)==INF)),
+  as.matrix(max(X)==-INF)),
   as.matrix(is.nan(mean(X)))),
   as.matrix(is.na(sd(X))));
 

http://git-wip-us.apache.org/repos/asf/systemml/blob/0aea2b53/src/test/scripts/functions/misc/ZeroMatrix_Aggregates.dml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/misc/ZeroMatrix_Aggregates.dml 
b/src/test/scripts/functions/misc/ZeroMatrix_Aggregates.dml
index f2975f0..7bf8cba 100644
--- a/src/test/scripts/functions/misc/ZeroMatrix_Aggregates.dml
+++ b/src/test/scripts/functions/misc/ZeroMatrix_Aggregates.dml
@@ -23,10 +23,10 @@
 X = matrix(0, $1, 0);
 # nary rbind not applicable because not supported in MR
 R = rbind(rbind(rbind(rbind(
-  as.matrix(sum(X)==0),           # 0
-  as.matrix(min(X)==(1.0/0.0))),  # INF
-  as.matrix(max(X)==(-1.0/0.0))), # -INF
-  as.matrix(mean(X)!=mean(X))),   # NaN
-  as.matrix(sd(X)!=sd(X)));       # NaN
+  as.matrix(sum(X)==0),         # 0
+  as.matrix(min(X)==INF)),      # INF
+  as.matrix(max(X)==-INF)),     # -INF
+  as.matrix(mean(X)!=mean(X))), # NaN
+  as.matrix(sd(X)!=sd(X)));     # NaN
 
 write(R, $3);

Reply via email to