[SYSTEMML-1925] Fix consistency of SystemML configuration properties

This patch establishes a consistent, common prefix for all configuration
properties. Previously, properties used the prefixes dml, systemml, or no
prefix at all; they are now consolidated under sysml.
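
For illustration, a minimal sketch of how calling code picks up the renamed
keys by going through the DMLConfig constants rather than literal strings,
mirroring the test changes in this patch; the local Spark setup is assumed
and not part of the change:

```java
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.sysml.api.mlcontext.MLContext;
import org.apache.sysml.conf.DMLConfig;

public class SysmlPrefixSketch {
  public static void main(String[] args) {
    // assumed local Spark context, as in APICodegenTest below
    SparkConf conf = new SparkConf().setAppName("SysmlPrefixSketch").setMaster("local");
    JavaSparkContext sc = new JavaSparkContext(conf);
    MLContext ml = new MLContext(sc);

    // DMLConfig.CODEGEN now resolves to "sysml.codegen.enabled"
    // (previously "codegen.enabled"), so callers that reference the
    // constants are unaffected by the rename.
    ml.setConfigProperty(DMLConfig.CODEGEN, "true");
    ml.setConfigProperty(DMLConfig.CP_PARALLEL_OPS, "false");

    sc.stop();
  }
}
```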

Project: http://git-wip-us.apache.org/repos/asf/systemml/repo
Commit: http://git-wip-us.apache.org/repos/asf/systemml/commit/d6139d14
Tree: http://git-wip-us.apache.org/repos/asf/systemml/tree/d6139d14
Diff: http://git-wip-us.apache.org/repos/asf/systemml/diff/d6139d14

Branch: refs/heads/master
Commit: d6139d1470d72b2c21a82ddb7e1898cf6582ed6c
Parents: 0bebfd8
Author: Matthias Boehm <[email protected]>
Authored: Wed Sep 20 22:17:50 2017 -0700
Committer: Matthias Boehm <[email protected]>
Committed: Thu Sep 21 11:54:09 2017 -0700

----------------------------------------------------------------------
 conf/SystemML-config.xml.template               | 54 ++++++++++----------
 docs/beginners-guide-caffe2dml.md               |  8 +--
 docs/standalone-guide.md                        | 24 ++++-----
 .../apache/sysml/api/mlcontext/MLContext.java   |  2 +-
 .../java/org/apache/sysml/conf/DMLConfig.java   | 53 ++++++++++---------
 .../controlprogram/ParForProgramBlock.java      | 21 --------
 src/main/python/systemml/mlcontext.py           |  2 +-
 src/main/python/systemml/mllearn/estimators.py  |  2 +-
 src/main/standalone/SystemML-config.xml         | 24 ++++-----
 src/test/config/SystemML-config.xml             | 24 ++++-----
 .../functions/codegen/APICodegenTest.java       |  3 +-
 .../functions/mlcontext/GNMFTest.java           |  3 +-
 .../SystemML-config-codegen-compress.xml        | 46 ++---------------
 .../codegen/SystemML-config-codegen.xml         | 44 ++--------------
 .../codegen/SystemML-config-codegen6.xml        | 44 ++--------------
 .../compress/SystemML-config-compress.xml       | 39 +-------------
 .../functions/dmlscript/SystemML-config.xml     | 10 ++--
 .../gdfo/SystemML-config-globalopt.xml          | 38 +-------------
 18 files changed, 120 insertions(+), 321 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/systemml/blob/d6139d14/conf/SystemML-config.xml.template
----------------------------------------------------------------------
diff --git a/conf/SystemML-config.xml.template 
b/conf/SystemML-config.xml.template
index ef24e30..6d5bf73 100644
--- a/conf/SystemML-config.xml.template
+++ b/conf/SystemML-config.xml.template
@@ -19,77 +19,77 @@
 
 <root>
    <!-- local fs tmp working directory-->
-   <localtmpdir>/tmp/systemml</localtmpdir>
+   <sysml.localtmpdir>/tmp/systemml</sysml.localtmpdir>
 
    <!-- hdfs tmp working directory--> 
-   <scratch>scratch_space</scratch> 
+   <sysml.scratch>scratch_space</sysml.scratch> 
 
    <!-- compiler optimization level, valid values: 0 | 1 | 2 | 3 | 4, default: 
2 -->
-   <optlevel>2</optlevel>  
+   <sysml.optlevel>2</sysml.optlevel>  
 
    <!-- default number of reduce tasks per MR job, default: 2 x number of 
nodes -->
-   <numreducers>10</numreducers> 
+   <sysml.numreducers>10</sysml.numreducers> 
    
    <!-- override jvm reuse flag for specific MR jobs, valid values: true | 
false  -->
-   <jvmreuse>false</jvmreuse> 
+   <sysml.jvmreuse>false</sysml.jvmreuse> 
 
    <!-- default block dim for binary block files -->
-   <defaultblocksize>1000</defaultblocksize> 
+   <sysml.defaultblocksize>1000</sysml.defaultblocksize> 
 
    <!-- run systemml control program as yarn appmaster, in case of MR1 always 
falls back to client, please disable for debug mode -->
-   <dml.yarn.appmaster>false</dml.yarn.appmaster>
+   <sysml.yarn.appmaster>false</sysml.yarn.appmaster>
 
    <!-- maximum jvm heap size of the dml yarn appmaster in MB, the requested 
memory is 1.5x this parameter -->
-   <dml.yarn.appmaster.mem>2048</dml.yarn.appmaster.mem>
+   <sysml.yarn.appmaster.mem>2048</sysml.yarn.appmaster.mem>
 
    <!-- maximum jvm heap size of the map/reduce tasks in MB, the requested 
memory is 1.5x this parameter, negative values ignored  -->
-   <dml.yarn.mapreduce.mem>2048</dml.yarn.mapreduce.mem>
+   <sysml.yarn.mapreduce.mem>2048</sysml.yarn.mapreduce.mem>
 
    <!-- yarn application submission queue, relevant for default capacity 
scheduler -->
-   <dml.yarn.app.queue>default</dml.yarn.app.queue>
+   <sysml.yarn.app.queue>default</sysml.yarn.app.queue>
    
    <!-- enables multi-threaded operations in singlenode control program -->
-   <cp.parallel.ops>true</cp.parallel.ops>
+   <sysml.cp.parallel.ops>true</sysml.cp.parallel.ops>
    
    <!-- enables multi-threaded read/write in singlenode control program -->
-   <cp.parallel.io>true</cp.parallel.io>
+   <sysml.cp.parallel.io>true</sysml.cp.parallel.io>
    
    <!-- enables compressed linear algebra, experimental feature -->
-   <compressed.linalg>auto</compressed.linalg>
+   <sysml.compressed.linalg>auto</sysml.compressed.linalg>
    
    <!-- enables operator fusion via code generation, experimental feature -->
-   <codegen.enabled>false</codegen.enabled>
+   <sysml.codegen.enabled>false</sysml.codegen.enabled>
    
    <!-- set the codegen java compiler (auto, janino, javac) -->
-   <codegen.compiler>auto</codegen.compiler>
+   <sysml.codegen.compiler>auto</sysml.codegen.compiler>
    
    <!-- if codegen.enabled, enables source code caching of fused operators -->
-   <codegen.plancache>false</codegen.plancache>
+   <sysml.codegen.plancache>true</sysml.codegen.plancache>
    
    <!-- if codegen.enabled, compile literals as constants: 1..heuristic, 
2..always -->
-   <codegen.literals>1</codegen.literals>
+   <sysml.codegen.literals>1</sysml.codegen.literals>
    
    <!-- enables native blas for matrix multiplication and convolution, 
experimental feature (options: auto, mkl, openblas, none) -->
-   <native.blas>none</native.blas>
+   <sysml.native.blas>none</sysml.native.blas>
 
    <!-- prints finegrained statistics information -->
-   <systemml.stats.finegrained>false</systemml.stats.finegrained>
+   <sysml.stats.finegrained>false</sysml.stats.finegrained>
    
    <!-- prints extra statistics information for GPU -->
-   <systemml.stats.extraGPU>false</systemml.stats.extraGPU>
+   <sysml.stats.extraGPU>false</sysml.stats.extraGPU>
 
    <!-- prints extra statistics information for Deep Neural Networks done in 
CP mode -->
-   <systemml.stats.extraDNN>false</systemml.stats.extraDNN>
+   <sysml.stats.extraDNN>false</sysml.stats.extraDNN>
 
     <!-- sets the GPUs to use per process, -1 for all GPUs, a specific GPU 
number (5), a range (eg: 0-2) or a comma separated list (eg: 0,2,4)-->
-    <systemml.gpu.availableGPUs>-1</systemml.gpu.availableGPUs>
+    <sysml.gpu.availableGPUs>-1</sysml.gpu.availableGPUs>
     
     <!-- whether to synchronize GPUs after every GPU instruction -->
-    <systemml.gpu.sync.postProcess>true</systemml.gpu.sync.postProcess>
+    <sysml.gpu.sync.postProcess>true</sysml.gpu.sync.postProcess>
     
     <!-- whether to perform eager CUDA free on rmvar instruction -->
-    <systemml.gpu.eager.cudaFree>false</systemml.gpu.eager.cudaFree>
-    
-    <!-- maximum wrap length for instruction and miscellaneous timer column of 
statistics -->
-   <systemml.stats.maxWrapLength>30</systemml.stats.maxWrapLength>
+    <sysml.gpu.eager.cudaFree>false</sysml.gpu.eager.cudaFree>
+   
+   <!-- maximum wrap length for instruction and miscellaneous timer column of 
statistics -->
+   <sysml.stats.maxWrapLength>30</sysml.stats.maxWrapLength>
 </root>

http://git-wip-us.apache.org/repos/asf/systemml/blob/d6139d14/docs/beginners-guide-caffe2dml.md
----------------------------------------------------------------------
diff --git a/docs/beginners-guide-caffe2dml.md 
b/docs/beginners-guide-caffe2dml.md
index 7671c32..12b21d4 100644
--- a/docs/beginners-guide-caffe2dml.md
+++ b/docs/beginners-guide-caffe2dml.md
@@ -152,8 +152,8 @@ Iter:2000, validation loss:173.66147359346, validation 
accuracy:97.4897540983606
 
 - Print the generated DML script along with classification report:  
`lenet.set(debug=True)`
 - Print the heavy hitters instruction and the execution plan (advanced users): 
`lenet.setStatistics(True).setExplain(True)`
-- (Optional but recommended) Enable [native 
BLAS](http://apache.github.io/systemml/native-backend): 
`lenet.setConfigProperty("native.blas", "auto")`
-- Enable experimental feature such as codegen: 
`lenet.setConfigProperty("codegen.enabled", 
"true").setConfigProperty("codegen.plancache", "true")`
+- (Optional but recommended) Enable [native 
BLAS](http://apache.github.io/systemml/native-backend): 
`lenet.setConfigProperty("sysml.native.blas", "auto")`
+- Enable experimental feature such as codegen: 
`lenet.setConfigProperty("sysml.codegen.enabled", 
"true").setConfigProperty("sysml.codegen.plancache", "true")`
 - Force GPU execution (please make sure the required jcuda dependency are 
included): lenet.setGPU(True).setForceGPU(True)
 
 Unlike Caffe where default train and test algorithm is `minibatch`, you can 
specify the
@@ -242,13 +242,13 @@ If you are using OpenBLAS, please ensure that it was 
built with `USE_OPENMP` fla
 For more detail see http://apache.github.io/systemml/native-backend
 
 ```python
-caffe2dmlObject.setConfigProperty("native.blas", "auto")
+caffe2dmlObject.setConfigProperty("sysml.native.blas", "auto")
 ```
 
 - Turn on the experimental codegen feature. This should help reduce 
unnecessary allocation cost after every binary operation.
 
 ```python
-caffe2dmlObject.setConfigProperty("codegen.enabled", 
"true").setConfigProperty("codegen.plancache", "true")
+caffe2dmlObject.setConfigProperty("sysml.codegen.enabled", 
"true").setConfigProperty("sysml.codegen.plancache", "true")
 ```
 
 - Tuned the [Garbage 
Collector](http://spark.apache.org/docs/latest/tuning.html#garbage-collection-tuning).
 

http://git-wip-us.apache.org/repos/asf/systemml/blob/d6139d14/docs/standalone-guide.md
----------------------------------------------------------------------
diff --git a/docs/standalone-guide.md b/docs/standalone-guide.md
index a401c30..7116f25 100644
--- a/docs/standalone-guide.md
+++ b/docs/standalone-guide.md
@@ -324,18 +324,18 @@ The `l2-svm-predict.dml` algorithm is used on our test 
data sample to predict th
 The console output should show the accuracy of the trained model in percent, 
i.e.:
 
     15/09/01 01:32:51 INFO api.DMLScript: BEGIN DML run 09/01/2015 01:32:51
-    15/09/01 01:32:51 INFO conf.DMLConfig: Updating localtmpdir with value 
/tmp/systemml
-    15/09/01 01:32:51 INFO conf.DMLConfig: Updating scratch with value 
scratch_space
-    15/09/01 01:32:51 INFO conf.DMLConfig: Updating optlevel with value 2
-    15/09/01 01:32:51 INFO conf.DMLConfig: Updating numreducers with value 10
-    15/09/01 01:32:51 INFO conf.DMLConfig: Updating jvmreuse with value false
-    15/09/01 01:32:51 INFO conf.DMLConfig: Updating defaultblocksize with 
value 1000
-    15/09/01 01:32:51 INFO conf.DMLConfig: Updating dml.yarn.appmaster with 
value false
-    15/09/01 01:32:51 INFO conf.DMLConfig: Updating dml.yarn.appmaster.mem 
with value 2048
-    15/09/01 01:32:51 INFO conf.DMLConfig: Updating dml.yarn.mapreduce.mem 
with value 2048
-    15/09/01 01:32:51 INFO conf.DMLConfig: Updating dml.yarn.app.queue with 
value default
-    15/09/01 01:32:51 INFO conf.DMLConfig: Updating cp.parallel.ops with value 
true
-    15/09/01 01:32:51 INFO conf.DMLConfig: Updating cp.parallel.io with value 
true
+    15/09/01 01:32:51 INFO conf.DMLConfig: Updating sysml.localtmpdir with 
value /tmp/systemml
+    15/09/01 01:32:51 INFO conf.DMLConfig: Updating sysml.scratch with value 
scratch_space
+    15/09/01 01:32:51 INFO conf.DMLConfig: Updating sysml.optlevel with value 2
+    15/09/01 01:32:51 INFO conf.DMLConfig: Updating sysml.numreducers with 
value 10
+    15/09/01 01:32:51 INFO conf.DMLConfig: Updating sysml.jvmreuse with value 
false
+    15/09/01 01:32:51 INFO conf.DMLConfig: Updating sysml.defaultblocksize 
with value 1000
+    15/09/01 01:32:51 INFO conf.DMLConfig: Updating sysml.yarn.appmaster with 
value false
+    15/09/01 01:32:51 INFO conf.DMLConfig: Updating sysml.yarn.appmaster.mem 
with value 2048
+    15/09/01 01:32:51 INFO conf.DMLConfig: Updating sysml.yarn.mapreduce.mem 
with value 2048
+    15/09/01 01:32:51 INFO conf.DMLConfig: Updating sysml.yarn.app.queue with 
value default
+    15/09/01 01:32:51 INFO conf.DMLConfig: Updating sysml.cp.parallel.ops 
with value true
+    15/09/01 01:32:51 INFO conf.DMLConfig: Updating sysml.cp.parallel.io 
with value true
     Accuracy (%): 74.14965986394557
     15/09/01 01:32:52 INFO api.DMLScript: SystemML Statistics:
     Total execution time:              0.130 sec.

http://git-wip-us.apache.org/repos/asf/systemml/blob/d6139d14/src/main/java/org/apache/sysml/api/mlcontext/MLContext.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/api/mlcontext/MLContext.java 
b/src/main/java/org/apache/sysml/api/mlcontext/MLContext.java
index 35720a5..b6b7eb0 100644
--- a/src/main/java/org/apache/sysml/api/mlcontext/MLContext.java
+++ b/src/main/java/org/apache/sysml/api/mlcontext/MLContext.java
@@ -291,7 +291,7 @@ public class MLContext {
 
        /**
         * Set configuration property, such as
-        * {@code setConfigProperty("localtmpdir", "/tmp/systemml")}.
+        * {@code setConfigProperty("sysml.localtmpdir", "/tmp/systemml")}.
         *
         * @param propertyName
         *            property name
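
As a usage sketch of the updated javadoc above — assuming an MLContext has
already been constructed as elsewhere in this patch — the raw string form and
the constant form are equivalent:

```java
import org.apache.sysml.api.mlcontext.MLContext;
import org.apache.sysml.conf.DMLConfig;

class ConfigPropertyExample {
  // ml is assumed to be an already constructed MLContext, e.g. new MLContext(sc)
  static void configure(MLContext ml) {
    // string form, as in the updated javadoc: the key now carries the sysml. prefix
    ml.setConfigProperty("sysml.localtmpdir", "/tmp/systemml");
    // equivalent via the renamed constant (DMLConfig.LOCAL_TMP_DIR = "sysml.localtmpdir")
    ml.setConfigProperty(DMLConfig.LOCAL_TMP_DIR, "/tmp/systemml");
  }
}
```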

http://git-wip-us.apache.org/repos/asf/systemml/blob/d6139d14/src/main/java/org/apache/sysml/conf/DMLConfig.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/conf/DMLConfig.java 
b/src/main/java/org/apache/sysml/conf/DMLConfig.java
index 857071d..56f96e4 100644
--- a/src/main/java/org/apache/sysml/conf/DMLConfig.java
+++ b/src/main/java/org/apache/sysml/conf/DMLConfig.java
@@ -61,36 +61,35 @@ public class DMLConfig
        
        // external names of configuration properties 
        // (single point of change for all internal refs)
-       public static final String LOCAL_TMP_DIR        = "localtmpdir";
-       public static final String SCRATCH_SPACE        = "scratch";
-       public static final String OPTIMIZATION_LEVEL   = "optlevel";   
-       public static final String NUM_REDUCERS         = "numreducers";
-       public static final String JVM_REUSE            = "jvmreuse";
-       public static final String DEFAULT_BLOCK_SIZE   = "defaultblocksize";   
-       public static final String YARN_APPMASTER       = "dml.yarn.appmaster"; 
        
-       public static final String YARN_APPMASTERMEM    = 
"dml.yarn.appmaster.mem"; 
-       public static final String YARN_MAPREDUCEMEM    = 
"dml.yarn.mapreduce.mem"; 
-       public static final String YARN_APPQUEUE        = "dml.yarn.app.queue"; 
-       public static final String CP_PARALLEL_OPS      = "cp.parallel.ops";
-       public static final String CP_PARALLEL_IO       = "cp.parallel.io";
-       public static final String COMPRESSED_LINALG    = "compressed.linalg"; 
//auto, true, false
-       public static final String NATIVE_BLAS          = "native.blas";
-       public static final String CODEGEN              = "codegen.enabled"; 
//boolean
-       public static final String CODEGEN_COMPILER     = "codegen.compiler"; 
//see SpoofCompiler.CompilerType
-       public static final String CODEGEN_PLANCACHE    = "codegen.plancache"; 
//boolean
-       public static final String CODEGEN_LITERALS     = "codegen.literals"; 
//1..heuristic, 2..always
-       
-       public static final String EXTRA_FINEGRAINED_STATS = 
"systemml.stats.finegrained"; //boolean
-       public static final String STATS_MAX_WRAP_LEN = 
"systemml.stats.maxWrapLength"; //int
-       public static final String EXTRA_GPU_STATS      = 
"systemml.stats.extraGPU"; //boolean
-       public static final String EXTRA_DNN_STATS      = 
"systemml.stats.extraDNN"; //boolean
-       public static final String AVAILABLE_GPUS       = 
"systemml.gpu.availableGPUs"; // String to specify which GPUs to use (a range, 
all GPUs, comma separated list or a specific GPU)
-       public static final String SYNCHRONIZE_GPU      = 
"systemml.gpu.sync.postProcess"; // boolean: whether to synchronize GPUs after 
every instruction 
-       public static final String EAGER_CUDA_FREE              = 
"systemml.gpu.eager.cudaFree"; // boolean: whether to perform eager CUDA free 
on rmvar
+       public static final String LOCAL_TMP_DIR        = "sysml.localtmpdir";
+       public static final String SCRATCH_SPACE        = "sysml.scratch";
+       public static final String OPTIMIZATION_LEVEL   = "sysml.optlevel";
+       public static final String NUM_REDUCERS         = "sysml.numreducers";
+       public static final String JVM_REUSE            = "sysml.jvmreuse";
+       public static final String DEFAULT_BLOCK_SIZE   = 
"sysml.defaultblocksize";
+       public static final String YARN_APPMASTER       = 
"sysml.yarn.appmaster";
+       public static final String YARN_APPMASTERMEM    = 
"sysml.yarn.appmaster.mem";
+       public static final String YARN_MAPREDUCEMEM    = 
"sysml.yarn.mapreduce.mem";
+       public static final String YARN_APPQUEUE        = 
"sysml.yarn.app.queue"; 
+       public static final String CP_PARALLEL_OPS      = 
"sysml.cp.parallel.ops";
+       public static final String CP_PARALLEL_IO       = 
"sysml.cp.parallel.io";
+       public static final String COMPRESSED_LINALG    = 
"sysml.compressed.linalg"; //auto, true, false
+       public static final String NATIVE_BLAS          = "sysml.native.blas";
+       public static final String CODEGEN              = 
"sysml.codegen.enabled"; //boolean
+       public static final String CODEGEN_COMPILER     = 
"sysml.codegen.compiler"; //see SpoofCompiler.CompilerType
+       public static final String CODEGEN_PLANCACHE    = 
"sysml.codegen.plancache"; //boolean
+       public static final String CODEGEN_LITERALS     = 
"sysml.codegen.literals"; //1..heuristic, 2..always
        
+       public static final String EXTRA_FINEGRAINED_STATS = 
"sysml.stats.finegrained"; //boolean
+       public static final String STATS_MAX_WRAP_LEN   = 
"sysml.stats.maxWrapLength"; //int
+       public static final String EXTRA_GPU_STATS      = 
"sysml.stats.extraGPU"; //boolean
+       public static final String EXTRA_DNN_STATS      = 
"sysml.stats.extraDNN"; //boolean
+       public static final String AVAILABLE_GPUS       = 
"sysml.gpu.availableGPUs"; // String to specify which GPUs to use (a range, all 
GPUs, comma separated list or a specific GPU)
+       public static final String SYNCHRONIZE_GPU      = 
"sysml.gpu.sync.postProcess"; // boolean: whether to synchronize GPUs after 
every instruction 
+       public static final String EAGER_CUDA_FREE              = 
"sysml.gpu.eager.cudaFree"; // boolean: whether to perform eager CUDA free on 
rmvar
        // Fraction of available memory to use. The available memory is 
computed when the GPUContext is created
        // to handle the tradeoff on calling cudaMemGetInfo too often.
-       public static final String GPU_MEMORY_UTILIZATION_FACTOR    = 
"gpu.memory.util.factor";
+       public static final String GPU_MEMORY_UTILIZATION_FACTOR = 
"sysml.gpu.memory.util.factor";
 
        // supported prefixes for custom map/reduce configurations
        public static final String PREFIX_MAPRED = "mapred";

http://git-wip-us.apache.org/repos/asf/systemml/blob/d6139d14/src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java
----------------------------------------------------------------------
diff --git 
a/src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java 
b/src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java
index 4dda1b4..07ca3ee 100644
--- 
a/src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java
+++ 
b/src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java
@@ -1330,27 +1330,6 @@ public class ParForProgramBlock extends ForProgramBlock
                throws DMLRuntimeException 
        {
                //TODO needs as precondition a systematic treatment of 
persistent read information.
-               /*
-               if( LIVEVAR_AWARE_CLEANUP && _sb != null)
-               {
-                       //cleanup shared variables after they are unpinned
-                       VariableSet liveout = _sb.liveOut();
-                       for( Entry<String, Boolean> var : varState.entrySet() ) 
-                       {
-                               String varname = var.getKey();
-                               boolean unpinned = var.getValue();
-                               String fprefix = 
ConfigurationManager.getConfig().getTextValue("scratch") 
-                                                        + Lop.FILE_SEPARATOR + 
Lop.PROCESS_PREFIX + DMLScript.getUUID();
-                               
-                               //delete unpinned vars if not in liveout 
(similar like rmvar) and not persistent input
-                               if( unpinned && 
!liveout.containsVariable(varname) )
-                                             
-                               {
-                                       
VariableCPInstruction.processRemoveVariableInstruction(ec,varname);
-                               }
-                       }
-               }
-               */
        }
        
        /**
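
A side note on the dead code removed above: it resolved the scratch directory
with a hard-coded getTextValue("scratch"), exactly the kind of literal key
that this rename would silently break. A minimal sketch of the constant-based
lookup, with the ConfigurationManager accessor name taken from the removed
block (it may differ in current code):

```java
import org.apache.sysml.conf.ConfigurationManager;
import org.apache.sysml.conf.DMLConfig;

class ScratchLookupSketch {
  static String scratchSpace() {
    // DMLConfig.SCRATCH_SPACE now resolves to "sysml.scratch", so the lookup
    // keeps working across the rename; getConfig() is the accessor used in the
    // removed block and is assumed to still exist under that name.
    return ConfigurationManager.getConfig().getTextValue(DMLConfig.SCRATCH_SPACE);
  }
}
```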

http://git-wip-us.apache.org/repos/asf/systemml/blob/d6139d14/src/main/python/systemml/mlcontext.py
----------------------------------------------------------------------
diff --git a/src/main/python/systemml/mlcontext.py 
b/src/main/python/systemml/mlcontext.py
index 4a555f7..54e1969 100644
--- a/src/main/python/systemml/mlcontext.py
+++ b/src/main/python/systemml/mlcontext.py
@@ -792,7 +792,7 @@ class MLContext(object):
 
     def setConfigProperty(self, propertyName, propertyValue):
         """
-        Set configuration property, such as setConfigProperty("localtmpdir", 
"/tmp/systemml").
+        Set configuration property, such as 
setConfigProperty("sysml.localtmpdir", "/tmp/systemml").
 
         Parameters
         ----------

http://git-wip-us.apache.org/repos/asf/systemml/blob/d6139d14/src/main/python/systemml/mllearn/estimators.py
----------------------------------------------------------------------
diff --git a/src/main/python/systemml/mllearn/estimators.py 
b/src/main/python/systemml/mllearn/estimators.py
index 66f78be..1df1bb8 100644
--- a/src/main/python/systemml/mllearn/estimators.py
+++ b/src/main/python/systemml/mllearn/estimators.py
@@ -138,7 +138,7 @@ class BaseSystemMLEstimator(Estimator):
         
     def setConfigProperty(self, propertyName, propertyValue):
         """
-        Set configuration property, such as setConfigProperty("localtmpdir", 
"/tmp/systemml").
+        Set configuration property, such as 
setConfigProperty("sysml.localtmpdir", "/tmp/systemml").
 
         Parameters
         ----------

http://git-wip-us.apache.org/repos/asf/systemml/blob/d6139d14/src/main/standalone/SystemML-config.xml
----------------------------------------------------------------------
diff --git a/src/main/standalone/SystemML-config.xml 
b/src/main/standalone/SystemML-config.xml
index 9b52a6d..67e1d6c 100644
--- a/src/main/standalone/SystemML-config.xml
+++ b/src/main/standalone/SystemML-config.xml
@@ -19,38 +19,38 @@
 
 <root>
    <!-- local fs tmp working directory-->
-   <localtmpdir>/tmp/systemml</localtmpdir>
+   <sysml.localtmpdir>/tmp/systemml</sysml.localtmpdir>
 
    <!-- hdfs tmp working directory--> 
-   <scratch>scratch_space</scratch> 
+   <sysml.scratch>scratch_space</sysml.scratch> 
 
    <!-- compiler optimization level, valid values: 0 | 1 | 2 | 3 | 4, default: 
2 -->
-   <optlevel>2</optlevel>  
+   <sysml.optlevel>2</sysml.optlevel>  
 
    <!-- default number of reduce tasks per MR job, default: 2 x number of 
nodes -->
-   <numreducers>10</numreducers> 
+   <sysml.numreducers>10</sysml.numreducers> 
    
    <!-- override jvm reuse flag for specific MR jobs, valid values: true | 
false  -->
-   <jvmreuse>false</jvmreuse> 
+   <sysml.jvmreuse>false</sysml.jvmreuse> 
 
    <!-- default block dim for binary block files -->
-   <defaultblocksize>1000</defaultblocksize> 
+   <sysml.defaultblocksize>1000</sysml.defaultblocksize> 
 
    <!-- run systemml control program as yarn appmaster, in case of MR1 always 
falls back to client, please disable for debug mode -->
-   <dml.yarn.appmaster>false</dml.yarn.appmaster>
+   <sysml.yarn.appmaster>false</sysml.yarn.appmaster>
 
    <!-- maximum jvm heap size of the dml yarn appmaster in MB, the requested 
memory is 1.5x this parameter -->
-   <dml.yarn.appmaster.mem>2048</dml.yarn.appmaster.mem>
+   <sysml.yarn.appmaster.mem>2048</sysml.yarn.appmaster.mem>
 
    <!-- maximum jvm heap size of the map/reduce tasks in MB, the requested 
memory is 1.5x this parameter, negative values ignored  -->
-   <dml.yarn.mapreduce.mem>2048</dml.yarn.mapreduce.mem>
+   <sysml.yarn.mapreduce.mem>2048</sysml.yarn.mapreduce.mem>
 
    <!-- yarn application submission queue, relevant for default capacity 
scheduler -->
-   <dml.yarn.app.queue>default</dml.yarn.app.queue>
+   <sysml.yarn.app.queue>default</sysml.yarn.app.queue>
    
    <!-- enables multi-threaded matrix operations in singlenode control program 
-->
-   <cp.parallel.ops>true</cp.parallel.ops>
+   <sysml.cp.parallel.ops>true</sysml.cp.parallel.ops>
    
    <!-- enables multi-threaded read/write in singlenode control program -->
-   <cp.parallel.io>true</cp.parallel.io>
+   <sysml.cp.parallel.io>true</sysml.cp.parallel.io>
 </root>

http://git-wip-us.apache.org/repos/asf/systemml/blob/d6139d14/src/test/config/SystemML-config.xml
----------------------------------------------------------------------
diff --git a/src/test/config/SystemML-config.xml 
b/src/test/config/SystemML-config.xml
index 3b25d99..2b708bd 100644
--- a/src/test/config/SystemML-config.xml
+++ b/src/test/config/SystemML-config.xml
@@ -19,38 +19,38 @@
 
 <root>
    <!-- local fs tmp working directory-->
-   <localtmpdir>/tmp/systemml</localtmpdir>
+   <sysml.localtmpdir>/tmp/systemml</sysml.localtmpdir>
 
    <!-- hdfs tmp working directory--> 
-   <scratch>scratch_space</scratch> 
+   <sysml.scratch>scratch_space</sysml.scratch> 
 
    <!-- compiler optimization level, valid values: 0 | 1 | 2 | 3 | 4, default: 
2 -->
-   <optlevel>2</optlevel>
+   <sysml.optlevel>2</sysml.optlevel>
 
    <!-- default number of reduce tasks per MR job, default: 2 x number of 
nodes -->
-   <numreducers>10</numreducers> 
+   <sysml.numreducers>10</sysml.numreducers> 
    
    <!-- override jvm reuse flag for specific MR jobs, valid values: true | 
false  -->
-   <jvmreuse>false</jvmreuse> 
+   <sysml.jvmreuse>false</sysml.jvmreuse> 
 
    <!-- default block dim for binary block files -->
-   <defaultblocksize>1000</defaultblocksize> 
+   <sysml.defaultblocksize>1000</sysml.defaultblocksize> 
 
    <!-- run systemml control program as yarn appmaster, in case of MR1 always 
falls back to client, please disable for debug mode -->
-   <dml.yarn.appmaster>false</dml.yarn.appmaster>
+   <sysml.yarn.appmaster>false</sysml.yarn.appmaster>
 
    <!-- maximum jvm heap size of the dml yarn appmaster in MB, the requested 
memory is 1.5x this parameter -->
-   <dml.yarn.appmaster.mem>2048</dml.yarn.appmaster.mem>
+   <sysml.yarn.appmaster.mem>2048</sysml.yarn.appmaster.mem>
 
    <!-- maximum jvm heap size of the map/reduce tasks in MB, the requested 
memory is 1.5x this parameter, negative values ignored  -->
-   <dml.yarn.mapreduce.mem>2048</dml.yarn.mapreduce.mem>
+   <sysml.yarn.mapreduce.mem>2048</sysml.yarn.mapreduce.mem>
 
    <!-- yarn application submission queue, relevant for default capacity 
scheduler -->
-   <dml.yarn.app.queue>default</dml.yarn.app.queue>
+   <sysml.yarn.app.queue>default</sysml.yarn.app.queue>
    
    <!-- enables multi-threaded matrix operations in singlenode control program 
-->
-   <cp.parallel.ops>true</cp.parallel.ops>
+   <sysml.cp.parallel.ops>true</sysml.cp.parallel.ops>
    
    <!-- enables multi-threaded read/write in singlenode control program -->
-   <cp.parallel.io>true</cp.parallel.io>
+   <sysml.cp.parallel.io>true</sysml.cp.parallel.io>
 </root>

http://git-wip-us.apache.org/repos/asf/systemml/blob/d6139d14/src/test/java/org/apache/sysml/test/integration/functions/codegen/APICodegenTest.java
----------------------------------------------------------------------
diff --git 
a/src/test/java/org/apache/sysml/test/integration/functions/codegen/APICodegenTest.java
 
b/src/test/java/org/apache/sysml/test/integration/functions/codegen/APICodegenTest.java
index 7e6ead1..de0a26f 100644
--- 
a/src/test/java/org/apache/sysml/test/integration/functions/codegen/APICodegenTest.java
+++ 
b/src/test/java/org/apache/sysml/test/integration/functions/codegen/APICodegenTest.java
@@ -29,6 +29,7 @@ import org.apache.sysml.api.jmlc.PreparedScript;
 import org.apache.sysml.api.mlcontext.MLContext;
 import org.apache.sysml.api.mlcontext.Script;
 import org.apache.sysml.conf.CompilerConfig.ConfigType;
+import org.apache.sysml.conf.DMLConfig;
 import org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;
 import org.apache.sysml.runtime.matrix.data.MatrixBlock;
 import org.apache.sysml.runtime.util.DataConverter;
@@ -91,7 +92,7 @@ public class APICodegenTest extends AutomatedTestBase
                                        
.setAppName("MLContextTest").setMaster("local");
                                JavaSparkContext sc = new 
JavaSparkContext(conf);
                                MLContext ml = new MLContext(sc);
-                               ml.setConfigProperty("codegen.enabled", "true");
+                               ml.setConfigProperty(DMLConfig.CODEGEN, "true");
                                ml.setStatistics(true);
                                Script script = dml(s).in("X", mX).out("R");
                                ml.execute(script);

http://git-wip-us.apache.org/repos/asf/systemml/blob/d6139d14/src/test/java/org/apache/sysml/test/integration/functions/mlcontext/GNMFTest.java
----------------------------------------------------------------------
diff --git 
a/src/test/java/org/apache/sysml/test/integration/functions/mlcontext/GNMFTest.java
 
b/src/test/java/org/apache/sysml/test/integration/functions/mlcontext/GNMFTest.java
index 44f1f15..2f9489b 100644
--- 
a/src/test/java/org/apache/sysml/test/integration/functions/mlcontext/GNMFTest.java
+++ 
b/src/test/java/org/apache/sysml/test/integration/functions/mlcontext/GNMFTest.java
@@ -42,6 +42,7 @@ import org.apache.sysml.api.mlcontext.MatrixMetadata;
 import org.apache.sysml.api.mlcontext.Script;
 import org.apache.sysml.api.mlcontext.ScriptFactory;
 import org.apache.sysml.conf.ConfigurationManager;
+import org.apache.sysml.conf.DMLConfig;
 import org.apache.sysml.parser.ParseException;
 import org.apache.sysml.runtime.DMLRuntimeException;
 import org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtils;
@@ -182,7 +183,7 @@ public class GNMFTest extends MLContextTestBase
                        
                        if(numRegisteredOutputs >= 2) {
                                script.out("W");
-                               ml.setConfigProperty("cp.parallel.ops", 
"false");
+                               ml.setConfigProperty(DMLConfig.CP_PARALLEL_OPS, 
"false");
                        }
                        
                        MLResults results = ml.execute(script);

http://git-wip-us.apache.org/repos/asf/systemml/blob/d6139d14/src/test/scripts/functions/codegen/SystemML-config-codegen-compress.xml
----------------------------------------------------------------------
diff --git 
a/src/test/scripts/functions/codegen/SystemML-config-codegen-compress.xml 
b/src/test/scripts/functions/codegen/SystemML-config-codegen-compress.xml
index 5c8a9b7..d8597fd 100644
--- a/src/test/scripts/functions/codegen/SystemML-config-codegen-compress.xml
+++ b/src/test/scripts/functions/codegen/SystemML-config-codegen-compress.xml
@@ -18,45 +18,9 @@
 -->
 
 <root>
-   <!-- local fs tmp working directory-->
-   <localtmpdir>/tmp/systemml</localtmpdir>
-
-   <!-- hdfs tmp working directory--> 
-   <scratch>scratch_space</scratch> 
-
-   <!-- compiler optimization level, valid values: 0 | 1 | 2 | 3 | 4, default: 
2 -->
-   <optlevel>7</optlevel>  
-
-   <!-- default number of reduce tasks per MR job, default: 2 x number of 
nodes -->
-   <numreducers>10</numreducers> 
-   
-   <!-- override jvm reuse flag for specific MR jobs, valid values: true | 
false  -->
-   <jvmreuse>false</jvmreuse> 
-
-   <!-- default block dim for binary block files -->
-   <defaultblocksize>1000</defaultblocksize> 
-
-   <!-- run systemml control program as yarn appmaster, in case of MR1 always 
falls back to client, please disable for debug mode -->
-   <dml.yarn.appmaster>false</dml.yarn.appmaster>
-
-   <!-- maximum jvm heap size of the dml yarn appmaster in MB, the requested 
memory is 1.5x this parameter -->
-   <dml.yarn.appmaster.mem>2048</dml.yarn.appmaster.mem>
-
-   <!-- maximum jvm heap size of the map/reduce tasks in MB, the requested 
memory is 1.5x this parameter, negative values ignored  -->
-   <dml.yarn.mapreduce.mem>2048</dml.yarn.mapreduce.mem>
-
-   <!-- yarn application submission queue, relevant for default capacity 
scheduler -->
-   <dml.yarn.app.queue>default</dml.yarn.app.queue>
-   
-   <!-- enables multi-threaded operations in singlenode control program -->
-   <cp.parallel.ops>true</cp.parallel.ops>
-   
-   <!-- enables multi-threaded read/write in singlenode control program -->
-   <cp.parallel.io>true</cp.parallel.io>
-   
-   <!-- enables automatic code generation -->
-   <compressed.linalg>true</compressed.linalg>
-   <codegen.enabled>true</codegen.enabled>
-   <codegen.plancache>true</codegen.plancache>
-   <codegen.literals>1</codegen.literals>
+   <sysml.optlevel>7</sysml.optlevel>
+   <sysml.compressed.linalg>true</sysml.compressed.linalg>
+   <sysml.codegen.enabled>true</sysml.codegen.enabled>
+   <sysml.codegen.plancache>true</sysml.codegen.plancache>
+   <sysml.codegen.literals>1</sysml.codegen.literals>
 </root>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d6139d14/src/test/scripts/functions/codegen/SystemML-config-codegen.xml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/codegen/SystemML-config-codegen.xml 
b/src/test/scripts/functions/codegen/SystemML-config-codegen.xml
index 95e0dba..3a00e70 100644
--- a/src/test/scripts/functions/codegen/SystemML-config-codegen.xml
+++ b/src/test/scripts/functions/codegen/SystemML-config-codegen.xml
@@ -18,44 +18,8 @@
 -->
 
 <root>
-   <!-- local fs tmp working directory-->
-   <localtmpdir>/tmp/systemml</localtmpdir>
-
-   <!-- hdfs tmp working directory--> 
-   <scratch>scratch_space</scratch> 
-
-   <!-- compiler optimization level, valid values: 0 | 1 | 2 | 3 | 4, default: 
2 -->
-   <optlevel>7</optlevel>  
-
-   <!-- default number of reduce tasks per MR job, default: 2 x number of 
nodes -->
-   <numreducers>10</numreducers> 
-   
-   <!-- override jvm reuse flag for specific MR jobs, valid values: true | 
false  -->
-   <jvmreuse>false</jvmreuse> 
-
-   <!-- default block dim for binary block files -->
-   <defaultblocksize>1000</defaultblocksize> 
-
-   <!-- run systemml control program as yarn appmaster, in case of MR1 always 
falls back to client, please disable for debug mode -->
-   <dml.yarn.appmaster>false</dml.yarn.appmaster>
-
-   <!-- maximum jvm heap size of the dml yarn appmaster in MB, the requested 
memory is 1.5x this parameter -->
-   <dml.yarn.appmaster.mem>2048</dml.yarn.appmaster.mem>
-
-   <!-- maximum jvm heap size of the map/reduce tasks in MB, the requested 
memory is 1.5x this parameter, negative values ignored  -->
-   <dml.yarn.mapreduce.mem>2048</dml.yarn.mapreduce.mem>
-
-   <!-- yarn application submission queue, relevant for default capacity 
scheduler -->
-   <dml.yarn.app.queue>default</dml.yarn.app.queue>
-   
-   <!-- enables multi-threaded operations in singlenode control program -->
-   <cp.parallel.ops>true</cp.parallel.ops>
-   
-   <!-- enables multi-threaded read/write in singlenode control program -->
-   <cp.parallel.io>true</cp.parallel.io>
-   
-   <!-- enables automatic code generation -->
-   <codegen.enabled>true</codegen.enabled>
-   <codegen.plancache>true</codegen.plancache>
-   <codegen.literals>1</codegen.literals>
+   <sysml.optlevel>7</sysml.optlevel>
+   <sysml.codegen.enabled>true</sysml.codegen.enabled>
+   <sysml.codegen.plancache>true</sysml.codegen.plancache>
+   <sysml.codegen.literals>1</sysml.codegen.literals>
 </root>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d6139d14/src/test/scripts/functions/codegen/SystemML-config-codegen6.xml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/codegen/SystemML-config-codegen6.xml 
b/src/test/scripts/functions/codegen/SystemML-config-codegen6.xml
index fc41c2a..35d0956 100644
--- a/src/test/scripts/functions/codegen/SystemML-config-codegen6.xml
+++ b/src/test/scripts/functions/codegen/SystemML-config-codegen6.xml
@@ -18,44 +18,8 @@
 -->
 
 <root>
-   <!-- local fs tmp working directory-->
-   <localtmpdir>/tmp/systemml</localtmpdir>
-
-   <!-- hdfs tmp working directory--> 
-   <scratch>scratch_space</scratch> 
-
-   <!-- compiler optimization level, valid values: 0 | 1 | 2 | 3 | 4, default: 
2 -->
-   <optlevel>6</optlevel>  
-
-   <!-- default number of reduce tasks per MR job, default: 2 x number of 
nodes -->
-   <numreducers>10</numreducers> 
-   
-   <!-- override jvm reuse flag for specific MR jobs, valid values: true | 
false  -->
-   <jvmreuse>false</jvmreuse> 
-
-   <!-- default block dim for binary block files -->
-   <defaultblocksize>1000</defaultblocksize> 
-
-   <!-- run systemml control program as yarn appmaster, in case of MR1 always 
falls back to client, please disable for debug mode -->
-   <dml.yarn.appmaster>false</dml.yarn.appmaster>
-
-   <!-- maximum jvm heap size of the dml yarn appmaster in MB, the requested 
memory is 1.5x this parameter -->
-   <dml.yarn.appmaster.mem>2048</dml.yarn.appmaster.mem>
-
-   <!-- maximum jvm heap size of the map/reduce tasks in MB, the requested 
memory is 1.5x this parameter, negative values ignored  -->
-   <dml.yarn.mapreduce.mem>2048</dml.yarn.mapreduce.mem>
-
-   <!-- yarn application submission queue, relevant for default capacity 
scheduler -->
-   <dml.yarn.app.queue>default</dml.yarn.app.queue>
-   
-   <!-- enables multi-threaded operations in singlenode control program -->
-   <cp.parallel.ops>true</cp.parallel.ops>
-   
-   <!-- enables multi-threaded read/write in singlenode control program -->
-   <cp.parallel.io>true</cp.parallel.io>
-   
-   <!-- enables automatic code generation -->
-   <codegen.enabled>true</codegen.enabled>
-   <codegen.plancache>true</codegen.plancache>
-   <codegen.literals>1</codegen.literals>
+   <sysml.optlevel>6</sysml.optlevel>
+   <sysml.codegen.enabled>true</sysml.codegen.enabled>
+   <sysml.codegen.plancache>true</sysml.codegen.plancache>
+   <sysml.codegen.literals>1</sysml.codegen.literals>
 </root>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/systemml/blob/d6139d14/src/test/scripts/functions/compress/SystemML-config-compress.xml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/compress/SystemML-config-compress.xml 
b/src/test/scripts/functions/compress/SystemML-config-compress.xml
index 4d56c66..9c4f4ba 100644
--- a/src/test/scripts/functions/compress/SystemML-config-compress.xml
+++ b/src/test/scripts/functions/compress/SystemML-config-compress.xml
@@ -18,42 +18,5 @@
 -->
 
 <root>
-   <!-- local fs tmp working directory-->
-   <localtmpdir>/tmp/systemml</localtmpdir>
-
-   <!-- hdfs tmp working directory--> 
-   <scratch>scratch_space</scratch> 
-
-   <!-- compiler optimization level, valid values: 0 | 1 | 2 | 3 | 4, default: 
2 -->
-   <optlevel>2</optlevel>  
-
-   <!-- default number of reduce tasks per MR job, default: 2 x number of 
nodes -->
-   <numreducers>10</numreducers> 
-   
-   <!-- override jvm reuse flag for specific MR jobs, valid values: true | 
false  -->
-   <jvmreuse>false</jvmreuse> 
-
-   <!-- default block dim for binary block files -->
-   <defaultblocksize>1000</defaultblocksize> 
-
-   <!-- run systemml control program as yarn appmaster, in case of MR1 always 
falls back to client, please disable for debug mode -->
-   <dml.yarn.appmaster>false</dml.yarn.appmaster>
-
-   <!-- maximum jvm heap size of the dml yarn appmaster in MB, the requested 
memory is 1.5x this parameter -->
-   <dml.yarn.appmaster.mem>2048</dml.yarn.appmaster.mem>
-
-   <!-- maximum jvm heap size of the map/reduce tasks in MB, the requested 
memory is 1.5x this parameter, negative values ignored  -->
-   <dml.yarn.mapreduce.mem>2048</dml.yarn.mapreduce.mem>
-
-   <!-- yarn application submission queue, relevant for default capacity 
scheduler -->
-   <dml.yarn.app.queue>default</dml.yarn.app.queue>
-   
-   <!-- enables multi-threaded matrix operations in singlenode control program 
-->
-   <cp.parallel.ops>true</cp.parallel.ops>
-   
-   <!-- enables multi-threaded read/write in singlenode control program -->
-   <cp.parallel.io>true</cp.parallel.io>
-
-   <!-- enables compressed linear algebra for cp/spark -->
-   <compressed.linalg>true</compressed.linalg>
+   <sysml.compressed.linalg>true</sysml.compressed.linalg>
 </root>

http://git-wip-us.apache.org/repos/asf/systemml/blob/d6139d14/src/test/scripts/functions/dmlscript/SystemML-config.xml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/dmlscript/SystemML-config.xml 
b/src/test/scripts/functions/dmlscript/SystemML-config.xml
index dc1e298..f0e4512 100644
--- a/src/test/scripts/functions/dmlscript/SystemML-config.xml
+++ b/src/test/scripts/functions/dmlscript/SystemML-config.xml
@@ -17,9 +17,9 @@
  * under the License.
 -->
 <root>
-<numreducers>10</numreducers>
-<scratch>scratch_space</scratch>
-<defaultblocksize>1000</defaultblocksize>
-<cp.parallel.ops>true</cp.parallel.ops>
-<cp.parallel.io>false</cp.parallel.io>
+<sysml.numreducers>10</sysml.numreducers>
+<sysml.scratch>scratch_space</sysml.scratch>
+<sysml.defaultblocksize>1000</sysml.defaultblocksize>
+<sysml.cp.parallel.ops>true</sysml.cp.parallel.ops>
+<sysml.cp.parallel.io>false</sysml.cp.parallel.io>
 </root>

http://git-wip-us.apache.org/repos/asf/systemml/blob/d6139d14/src/test/scripts/functions/gdfo/SystemML-config-globalopt.xml
----------------------------------------------------------------------
diff --git a/src/test/scripts/functions/gdfo/SystemML-config-globalopt.xml 
b/src/test/scripts/functions/gdfo/SystemML-config-globalopt.xml
index 9cf9316..a59d797 100644
--- a/src/test/scripts/functions/gdfo/SystemML-config-globalopt.xml
+++ b/src/test/scripts/functions/gdfo/SystemML-config-globalopt.xml
@@ -18,43 +18,7 @@
 -->
 
 <root>
-   <!-- local fs tmp working directory-->
-   <localtmpdir>/tmp/systemml</localtmpdir>
-
-   <!-- hdfs tmp working directory--> 
-   <scratch>scratch_space</scratch> 
-
-   <!-- compiler optimization level, valid values: 0 | 1 | 2 | 3 | 4, default: 
2 -->
-   <optlevel>4</optlevel>  
-
-   <!-- default number of reduce tasks per MR job, default: 2 x number of 
nodes -->
-   <numreducers>10</numreducers> 
-   
-   <!-- override jvm reuse flag for specific MR jobs, valid values: true | 
false  -->
-   <jvmreuse>false</jvmreuse> 
-
-   <!-- default block dim for binary block files -->
-   <defaultblocksize>1000</defaultblocksize> 
-
-   <!-- run systemml control program as yarn appmaster, in case of MR1 always 
falls back to client, please disable for debug mode -->
-   <dml.yarn.appmaster>false</dml.yarn.appmaster>
-
-   <!-- maximum jvm heap size of the dml yarn appmaster in MB, the requested 
memory is 1.5x this parameter -->
-   <dml.yarn.appmaster.mem>2048</dml.yarn.appmaster.mem>
-
-   <!-- maximum jvm heap size of the map/reduce tasks in MB, the requested 
memory is 1.5x this parameter, negative values ignored  -->
-   <dml.yarn.mapreduce.mem>2048</dml.yarn.mapreduce.mem>
-
-   <!-- yarn application submission queue, relevant for default capacity 
scheduler -->
-   <dml.yarn.app.queue>default</dml.yarn.app.queue>
-   
-   <!-- enables multi-threaded operations in singlenode control program -->
-   <cp.parallel.ops>true</cp.parallel.ops>
-   
-   <!-- enables multi-threaded read/write in singlenode control program -->
-   <cp.parallel.io>true</cp.parallel.io>
-   
-   
+   <sysml.optlevel>4</sysml.optlevel>
    <!-- piggybacked test for custom mapred/mapreduce configurations -->
    <mapreduce.task.io.sort.mb>50</mapreduce.task.io.sort.mb>
 </root>
