http://git-wip-us.apache.org/repos/asf/ambari/blob/6919aa50/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/global.xml
index 5e2c8aa..a4e7e31 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/global.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/global.xml
@@ -19,184 +19,250 @@
  * limitations under the License.
  */
 -->
-
 <configuration>
   <property>
     <name>namenode_host</name>
-    <value></value>
+    <value/>
     <description>NameNode Host.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>dfs_name_dir</name>
     <value>/hadoop/hdfs/namenode</value>
     <description>NameNode Directories.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>snamenode_host</name>
-    <value></value>
+    <value/>
     <description>Secondary NameNode.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>fs_checkpoint_dir</name>
     <value>/hadoop/hdfs/namesecondary</value>
     <description>Secondary NameNode checkpoint dir.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>datanode_hosts</name>
-    <value></value>
+    <value/>
     <description>List of Datanode Hosts.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>dfs_data_dir</name>
     <value>/hadoop/hdfs/data</value>
     <description>Data directories for Data Nodes.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>hdfs_log_dir_prefix</name>
     <value>/var/log/hadoop</value>
     <description>Hadoop Log Dir Prefix</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>hadoop_pid_dir_prefix</name>
     <value>/var/run/hadoop</value>
     <description>Hadoop PID Dir Prefix</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>dfs_webhdfs_enabled</name>
     <value>true</value>
     <description>WebHDFS enabled</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>hadoop_heapsize</name>
     <value>1024</value>
     <description>Hadoop maximum Java heap size</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>namenode_heapsize</name>
     <value>1024</value>
     <description>NameNode Java heap size</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>namenode_opt_newsize</name>
     <value>200</value>
     <description>Default size of Java new generation for NameNode (Java option 
-XX:NewSize) Note: The value of NameNode new generation size (default size of 
Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of 
maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize 
property is 1/8 the value of maximum heap size (-Xmx).</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>namenode_opt_maxnewsize</name>
     <value>640</value>
     <description>NameNode maximum new generation size</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>namenode_opt_permsize</name>
     <value>128</value>
     <description>NameNode permanent generation size</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>namenode_opt_maxpermsize</name>
     <value>256</value>
     <description>NameNode maximum permanent generation size</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>datanode_du_reserved</name>
     <value>1</value>
     <description>Reserved space for HDFS</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>dtnode_heapsize</name>
     <value>1024</value>
     <description>DataNode maximum Java heap size</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>dfs_datanode_failed_volume_tolerated</name>
     <value>0</value>
     <description>DataNode volumes failure toleration</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>fs_checkpoint_period</name>
     <value>21600</value>
     <description>HDFS Maximum Checkpoint Delay</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>fs_checkpoint_size</name>
     <value>0.5</value>
     <description>FS Checkpoint Size.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>proxyuser_group</name>
     <value>users</value>
     <description>Proxy user group.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>dfs_exclude</name>
-    <value></value>
+    <value/>
     <description>HDFS Exclude hosts.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>dfs_include</name>
-    <value></value>
+    <value/>
     <description>HDFS Include hosts.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>dfs_replication</name>
     <value>3</value>
     <description>Default Block Replication.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>dfs_block_local_path_access_user</name>
     <value>hbase</value>
     <description>Default Block Replication.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>dfs_datanode_address</name>
     <value>50010</value>
     <description>Port for datanode address.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>dfs_datanode_http_address</name>
     <value>50075</value>
     <description>Port for datanode address.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>dfs_datanode_data_dir_perm</name>
     <value>750</value>
     <description>Datanode dir perms.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>security_enabled</name>
     <value>false</value>
     <description>Hadoop Security</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>kerberos_domain</name>
     <value>EXAMPLE.COM</value>
     <description>Kerberos realm.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>kadmin_pw</name>
-    <value></value>
+    <value/>
     <description>Kerberos realm admin password</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>keytab_path</name>
     <value>/etc/security/keytabs</value>
     <description>Kerberos keytab path.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-  
   <property>
     <name>keytab_path</name>
     <value>/etc/security/keytabs</value>
     <description>KeyTab Directory.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-    <property>
+  <property>
     <name>namenode_formatted_mark_dir</name>
     <value>/var/run/hadoop/hdfs/namenode/formatted/</value>
     <description>Formatteed Mark Directory.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-    <property>
+  <property>
     <name>hdfs_user</name>
     <value>hdfs</value>
     <description>User and Groups.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-  
 </configuration>
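
The change in this file, and in the files below, follows one mechanical pattern: every <property> element gains an <on-ambari-upgrade/> and an <on-stack-upgrade/> child. A quick way to confirm that no property was missed is to scan the stack configuration XML. The sketch below is illustrative only (Python 3, standard xml.etree); it is not part of this commit, and the default path is just an example taken from the diff headers.

#!/usr/bin/env python3
# Illustrative sketch (not part of this commit): list <property> elements that
# lack the upgrade-metadata children introduced by this change.
import sys
import xml.etree.ElementTree as ET
from pathlib import Path

REQUIRED = ("on-ambari-upgrade", "on-stack-upgrade")

def missing_upgrade_tags(config_dir):
    """Yield (file, property name, missing tag) for each omission found."""
    for xml_file in sorted(Path(config_dir).rglob("*.xml")):
        try:
            root = ET.parse(xml_file).getroot()
        except ET.ParseError as err:
            print("skipping %s: %s" % (xml_file, err), file=sys.stderr)
            continue
        for prop in root.findall("property"):
            name = prop.findtext("name", default="<unnamed>")
            for tag in REQUIRED:
                if prop.find(tag) is None:
                    yield xml_file, name, tag

if __name__ == "__main__":
    # The default path is an example; pass a stacks directory as the first argument.
    base = sys.argv[1] if len(sys.argv) > 1 else "ambari-server/src/test/resources/stacks"
    for path, name, tag in missing_upgrade_tags(base):
        print("%s: property '%s' is missing <%s/>" % (path, name, tag))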

http://git-wip-us.apache.org/repos/asf/ambari/blob/6919aa50/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hadoop-policy.xml
index 6ec304d..4b4285f 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hadoop-policy.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hadoop-policy.xml
@@ -1,6 +1,5 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -17,9 +16,7 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-
 <!-- Put site-specific property overrides in this file. -->
-
 <configuration>
   <property>
     <name>security.client.protocol.acl</name>
@@ -29,8 +26,9 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>security.client.datanode.protocol.acl</name>
     <value>*</value>
@@ -39,8 +37,9 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>security.datanode.protocol.acl</name>
     <value>*</value>
@@ -49,8 +48,9 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>security.inter.datanode.protocol.acl</name>
     <value>*</value>
@@ -59,8 +59,9 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>security.namenode.protocol.acl</name>
     <value>*</value>
@@ -69,8 +70,9 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>security.inter.tracker.protocol.acl</name>
     <value>*</value>
@@ -79,8 +81,9 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>security.job.submission.protocol.acl</name>
     <value>*</value>
@@ -89,8 +92,9 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>security.task.umbilical.protocol.acl</name>
     <value>*</value>
@@ -99,17 +103,19 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
- <property>
+  <property>
     <name>security.admin.operations.protocol.acl</name>
     <value>hadoop</value>
     <description>ACL for AdminOperationsProtocol. Used for admin commands.
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>security.refresh.usertogroups.mappings.protocol.acl</name>
     <value>hadoop</value>
@@ -118,9 +124,10 @@
     group names. The user and group list is separated by a blank. For
     e.g. "alice,bob users,wheel".  A special value of "*" means all
     users are allowed.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
-<property>
+  <property>
     <name>security.refresh.policy.protocol.acl</name>
     <value>hadoop</value>
     <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
@@ -128,7 +135,7 @@
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6919aa50/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hdfs-site.xml
index e277def..939dd99 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/hdfs-site.xml
@@ -1,6 +1,5 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -17,53 +16,52 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-
 <!-- Put site-specific property overrides in this file. -->
-
 <configuration>
-
-<!-- file system properties -->
-
+  <!-- file system properties -->
   <property>
     <name>dfs.name.dir</name>
     <!-- cluster variant -->
-    <value></value>
+    <value/>
     <description>Determines where on the local filesystem the DFS name node
       should store the name table.  If this is a comma-delimited list
       of directories then the name table is replicated in all of the
       directories, for redundancy. </description>
     <final>true</final>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.support.append</name>
     <value>true</value>
     <description>to enable dfs append</description>
     <final>true</final>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.webhdfs.enabled</name>
     <value>true</value>
     <description>to enable webhdfs</description>
     <final>true</final>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
-<!--
+  <!--
  <property>
     <name>dfs.datanode.socket.write.timeout</name>
     <value>0</value>
     <description>DFS Client write socket timeout</description>
   </property>
 -->
-
   <property>
     <name>dfs.datanode.failed.volumes.tolerated</name>
     <value>0</value>
     <description>#of failed disks dn would tolerate</description>
     <final>true</final>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.block.local-path-access.user</name>
     <value>hbase</value>
@@ -71,11 +69,12 @@
     circuit reads.
     </description>
     <final>true</final>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.data.dir</name>
-    <value></value>
+    <value/>
     <description>Determines where on the local filesystem an DFS data node
   should store its blocks.  If this is a comma-delimited
   list of directories, then data will be stored in all named
@@ -83,18 +82,20 @@
   Directories that do not exist are ignored.
   </description>
     <final>true</final>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.hosts.exclude</name>
-    <value></value>
+    <value/>
     <description>Names a file that contains a list of hosts that are
     not permitted to connect to the namenode.  The full pathname of the
     file must be specified.  If the value is empty, no hosts are
     excluded.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
-<!--
+  <!--
   <property>
     <name>dfs.hosts</name>
     <value></value>
@@ -104,7 +105,6 @@
     permitted.</description>
   </property>
 -->
-
   <property>
     <name>dfs.checksum.type</name>
     <value>CRC32</value>
@@ -112,34 +112,39 @@
     compatibility, it is being set to CRC32. Once all migration steps
     are complete, we can change it to CRC32C and take advantage of the
     additional performance benefit.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.replication.max</name>
     <value>50</value>
     <description>Maximal block replication.
   </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.replication</name>
     <value>3</value>
     <description>Default block replication.
   </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.heartbeat.interval</name>
     <value>3</value>
     <description>Determines datanode heartbeat interval in 
seconds.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.heartbeat.interval</name>
     <value>3</value>
     <description>Determines datanode heartbeat interval in 
seconds.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.safemode.threshold.pct</name>
     <value>1.0f</value>
@@ -149,8 +154,9 @@
         Values less than or equal to 0 mean not to start in safe mode.
         Values greater than 1 will make safe mode permanent.
         </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.balance.bandwidthPerSec</name>
     <value>6250000</value>
@@ -159,244 +165,271 @@
         can utilize for the balancing purpose in term of
         the number of bytes per second.
   </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.address</name>
     <value>0.0.0.0:50010</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.http.address</name>
     <value>0.0.0.0:50075</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.block.size</name>
     <value>134217728</value>
     <description>The default block size for new files.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.http.address</name>
-    <value></value>
-<description>The name of the default file system.  Either the
+    <value/>
+    <description>The name of the default file system.  Either the
 literal string "local" or a host:port for HDFS.</description>
-<final>true</final>
-</property>
-
-<property>
-<name>dfs.datanode.du.reserved</name>
-<!-- cluster variant -->
-<value>1073741824</value>
-<description>Reserved space in bytes per volume. Always leave this much space 
free for non dfs use.
+    <final>true</final>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <!-- cluster variant -->
+    <value>1073741824</value>
+    <description>Reserved space in bytes per volume. Always leave this much 
space free for non dfs use.
 </description>
-</property>
-
-<property>
-<name>dfs.datanode.ipc.address</name>
-<value>0.0.0.0:8010</value>
-<description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
+    <description>
 The datanode ipc server address and port.
 If the port is 0 then the server will start on a free port.
 </description>
-</property>
-
-<property>
-<name>dfs.blockreport.initialDelay</name>
-<value>120</value>
-<description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>40</value>
-<description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-<name>dfs.datanode.max.xcievers</name>
-<value>1024</value>
-<description>PRIVATE CONFIG VARIABLE</description>
-</property>
-
-<!-- Permissions configuration -->
-
-<property>
-<name>dfs.umaskmode</name>
-<value>022</value>
-<description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for first block report in seconds.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>40</value>
+    <description>The number of server threads for the namenode.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.datanode.max.xcievers</name>
+    <value>1024</value>
+    <description>PRIVATE CONFIG VARIABLE</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <!-- Permissions configuration -->
+  <property>
+    <name>dfs.umaskmode</name>
+    <value>022</value>
+    <description>
 The octal umask used when creating files and directories.
 </description>
-</property>
-
-<property>
-<name>dfs.web.ugi</name>
-<!-- cluster variant -->
-<value>gopher,gopher</value>
-<description>The user account used by the web interface.
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.web.ugi</name>
+    <!-- cluster variant -->
+    <value>gopher,gopher</value>
+    <description>The user account used by the web interface.
 Syntax: USERNAME,GROUP1,GROUP2, ...
 </description>
-</property>
-
-<property>
-<name>dfs.permissions</name>
-<value>true</value>
-<description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.permissions</name>
+    <value>true</value>
+    <description>
 If "true", enable permission checking in HDFS.
 If "false", permission checking is turned off,
 but all other behavior is unchanged.
 Switching from one parameter value to the other does not change the mode,
 owner or group of files or directories.
 </description>
-</property>
-
-<property>
-<name>dfs.permissions.supergroup</name>
-<value>hdfs</value>
-<description>The name of the group of super-users.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>100</value>
-<description>Added to grow Queue size so that more client connections are 
allowed</description>
-</property>
-
-<property>
-<name>ipc.server.max.response.size</name>
-<value>5242880</value>
-</property>
-<property>
-<name>dfs.block.access.token.enable</name>
-<value>true</value>
-<description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.permissions.supergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>100</value>
+    <description>Added to grow Queue size so that more client connections are 
allowed</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>ipc.server.max.response.size</name>
+    <value>5242880</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+    <description>
 If "true", access tokens are used as capabilities for accessing datanodes.
 If "false", no access tokens are checked on accessing datanodes.
 </description>
-</property>
-
-<property>
-<name>dfs.namenode.kerberos.principal</name>
-<value></value>
-<description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value/>
+    <description>
 Kerberos principal name for the NameNode
 </description>
-</property>
-
-<property>
-<name>dfs.secondary.namenode.kerberos.principal</name>
-<value></value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>dfs.secondary.namenode.kerberos.principal</name>
+    <value/>
     <description>
         Kerberos principal name for the secondary NameNode.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
-
-<!--
+  <!--
   This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
 -->
   <property>
     <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-     <description>The Kerberos principal for the host that the NameNode runs 
on.</description>
-
+    <value/>
+    <description>The Kerberos principal for the host that the NameNode runs 
on.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
+    <value/>
     <description>The Kerberos principal for the hostthat the secondary 
NameNode runs on.</description>
-
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <!-- cluster variant -->
     <name>dfs.secondary.http.address</name>
-    <value></value>
+    <value/>
     <description>Address of secondary namenode web server</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.secondary.https.port</name>
     <value>50490</value>
     <description>The https port where secondary-namenode binds</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
+    <value/>
     <description>
       The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
       The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
       HTTP SPENGO specification.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
+    <value/>
     <description>
       The Kerberos keytab file with the credentials for the
       HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
- <description>
+    <value/>
+    <description>
         The Kerberos principal that the DataNode runs as. "_HOST" is replaced 
by the real host name.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.keytab.file</name>
-    <value></value>
- <description>
+    <value/>
+    <description>
         Combined keytab file containing the namenode service and host 
principals.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-  <description>
+    <value/>
+    <description>
         Combined keytab file containing the namenode service and host 
principals.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.keytab.file</name>
-    <value></value>
- <description>
+    <value/>
+    <description>
         The filename of the keytab file for the DataNode.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.https.port</name>
     <value>50470</value>
- <description>The https port where namenode binds</description>
-
+    <description>The https port where namenode binds</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.https.address</name>
-    <value></value>
-  <description>The https address where namenode binds</description>
-
+    <value/>
+    <description>The https address where namenode binds</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.datanode.data.dir.perm</name>
     <value>750</value>
-<description>The permissions that should be there on dfs.data.dir
+    <description>The permissions that should be there on dfs.data.dir
 directories. The datanode will not come up if the permissions are
 different on existing dfs.data.dir directories. If the directories
 don't exist, they will be created with this permission.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.access.time.precision</name>
     <value>0</value>
@@ -404,20 +437,23 @@ don't exist, they will be created with this 
permission.</description>
                  The default value is 1 hour. Setting a value of 0 disables
                  access times for HDFS.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
-   <name>dfs.cluster.administrators</name>
-   <value> hdfs</value>
-   <description>ACL for who all can view the default servlets in the 
HDFS</description>
+    <name>dfs.cluster.administrators</name>
+    <value> hdfs</value>
+    <description>ACL for who all can view the default servlets in the 
HDFS</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>ipc.server.read.threadpool.size</name>
     <value>5</value>
-    <description></description>
+    <description/>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>dfs.namenode.check.stale.datanode</name>
     <value>true</value>
@@ -426,6 +462,7 @@ don't exist, they will be created with this 
permission.</description>
       for more than 30s (i.e. in a stale state) are used for reads only if all
       other remote replicas have failed.
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
 </configuration>
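
The attribute semantics are not spelled out in this patch; read literally, the add/change/delete flags on <on-ambari-upgrade> and <on-stack-upgrade> would say whether a property may be added, changed, or deleted during an Ambari upgrade or a stack upgrade, respectively. That reading is only an assumption inferred from the attribute names. The sketch below (illustrative, not from this commit) shows how such flags could be parsed.

# Illustrative sketch (not part of this commit): one plausible way to read the
# per-property upgrade flags. Semantics are assumed from the attribute names only.
import xml.etree.ElementTree as ET

def upgrade_flags(property_elem, scope):
    """Return add/change/delete booleans for 'on-ambari-upgrade' or 'on-stack-upgrade'."""
    elem = property_elem.find(scope)
    if elem is None:
        # Conservative default when the element is absent (assumption).
        return {"add": False, "change": False, "delete": False}
    return {key: elem.get(key, "false").lower() == "true"
            for key in ("add", "change", "delete")}

# Example: parse one property definition as it appears after this patch.
snippet = """
<configuration>
  <property>
    <name>dfs_replication</name>
    <value>3</value>
    <on-ambari-upgrade add="false" change="true" delete="true"/>
    <on-stack-upgrade add="true" change="true" delete="false"/>
  </property>
</configuration>
"""
prop = ET.fromstring(snippet).find("property")
print(upgrade_flags(prop, "on-ambari-upgrade"))  # {'add': False, 'change': True, 'delete': True}
print(upgrade_flags(prop, "on-stack-upgrade"))   # {'add': True, 'change': True, 'delete': False}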

http://git-wip-us.apache.org/repos/asf/ambari/blob/6919aa50/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HIVE/configuration/hive-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HIVE/configuration/hive-site.xml
index 786c9ce..c34402e 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HIVE/configuration/hive-site.xml
@@ -16,121 +16,138 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 
express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 -->
-
 <configuration>
   <property>
     <name>hive.metastore.local</name>
     <value>false</value>
     <description>controls whether to connect to remove metastore server or
     open a new metastore server in Hive Client JVM</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>javax.jdo.option.ConnectionURL</name>
-    <value></value>
+    <value/>
     <description>JDBC connect string for a JDBC metastore</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>javax.jdo.option.ConnectionDriverName</name>
     <value>com.mysql.jdbc.Driver</value>
     <description>Driver class name for a JDBC metastore</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>javax.jdo.option.ConnectionUserName</name>
-    <value></value>
+    <value/>
     <description>username to use against metastore database</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property require-input="true">
     <name>javax.jdo.option.ConnectionPassword</name>
-    <value></value>
+    <value/>
     <property-type>PASSWORD</property-type>
     <description>password to use against metastore database</description>
     <value-attributes>
       <type>password</type>
     </value-attributes>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.warehouse.dir</name>
     <value>/apps/hive/warehouse</value>
     <description>location of default database for the warehouse</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.sasl.enabled</name>
-    <value></value>
+    <value/>
     <description>If true, the metastore thrift interface will be secured with 
SASL.
      Clients must authenticate with Kerberos.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
+    <value/>
     <description>The path to the Kerberos Keytab file containing the metastore
      thrift server's service principal.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.kerberos.principal</name>
-    <value></value>
+    <value/>
     <description>The service principal for the metastore thrift server. The 
special
     string _HOST will be replaced automatically with the correct host 
name.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.cache.pinobjtypes</name>
     <value>Table,Database,Type,FieldSchema,Order</value>
     <description>List of comma separated metastore object types that should be 
pinned in the cache</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.uris</name>
-    <value></value>
+    <value/>
     <description>URI for client to contact metastore server</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hadoop.clientside.fs.operations</name>
     <value>true</value>
     <description>FS operations are owned by client</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.client.socket.timeout</name>
     <value>60</value>
     <description>MetaStore Client socket timeout in seconds</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.metastore.execute.setugi</name>
     <value>true</value>
     <description>In unsecure mode, setting this property to true will cause 
the metastore to execute DFS operations using the client's reported user and 
group permissions. Note that this property must be set on both the client and   
  server sides. Further note that its best effort. If client sets its to true 
and server sets it to false, client setting will be ignored.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.security.authorization.enabled</name>
     <value>true</value>
     <description>enable or disable the hive client authorization</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.security.authorization.manager</name>
     <value>org.apache.hcatalog.security.HdfsAuthorizationProvider</value>
     <description>the hive client authorization manager class name.
     The user defined authorization class should implement interface 
org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  
</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>hive.server2.enable.doAs</name>
     <value>true</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>fs.hdfs.impl.disable.cache</name>
     <value>true</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6919aa50/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/global.xml
index ceedd56..a497471 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/global.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/global.xml
@@ -19,26 +19,33 @@
  * limitations under the License.
  */
 -->
-
 <configuration>
   <property>
     <name>hs_host</name>
-    <value></value>
+    <value/>
     <description>History Server.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>mapred_log_dir_prefix</name>
     <value>/var/log/hadoop-mapreduce</value>
     <description>Mapreduce Log Dir Prefix</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>mapred_pid_dir_prefix</name>
     <value>/var/run/hadoop-mapreduce</value>
     <description>Mapreduce PID Dir Prefix</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
   <property>
     <name>mapred_user</name>
     <value>mapred</value>
     <description>Mapreduce User</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6919aa50/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-queue-acls.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
index ce12380..3f83f98 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
@@ -1,6 +1,5 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -17,23 +16,20 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-
 <!-- mapred-queue-acls.xml -->
 <configuration>
-
-
-<!-- queue default -->
-
+  <!-- queue default -->
   <property>
     <name>mapred.queue.default.acl-submit-job</name>
     <value>*</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapred.queue.default.acl-administer-jobs</name>
     <value>*</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <!-- END ACLs -->
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6919aa50/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml
index 5896f13..1e916a0 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml
@@ -1,6 +1,5 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -17,85 +16,91 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-
 <!-- Put site-specific property overrides in this file. -->
-
 <configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
+  <!-- i/o properties -->
   <property>
     <name>io.sort.mb</name>
     <value>100</value>
     <description>No description</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>io.sort.record.percent</name>
     <value>.2</value>
     <description>No description</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>io.sort.spill.percent</name>
     <value>0.1</value>
     <description>No description</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>io.sort.factor</name>
     <value>100</value>
     <description>No description</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
-<!-- map/reduce properties -->
-
+  <!-- map/reduce properties -->
   <property>
     <name>mapred.system.dir</name>
-    <value></value>
+    <value/>
     <description>No description</description>
     <final>true</final>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <!-- cluster specific -->
     <name>mapred.local.dir</name>
-    <value></value>
+    <value/>
     <description>No description</description>
     <final>true</final>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapred.reduce.parallel.copies</name>
     <value>30</value>
     <description>No description</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value></value>
+    <value/>
     <description>No description</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapred.map.tasks.speculative.execution</name>
     <value>false</value>
     <description>If true, then multiple instances of some map tasks
                may be executed in parallel.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapred.reduce.tasks.speculative.execution</name>
     <value>false</value>
     <description>If true, then multiple instances of some reduce tasks
                may be executed in parallel.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapred.reduce.slowstart.completed.maps</name>
     <value>0.05</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapred.inmem.merge.threshold</name>
     <value>1000</value>
@@ -105,8 +110,9 @@
   0 indicates we want to DON'T have any threshold and instead depend only on
   the ramfs's memory consumption to trigger the merge.
   </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapred.job.shuffle.merge.percent</name>
     <value>0.66</value>
@@ -115,32 +121,36 @@
   storing in-memory map outputs, as defined by
   mapred.job.shuffle.input.buffer.percent.
   </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapred.job.shuffle.input.buffer.percent</name>
     <value>0.7</value>
     <description>The percentage of memory to be allocated from the maximum heap
   size to storing map outputs during the shuffle.
   </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapred.map.output.compression.codec</name>
-    <value></value>
+    <value/>
     <description>If the map outputs are compressed, how should they be
       compressed
     </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
-<property>
-  <name>mapred.output.compression.type</name>
-  <value>BLOCK</value>
-  <description>If the job outputs are to compressed as SequenceFiles, how 
should
+  <property>
+    <name>mapred.output.compression.type</name>
+    <value>BLOCK</value>
+    <description>If the job outputs are to be compressed as SequenceFiles, how 
should
               they be compressed? Should be one of NONE, RECORD or BLOCK.
   </description>
-</property>
-
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
   <property>
     <name>mapred.job.reduce.input.buffer.percent</name>
     <value>0.0</value>
@@ -149,25 +159,26 @@
   remaining map outputs in memory must consume less than this threshold before
   the reduce can begin.
   </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
- <property>
-  <name>mapreduce.reduce.input.limit</name>
-  <value>10737418240</value>
-  <description>The limit on the input size of the reduce. (This value
+  <property>
+    <name>mapreduce.reduce.input.limit</name>
+    <value>10737418240</value>
+    <description>The limit on the input size of the reduce. (This value
   is 10 Gb.)  If the estimated input size of the reduce is greater than
  this value, the job is failed. A value of -1 means that there is no limit
   set. </description>
-</property>
-
-
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
   <!-- copied from kryptonite configuration -->
   <property>
     <name>mapred.compress.map.output</name>
-    <value></value>
+    <value/>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
-
   <property>
     <name>mapred.task.timeout</name>
     <value>600000</value>
@@ -175,126 +186,147 @@
   terminated if it neither reads an input, writes an output, nor
   updates its status string.
   </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>jetty.connector</name>
     <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
     <description>No description</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapred.child.root.logger</name>
     <value>INFO,TLA</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapred.child.java.opts</name>
     <value>-Xmx512m</value>
     <description>No description</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapred.cluster.reduce.memory.mb</name>
-    <value></value>
+    <value/>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapred.job.map.memory.mb</name>
     <value>1024</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>mapred.job.reduce.memory.mb</name>
     <value>1024</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
-<property>
-  <name>mapred.max.tracker.blacklists</name>
-  <value>16</value>
-  <description>
+  <property>
+    <name>mapred.max.tracker.blacklists</name>
+    <value>16</value>
+    <description>
    if a node is reported as blacklisted by 16 successful jobs within the 
timeout window, it will be graylisted
   </description>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.path</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.timeout</name>
-  <value>60000</value>
-</property>
-
-<property>
-  <name>mapred.task.maxvmem</name>
-  <value></value>
-  <final>true</final>
-   <description>No description</description>
-</property>
-
-<property>
-  <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-  <value>false</value>
-</property>
-
- <property>
-   <name>mapreduce.tasktracker.keytab.file</name>
-   <value></value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>mapred.healthChecker.script.path</name>
+    <value/>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>mapred.healthChecker.script.timeout</name>
+    <value>60000</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>mapred.task.maxvmem</name>
+    <value/>
+    <final>true</final>
+    <description>No description</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
+    <value>false</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>mapreduce.tasktracker.keytab.file</name>
+    <value/>
     <description>The filename of the keytab for the task tracker</description>
- </property>
-
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
   <property>
     <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
     <value>50000000</value>
     <final>true</final>
-     <description>If the size of the split metainfo file is larger than this, 
the JobTracker will fail the job during
+    <description>If the size of the split metainfo file is larger than this, 
the JobTracker will fail the job during
     initialize.
    </description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
- <property>
-   <name>mapreduce.jobhistory.keytab.file</name>
+  <property>
+    <name>mapreduce.jobhistory.keytab.file</name>
     <!-- cluster variant -->
-   <value></value>
-   <description>The keytab for the job history server principal.</description>
- </property>
-
-<property>
-  <name>mapreduce.shuffle.port</name>
-  <value>8081</value>
-  <description>Default port that the ShuffleHandler will run on. 
ShuffleHandler is a service run at the NodeManager to facilitate transfers of 
intermediate Map outputs to requesting Reducers.</description>
-</property>
-
-<property>
-  <name>mapreduce.jobhistory.intermediate-done-dir</name>
-  <value>/mr-history/tmp</value>
-  <description>Directory where history files are written by MapReduce 
jobs.</description>
-</property>
-
-<property>
-  <name>mapreduce.jobhistory.done-dir</name>
-  <value>/mr-history/done</value>
-  <description>Directory where history files are managed by the MR JobHistory 
Server.</description>
-</property>
-
-<property>       
-  <name>mapreduce.jobhistory.address</name>       
-  <value>localhost:10020</value>  
-  <description>Enter your JobHistoryServer hostname.</description>
-</property>
-
-<property>       
-  <name>mapreduce.jobhistory.webapp.address</name>       
-  <value>localhost:19888</value>  
-  <description>Enter your JobHistoryServer hostname.</description>
-</property>
-
-<property>
-  <name>mapreduce.framework.name</name>
-  <value>yarn</value>
-  <description>No description</description>
-</property>
-
+    <value/>
+    <description>The keytab for the job history server principal.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>mapreduce.shuffle.port</name>
+    <value>8081</value>
+    <description>Default port that the ShuffleHandler will run on. 
ShuffleHandler is a service run at the NodeManager to facilitate transfers of 
intermediate Map outputs to requesting Reducers.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.intermediate-done-dir</name>
+    <value>/mr-history/tmp</value>
+    <description>Directory where history files are written by MapReduce 
jobs.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.done-dir</name>
+    <value>/mr-history/done</value>
+    <description>Directory where history files are managed by the MR 
JobHistory Server.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.address</name>
+    <value>localhost:10020</value>
+    <description>Enter your JobHistoryServer hostname.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.webapp.address</name>
+    <value>localhost:19888</value>
+    <description>Enter your JobHistoryServer hostname.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>mapreduce.framework.name</name>
+    <value>yarn</value>
+    <description>No description</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6919aa50/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/OOZIE/configuration/oozie-site.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/OOZIE/configuration/oozie-site.xml
index 69dfe30..0c705ae 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/OOZIE/configuration/oozie-site.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/OOZIE/configuration/oozie-site.xml
@@ -15,11 +15,9 @@
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
--->     
-
+-->
 <configuration>
-
-<!--
+  <!--
     Refer to the oozie-default.xml file for the complete list of
     Oozie configuration properties and their default values.
 -->
@@ -27,145 +25,164 @@
     <name>oozie.base.url</name>
     <value>http://localhost:11000/oozie</value>
     <description>Base Oozie URL.</description>
-   </property>
-
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
   <property>
     <name>oozie.system.id</name>
     <value>oozie-${user.name}</value>
     <description>
     The Oozie system ID.
     </description>
-   </property>
-
-   <property>
-     <name>oozie.systemmode</name>
-     <value>NORMAL</value>
-     <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.systemmode</name>
+    <value>NORMAL</value>
+    <description>
      System mode for  Oozie at startup.
      </description>
-   </property>
-
-   <property>
-     <name>oozie.service.AuthorizationService.security.enabled</name>
-     <value>true</value>
-     <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.AuthorizationService.security.enabled</name>
+    <value>true</value>
+    <description>
      Specifies whether security (user name/admin role) is enabled or not.
      If disabled any user can manage Oozie system and manage any job.
      </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.older.than</name>
-     <value>30</value>
-     <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.PurgeService.older.than</name>
+    <value>30</value>
+    <description>
      Jobs older than this value, in days, will be purged by the PurgeService.
      </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.purge.interval</name>
-     <value>3600</value>
-     <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.PurgeService.purge.interval</name>
+    <value>3600</value>
+    <description>
      Interval at which the purge service will run, in seconds.
      </description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.queue.size</name>
-     <value>1000</value>
-     <description>Max callable queue size</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.threads</name>
-     <value>10</value>
-     <description>Number of threads used for executing callables</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.callable.concurrency</name>
-     <value>3</value>
-     <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.CallableQueueService.queue.size</name>
+    <value>1000</value>
+    <description>Max callable queue size</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.CallableQueueService.threads</name>
+    <value>10</value>
+    <description>Number of threads used for executing callables</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.CallableQueueService.callable.concurrency</name>
+    <value>3</value>
+    <description>
      Maximum concurrency for a given callable type.
      Each command is a callable type (submit, start, run, signal, job, jobs, 
suspend, resume, etc).
      Each action type is a callable type (Map-Reduce, Pig, SSH, FS, 
sub-workflow, etc).
      All commands that use action executors (action-start, action-end, 
action-kill and action-check) use
      the action type as the callable type.
      </description>
-   </property>
-
-   <property>
-     <name>oozie.service.coord.normal.default.timeout</name>
-     <value>120</value>
-     <description>Default timeout for a coordinator action input check (in 
minutes) for normal job.
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.coord.normal.default.timeout</name>
+    <value>120</value>
+    <description>Default timeout for a coordinator action input check (in 
minutes) for normal job.
       -1 means infinite timeout</description>
-   </property>
-
-   <property>
-     <name>oozie.db.schema.name</name>
-     <value>oozie</value>
-     <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.db.schema.name</name>
+    <value>oozie</value>
+    <description>
       Oozie DataBase Name
      </description>
-   </property>
-
-    <property>
-      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-      <value> </value>
-      <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
+    <value> </value>
+    <description>
       Whitelisted job tracker for Oozie service.
       </description>
-    </property>
-   
-    <property>
-      <name>oozie.authentication.type</name>
-      <value>simple</value>
-      <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.authentication.type</name>
+    <value>simple</value>
+    <description>
       </description>
-    </property>
-   
-    <property>
-      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-      <value> </value>
-      <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
+    <value> </value>
+    <description>
       </description>
-    </property>
-
-    <property>
-      <name>oozie.service.WorkflowAppService.system.libpath</name>
-      <value>/user/${user.name}/share/lib</value>
-      <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.WorkflowAppService.system.libpath</name>
+    <value>/user/${user.name}/share/lib</value>
+    <description>
       System library path to use for workflow applications.
       This path is added to workflow application if their job properties sets
       the property 'oozie.use.system.libpath' to true.
       </description>
-    </property>
-
-    <property>
-      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-      <value>false</value>
-      <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
+    <value>false</value>
+    <description>
       If set to true, submissions of MapReduce and Pig jobs will include
       automatically the system library path, thus not requiring users to
       specify where the Pig JAR files are. Instead, the ones from the system
       library path are used.
       </description>
-    </property>
-    <property>
-      <name>oozie.authentication.kerberos.name.rules</name>
-      <value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.authentication.kerberos.name.rules</name>
+    <value>
 
 
 
 
 
         </value>
-      <description>The mapping from kerberos principal names to local OS user 
names.</description>
-    </property>
-    <property>
-      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-      <value>*=/etc/hadoop/conf</value>
-      <description>
+    <description>The mapping from kerberos principal names to local OS user 
names.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
+    <value>*=/etc/hadoop/conf</value>
+    <description>
           Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the 
HOST:PORT of
           the Hadoop service (JobTracker, HDFS). The wildcard '*' 
configuration is
           used when there is no exact match for an authority. The 
HADOOP_CONF_DIR contains
@@ -173,73 +190,85 @@
           the Oozie configuration directory; though the path can be absolute 
(i.e. to point
          to Hadoop client conf/ directories in the local filesystem).
       </description>
-    </property>
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.ActionService.executor.ext.classes</name>
+    <value>
             org.apache.oozie.action.email.EmailActionExecutor,
             org.apache.oozie.action.hadoop.HiveActionExecutor,
             org.apache.oozie.action.hadoop.ShellActionExecutor,
             org.apache.oozie.action.hadoop.SqoopActionExecutor,
             org.apache.oozie.action.hadoop.DistcpActionExecutor
         </value>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        
<value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,hive-action-0.3.xsd</value>
-    </property>
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>false</value>
-        <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.SchemaService.wf.ext.schemas</name>
+    
<value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,hive-action-0.3.xsd</value>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.create.db.schema</name>
+    <value>false</value>
+    <description>
             Creates Oozie DB.
 
            If set to true, it creates the DB schema if it does not exist; if 
the DB schema already exists, this is a NOP.
            If set to false, it does not create the DB schema; if the DB 
schema does not exist, startup fails.
         </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-        <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.driver</name>
+    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
+    <description>
             JDBC driver class.
         </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        
<value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-        <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.url</name>
+    
<value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
+    <description>
             JDBC URL.
         </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>sa</value>
-        <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.username</name>
+    <value>sa</value>
+    <description>
             DB user name.
         </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value> </value>
-        <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.password</name>
+    <value> </value>
+    <description>
             DB user password.
 
            IMPORTANT: if the password is empty, leave a 1-space string; the 
service trims the value,
                       and if it is empty, Configuration assumes it is NULL.
         </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.pool.max.active.conn</name>
+    <value>10</value>
+    <description>
              Max number of connections.
         </description>
-    </property>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6919aa50/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/WEBHCAT/configuration/webhcat-site.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/WEBHCAT/configuration/webhcat-site.xml
index 31d0113..7d2f1c2 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/WEBHCAT/configuration/webhcat-site.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.0.1/services/WEBHCAT/configuration/webhcat-site.xml
@@ -16,111 +16,122 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 
express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 -->
-
 <!-- The default settings for Templeton. -->
 <!-- Edit templeton-site.xml to change settings for your local -->
 <!-- install. -->
-
 <configuration>
-
   <property>
     <name>templeton.port</name>
-      <value>50111</value>
+    <value>50111</value>
     <description>The HTTP port for the main server.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>templeton.hadoop.conf.dir</name>
     <value>/etc/hadoop/conf</value>
     <description>The path to the Hadoop configuration.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>templeton.jar</name>
     <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
     <description>The path to the Templeton jar file.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>templeton.libjars</name>
     <value>/usr/lib/zookeeper/zookeeper.jar</value>
    <description>Jars to add to the classpath.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
-
   <property>
     <name>templeton.hadoop</name>
     <value>/usr/bin/hadoop</value>
     <description>The path to the Hadoop executable.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>templeton.pig.archive</name>
     <value>hdfs:///apps/webhcat/pig.tar.gz</value>
     <description>The path to the Pig archive.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>templeton.pig.path</name>
     <value>pig.tar.gz/pig/bin/pig</value>
     <description>The path to the Pig executable.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>templeton.hcat</name>
     <value>/usr/bin/hcat</value>
     <description>The path to the hcatalog executable.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>templeton.hive.archive</name>
     <value>hdfs:///apps/webhcat/hive.tar.gz</value>
     <description>The path to the Hive archive.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>templeton.hive.path</name>
     <value>hive.tar.gz/hive/bin/hive</value>
     <description>The path to the Hive executable.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>templeton.hive.properties</name>
-    <value></value>
+    <value/>
     <description>Properties to set when running hive.</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
-
   <property>
     <name>templeton.zookeeper.hosts</name>
-    <value></value>
+    <value/>
     <description>ZooKeeper servers, as comma separated host:port 
pairs</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
     <name>templeton.storage.class</name>
     <value>org.apache.hcatalog.templeton.tool.ZooKeeperStorage</value>
     <description>The class to use as storage</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
   <property>
-   <name>templeton.override.enabled</name>
-   <value>false</value>
-   <description>
+    <name>templeton.override.enabled</name>
+    <value>false</value>
+    <description>
      Enable the override path in templeton.override.jars
    </description>
- </property>
-
- <property>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
+  <property>
     <name>templeton.streaming.jar</name>
     <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
     <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
-
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
+  </property>
   <property>
     <name>templeton.exec.timeout</name>
     <value>60000</value>
     <description>Time out for templeton api</description>
+    <on-ambari-upgrade add="false" change="true" delete="true"/>
+    <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
-
 </configuration>
