http://git-wip-us.apache.org/repos/asf/ambari/blob/225afe35/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-upgrade.json
new file mode 100644
index 0000000..19ef81f
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.2/configs/ranger-admin-upgrade.json
@@ -0,0 +1,1029 @@
+{
+    "configuration_attributes": {
+        "spark-defaults": {}, 
+        "mapred-site": {}, 
+        "krb5-conf": {}, 
+        "kafka-log4j": {}, 
+        "ranger-env": {}, 
+        "spark-javaopts-properties": {}, 
+        "ams-hbase-env": {}, 
+        "spark-log4j-properties": {}, 
+        "kerberos-env": {}, 
+        "ams-hbase-security-site": {}, 
+        "admin-properties": {}, 
+        "tez-site": {}, 
+        "spark-env": {}, 
+        "hdfs-site": {}, 
+        "ams-env": {}, 
+        "ams-site": {}, 
+        "ams-hbase-policy": {}, 
+        "hadoop-policy": {}, 
+        "hdfs-log4j": {}, 
+        "kafka-broker": {}, 
+        "mapred-env": {}, 
+        "ranger-hdfs-plugin-properties": {}, 
+        "zoo.cfg": {}, 
+        "tez-env": {}, 
+        "spark-metrics-properties": {}, 
+        "core-site": {}, 
+        "ams-hbase-site": {}, 
+        "yarn-env": {}, 
+        "ams-hbase-log4j": {}, 
+        "hadoop-env": {}, 
+        "zookeeper-log4j": {}, 
+        "yarn-site": {}, 
+        "capacity-scheduler": {}, 
+        "ranger-site": {}, 
+        "kafka-env": {}, 
+        "yarn-log4j": {}, 
+        "usersync-properties": {}, 
+        "ams-log4j": {}, 
+        "zookeeper-env": {}, 
+        "cluster-env": {}
+    }, 
+    "commandParams": {
+        "restart_type": "rolling_upgrade", 
+        "service_package_folder": "common-services/RANGER/0.4.0/package", 
+        "script": "scripts/ranger_admin.py", 
+        "upgrade_direction": "upgrade", 
+        "hooks_folder": "HDP/2.0.6/hooks", 
+        "version": "2.2.2.0-2399", 
+        "command_timeout": "600", 
+        "script_type": "PYTHON"
+    }, 
+    "roleCommand": "CUSTOM_COMMAND", 
+    "kerberosCommandParams": [], 
+    "clusterName": "c1", 
+    "hostname": "c6408.ambari.apache.org", 
+    "hostLevelParams": {
+        "jdk_location": "http://c6407.ambari.apache.org:8080/resources/";, 
+        "ambari_db_rca_password": "mapred", 
+        "java_home": "/usr/jdk64/jdk1.7.0_67", 
+        "ambari_db_rca_url": 
"jdbc:postgresql://c6407.ambari.apache.org/ambarirca", 
+        "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
+        "custom_command": "RESTART", 
+        "oracle_jdbc_url": 
"http://c6407.ambari.apache.org:8080/resources//ojdbc6.jar";, 
+        "repo_info": 
"[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.2.2.0-2398\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.2\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/GA/2.2.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.2.2.0-2442\",\"baseSaved\":true},{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.20\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"baseSaved\":true}]";,
 
+        "group_list": "[\"hadoop\",\"users\",\"ranger\",\"spark\"]", 
+        "agentCacheDir": "/var/lib/ambari-agent/cache", 
+        "stack_version": "2.2", 
+        "stack_name": "HDP", 
+        "db_name": "ambari", 
+        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "jdk_name": "jdk-7u67-linux-x64.tar.gz", 
+        "ambari_db_rca_username": "mapred", 
+        "db_driver_filename": "mysql-connector-java.jar", 
+        "user_list": 
"[\"root\",\"ambari-qa\",\"hdfs\",\"ranger\",\"spark\",\"mapred\",\"tez\",\"zookeeper\",\"rangerlogger\",\"kafka\",\"yarn\",\"ams\",\"rangeradmin\"]",
 
+        "mysql_jdbc_url": 
"http://c6407.ambari.apache.org:8080/resources//mysql-connector-java.jar";
+    }, 
+    "commandType": "EXECUTION_COMMAND", 
+    "roleParams": {
+        "component_category": "MASTER"
+    }, 
+    "serviceName": "RANGER", 
+    "role": "RANGER_ADMIN", 
+    "forceRefreshConfigTags": [], 
+    "taskId": 308, 
+    "public_hostname": "c6408.ambari.apache.org", 
+    "configurations": {
+        "spark-defaults": {
+            "spark.yarn.applicationMaster.waitTries": "10", 
+            "spark.history.kerberos.keytab": 
"/etc/security/keytabs/spark.service.keytab", 
+            "spark.yarn.preserve.staging.files": "false", 
+            "spark.yarn.submit.file.replication": "3", 
+            "spark.history.kerberos.principal": "spark/[email protected]", 
+            "spark.yarn.driver.memoryOverhead": "384", 
+            "spark.yarn.queue": "default", 
+            "spark.yarn.containerLauncherMaxThreads": "25", 
+            "spark.yarn.scheduler.heartbeat.interval-ms": "5000", 
+            "spark.history.ui.port": "18080", 
+            "spark.yarn.max.executor.failures": "3", 
+            "spark.driver.extraJavaOptions": "", 
+            "spark.history.provider": 
"org.apache.spark.deploy.yarn.history.YarnHistoryProvider", 
+            "spark.yarn.am.extraJavaOptions": "", 
+            "spark.yarn.executor.memoryOverhead": "384"
+        }, 
+        "mapred-site": {
+            "mapreduce.jobhistory.address": "c6408.ambari.apache.org:10020", 
+            "mapreduce.jobhistory.webapp.spnego-keytab-file": 
"/etc/security/keytabs/spnego.service.keytab", 
+            "mapreduce.reduce.input.buffer.percent": "0.0", 
+            "mapreduce.output.fileoutputformat.compress": "false", 
+            "mapreduce.framework.name": "yarn", 
+            "mapreduce.map.speculative": "false", 
+            "mapreduce.reduce.shuffle.merge.percent": "0.66", 
+            "yarn.app.mapreduce.am.resource.mb": "682", 
+            "mapreduce.map.java.opts": "-Xmx546m", 
+            "mapreduce.cluster.administrators": " hadoop", 
+            "mapreduce.application.classpath": 
"$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
 
+            "mapreduce.job.reduce.slowstart.completedmaps": "0.05", 
+            "mapreduce.application.framework.path": 
"/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework", 
+            "mapreduce.output.fileoutputformat.compress.type": "BLOCK", 
+            "mapreduce.reduce.speculative": "false", 
+            "mapreduce.reduce.java.opts": "-Xmx546m", 
+            "mapreduce.am.max-attempts": "2", 
+            "yarn.app.mapreduce.am.admin-command-opts": 
"-Dhdp.version=${hdp.version}", 
+            "mapreduce.reduce.log.level": "INFO", 
+            "mapreduce.map.sort.spill.percent": "0.7", 
+            "mapreduce.job.emit-timeline-data": "false", 
+            "mapreduce.task.io.sort.mb": "273", 
+            "mapreduce.task.timeout": "300000", 
+            "mapreduce.map.memory.mb": "682", 
+            "mapreduce.task.io.sort.factor": "100", 
+            "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", 
+            "mapreduce.reduce.memory.mb": "682", 
+            "mapreduce.jobhistory.principal": "jhs/[email protected]", 
+            "yarn.app.mapreduce.am.log.level": "INFO", 
+            "mapreduce.map.log.level": "INFO", 
+            "mapreduce.shuffle.port": "13562", 
+            "mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000", 
+            "mapreduce.admin.user.env": 
"LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
 
+            "mapreduce.map.output.compress": "false", 
+            "yarn.app.mapreduce.am.staging-dir": "/user", 
+            "mapreduce.reduce.shuffle.parallelcopies": "30", 
+            "mapreduce.reduce.shuffle.input.buffer.percent": "0.7", 
+            "mapreduce.jobhistory.webapp.address": 
"c6408.ambari.apache.org:19888", 
+            "mapreduce.jobhistory.keytab": 
"/etc/security/keytabs/jhs.service.keytab", 
+            "mapreduce.jobhistory.done-dir": "/mr-history/done", 
+            "mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 
-Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", 
+            "mapreduce.reduce.shuffle.fetch.retry.enabled": "1", 
+            "mapreduce.jobhistory.webapp.spnego-principal": 
"HTTP/[email protected]", 
+            "yarn.app.mapreduce.am.command-opts": "-Xmx546m 
-Dhdp.version=${hdp.version}", 
+            "mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000", 
+            "mapreduce.jobhistory.bind-host": "0.0.0.0", 
+            "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 
-Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}"
+        }, 
+        "krb5-conf": {
+            "kdc_host": "c6407.ambari.apache.org", 
+            "admin_server_host": "c6407.ambari.apache.org", 
+            "libdefaults_forwardable": "true", 
+            "conf_dir": "/etc", 
+            "libdefaults_dns_lookup_kdc": "false", 
+            "logging_admin_server": "FILE:/var/log/kadmind.log", 
+            "libdefaults_default_tgs_enctypes": "", 
+            "content": "\n[libdefaults]\n  renew_lifetime = 
{{libdefaults_renew_lifetime}}\n  forwardable = {{libdefaults_forwardable}}\n  
default_realm = {{realm|upper()}}\n  ticket_lifetime = 
{{libdefaults_ticket_lifetime}}\n  dns_lookup_realm = 
{{libdefaults_dns_lookup_realm}}\n  dns_lookup_kdc = 
{{libdefaults_dns_lookup_kdc}}\n  {% if libdefaults_default_tgs_enctypes %}\n  
default_tgs_enctypes = {{libdefaults_default_tgs_enctypes}}\n  {% endif %}\n  
{% if libdefaults_default_tkt_enctypes %}\n  default_tkt_enctypes = 
{{libdefaults_default_tkt_enctypes}}\n  {% endif %}\n\n{% if domains 
%}\n[domain_realm]\n{% for domain in domains.split(',') %}\n  {{domain}} = 
{{realm|upper()}}\n{% endfor %}\n{% endif %}\n\n[logging]\n  default = 
{{logging_default}}\n{#\n# The following options are unused unless a managed 
KDC is installed\n  admin_server = {{logging_admin_server}}\n  kdc = 
{{logging_admin_kdc}}\n#}\n\n[realms]\n  {{realm}} = {\n    admin_server = 
{{admin_server_host|default(kdc_hos
 t, True)}}\n    kdc = {{kdc_host}}\n  }\n\n{# Append additional realm 
declarations below #}\n    ", 
+            "libdefaults_ticket_lifetime": "24h", 
+            "logging_kdc": "FILE:/var/log/krb5kdc.log", 
+            "domains": "", 
+            "manage_krb5_conf": "true", 
+            "logging_default": "FILE:/var/log/krb5libs.log", 
+            "libdefaults_dns_lookup_realm": "false", 
+            "libdefaults_renew_lifetime": "7d", 
+            "libdefaults_default_tkt_enctypes": ""
+        }, 
+        "kafka-log4j": {
+            "content": "\n#\n#\n# Licensed to the Apache Software Foundation 
(ASF) under one\n# or more contributor license agreements.  See the NOTICE 
file\n# distributed with this work for additional information\n# regarding 
copyright ownership.  The ASF licenses this file\n# to you under the Apache 
License, Version 2.0 (the\n# \"License\"); you may not use this file except in 
compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  
 http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable 
law or agreed to in writing,\n# software distributed under the License is 
distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 
ANY\n# KIND, either express or implied.  See the License for the\n# specific 
language governing permissions and limitations\n# under the 
License.\n#\n#\n#\nkafka.logs.dir=logs\n\nlog4j.rootLogger=INFO, 
stdout\n\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.layout=org.apache
 .log4j.PatternLayout\nlog4j.appender.stdout.layout.ConversionPattern=[%d] %p 
%m 
(%c)%n\n\nlog4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log\nlog4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.kafkaAppender.layout.ConversionPattern=[%d]
 %p %m 
(%c)%n\n\nlog4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log\nlog4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stateChangeAppender.layout.ConversionPattern=[%d]
 %p %m 
(%c)%n\n\nlog4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-
 
request.log\nlog4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.requestAppender.layout.ConversionPattern=[%d]
 %p %m 
(%c)%n\n\nlog4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log\nlog4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.cleanerAppender.layout.ConversionPattern=[%d]
 %p %m 
(%c)%n\n\nlog4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log\nlog4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.controllerAppender.layout.ConversionPattern=[%d]
 %p %m (%c)%n\n\n# Turn on all our debugging 
info\n#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, 
kafkaAppender\n#log4j
 .logger.kafka.client.ClientUtils=DEBUG, 
kafkaAppender\n#log4j.logger.kafka.perf=DEBUG, 
kafkaAppender\n#log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG,
 
kafkaAppender\n#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG\nlog4j.logger.kafka=INFO,
 kafkaAppender\nlog4j.logger.kafka.network.RequestChannel$=WARN, 
requestAppender\nlog4j.additivity.kafka.network.RequestChannel$=false\n\n#log4j.logger.kafka.network.Processor=TRACE,
 requestAppender\n#log4j.logger.kafka.server.KafkaApis=TRACE, 
requestAppender\n#log4j.additivity.kafka.server.KafkaApis=false\nlog4j.logger.kafka.request.logger=WARN,
 
requestAppender\nlog4j.additivity.kafka.request.logger=false\n\nlog4j.logger.kafka.controller=TRACE,
 
controllerAppender\nlog4j.additivity.kafka.controller=false\n\nlog4j.logger.kafka.log.LogCleaner=INFO,
 
cleanerAppender\nlog4j.additivity.kafka.log.LogCleaner=false\n\nlog4j.logger.state.change.logger=TRACE,
 stateChangeAppender\nlog4j.additivity.state.change.logger=false"
+        }, 
+        "ranger-env": {
+            "ranger_group": "ranger", 
+            "ranger_admin_password": "ambari123", 
+            "oracle_home": "-", 
+            "admin_username": "admin", 
+            "ranger_user": "ranger", 
+            "ranger_admin_username": "amb_ranger_admin", 
+            "admin_password": "admin", 
+            "ranger_admin_log_dir": "/var/log/ranger/admin", 
+            "ranger_usersync_log_dir": "/var/log/ranger/usersync"
+        }, 
+        "spark-javaopts-properties": {
+            "content": " "
+        }, 
+        "ams-hbase-env": {
+            "hbase_pid_dir": "/var/run/ambari-metrics-collector/", 
+            "hbase_regionserver_xmn_max": "512m", 
+            "hbase_regionserver_xmn_ratio": "0.2", 
+            "hbase_master_heapsize": "1024m", 
+            "content": "\n# Set environment variables here.\n\n# The java 
implementation to use. Java 1.6 required.\nexport 
JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport 
HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH 
elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n# The maximum 
amount of heap to use, in MB. Default is 1000.\nexport 
HBASE_HEAPSIZE={{hbase_heapsize}}\n\n# Extra Java runtime options.\n# Below are 
what we set by default. May only work with SUN JVM.\n# For more on why as well 
as other possible settings,\n# see 
http://wiki.apache.org/hadoop/PerformanceTuning\nexport 
HBASE_OPTS=\"-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hbase_log_dir}}/hs_err_pid%p.log 
-Djava.io.tmpdir={{hbase_tmp_dir}}\"\nexport SERVER_GC_OPTS=\"-verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCDateStamps 
-Xloggc:{{hbase_log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to 
enable java garbage collection logging.\n# export HBASE_OPT
 S=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps 
-Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX 
exporting\n# See jmxremote.password and jmxremote.access in 
$JRE_HOME/lib/management to configure remote password access.\n# More details 
at: 
http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# 
export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false 
-Dcom.sun.management.jmxremote.authenticate=false\"\nexport 
HBASE_MASTER_OPTS=\"-Xmx{{master_heapsize}}\"\nexport 
HBASE_REGIONSERVER_OPTS=\"-Xmn{{regionserver_xmn_size}} 
-XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} 
-Xmx{{regionserver_heapsize}}\"\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE 
-Dcom.sun.management.jmxremote.port=10103\"\n# export 
HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE 
-Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which 
HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexpor
 t HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. 
Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o 
SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by 
default.\nexport HBASE_LOG_DIR={{hbase_log_dir}}\n\n# A string representing 
this instance of hbase. $USER by default.\n# export 
HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 
'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are 
stored. /tmp by default.\nexport HBASE_PID_DIR={{hbase_pid_dir}}\n\n# Seconds 
to sleep between slave commands. Unset by default. This\n# can be useful in 
large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than 
the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase 
whether it should manage it's own instance of Zookeeper or not.\nexport 
HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport 
HBASE_OPTS=\"$HBASE_OPTS -Djava.security.auth
 .login.config={{client_jaas_config_file}}\"\nexport 
HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS 
-Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport 
HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS 
-Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\nexport 
HBASE_ZOOKEEPER_OPTS=\"$HBASE_ZOOKEEPER_OPTS 
-Djava.security.auth.login.config={{ams_zookeeper_jaas_config_file}}\"\n{% 
endif %}\n\n# use embedded native 
libs\n_HADOOP_NATIVE_LIB=\"/usr/lib/ams-hbase/lib/hadoop-native/\"\n{% if 
disable_hadoop_environment %}\n# Unset HADOOP_HOME to avoid importing HADOOP 
installed cluster related configs like: 
/usr/hdp/2.2.0.0-2041/hadoop/conf/\nexport HADOOP_HOME=`pwd`\n{% endif %}", 
+            "hbase_regionserver_heapsize": "1024m", 
+            "hbase_log_dir": "/var/log/ambari-metrics-collector"
+        }, 
+        "spark-log4j-properties": {
+            "content": "\n# Set everything to be logged to the 
console\nlog4j.rootCategory=INFO, 
console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd
 HH:mm:ss} %p %c{1}: %m%n\n\n# Settings to quiet third party logs that are too 
verbose\nlog4j.logger.org.eclipse.jetty=WARN\nlog4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR\nlog4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO\nlog4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO"
+        }, 
+        "kerberos-env": {
+            "create_attributes_template": "\n{\n  \"objectClass\": [\"top\", 
\"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": 
\"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": 
\"$principal_name\",\n  #end\n  \"userPrincipalName\": 
\"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  
\"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}\n    ", 
+            "realm": "EXAMPLE.COM", 
+            "container_dn": "", 
+            "ldap_url": "", 
+            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5", 
+            "kdc_type": "mit-kdc"
+        }, 
+        "ams-hbase-security-site": {
+            "hbase.zookeeper.property.kerberos.removeHostFromPrincipal": 
"true", 
+            "hadoop.security.authentication": "kerberos", 
+            "hbase.security.authorization": "true", 
+            "hbase.master.kerberos.principal": "amshbase/[email protected]", 
+            "hbase.regionserver.keytab.file": 
"/etc/security/keytabs/ams-hbase.regionserver.keytab", 
+            "zookeeper.znode.parent": "/ams-hbase-secure", 
+            "hbase.regionserver.kerberos.principal": 
"amshbase/[email protected]", 
+            "hbase.myclient.keytab": 
"/etc/security/keytabs/ams.collector.keytab", 
+            "ams.zookeeper.keytab": 
"/etc/security/keytabs/zk.service.ams.keytab", 
+            "hbase.zookeeper.property.kerberos.removeRealmFromPrincipal": 
"true", 
+            "hbase.master.keytab.file": 
"/etc/security/keytabs/ams-hbase.master.keytab", 
+            "hbase.security.authentication": "kerberos", 
+            "ams.zookeeper.principal": "zookeeper/[email protected]", 
+            "hbase.coprocessor.master.classes": 
"org.apache.hadoop.hbase.security.access.AccessController", 
+            "hbase.myclient.principal": "amshbase/[email protected]", 
+            "hbase.coprocessor.region.classes": 
"org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.AccessController",
 
+            "hbase.zookeeper.property.jaasLoginRenew": "3600000", 
+            "hbase.zookeeper.property.authProvider.1": 
"org.apache.zookeeper.server.auth.SASLAuthenticationProvider"
+        }, 
+        "admin-properties": {
+            "db_password": "admin", 
+            "db_root_user": "root", 
+            "xa_ldap_groupSearchBase": "\"ou=groups,dc=xasecure,dc=net\"", 
+            "xa_ldap_ad_domain": "\"xasecure.net\"", 
+            "SQL_COMMAND_INVOKER": "mysql", 
+            "SQL_CONNECTOR_JAR": "/usr/share/java/mysql-connector-java.jar", 
+            "xa_ldap_userDNpattern": 
"\"uid={0},ou=users,dc=xasecure,dc=net\"", 
+            "remoteLoginEnabled": "true", 
+            "audit_db_name": "ranger_audit", 
+            "ambari_user_password": "admin", 
+            "authServicePort": "5151", 
+            "audit_db_password": "admin", 
+            "DB_FLAVOR": "MYSQL", 
+            "audit_db_user": "rangerlogger", 
+            "xa_ldap_groupRoleAttribute": "\"cn\"", 
+            "xa_ldap_url": "\"ldap://71.127.43.33:389\"";, 
+            "db_name": "ranger", 
+            "authentication_method": "UNIX", 
+            "xa_ldap_groupSearchFilter": 
"\"(member=uid={0},ou=users,dc=xasecure,dc=net)\"", 
+            "policymgr_http_enabled": "true", 
+            "authServiceHostName": "localhost", 
+            "xa_ldap_ad_url": "\"ldap://ad.xasecure.net:389\"";, 
+            "unix_group": "ranger", 
+            "policymgr_external_url": "http://localhost:6080";, 
+            "db_user": "rangeradmin", 
+            "db_host": "localhost", 
+            "unix_user": "ranger", 
+            "db_root_password": "rootpassword"
+        }, 
+        "tez-site": {
+            "tez.task.get-task.sleep.interval-ms.max": "200", 
+            "tez.task.max-events-per-heartbeat": "500", 
+            "tez.task.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc 
-XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC", 
+            "tez.runtime.compress": "true", 
+            "tez.runtime.io.sort.mb": "272", 
+            "tez.runtime.convert.user-payload.to.history-text": "false", 
+            "tez.generate.debug.artifacts": "false", 
+            "tez.am.tez-ui.history-url.template": 
"__HISTORY_URL_BASE__?viewPath=%2F%23%2Ftez-app%2F__APPLICATION_ID__", 
+            "tez.am.log.level": "INFO", 
+            "tez.counters.max.groups": "1000", 
+            "tez.runtime.unordered.output.buffer.size-mb": "51", 
+            "tez.shuffle-vertex-manager.max-src-fraction": "0.4", 
+            "tez.counters.max": "2000", 
+            "tez.task.resource.memory.mb": "682", 
+            "tez.history.logging.service.class": 
"org.apache.tez.dag.history.logging.ats.ATSHistoryLoggingService", 
+            "tez.lib.uris": "/hdp/apps/${hdp.version}/tez/tez.tar.gz", 
+            "tez.task.am.heartbeat.counter.interval-ms.max": "4000", 
+            "tez.am.max.app.attempts": "2", 
+            "tez.am.launch.env": 
"LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
 
+            "tez.am.container.idle.release-timeout-max.millis": "20000", 
+            "tez.am.launch.cluster-default.cmd-opts": "-server 
-Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", 
+            "tez.am.container.idle.release-timeout-min.millis": "10000", 
+            "tez.runtime.compress.codec": 
"org.apache.hadoop.io.compress.SnappyCodec", 
+            "tez.task.launch.cluster-default.cmd-opts": "-server 
-Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", 
+            "tez.task.launch.env": 
"LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
 
+            "tez.am.container.reuse.enabled": "true", 
+            "tez.session.am.dag.submit.timeout.secs": "300", 
+            "tez.grouping.min-size": "16777216", 
+            "tez.grouping.max-size": "1073741824", 
+            "tez.session.client.timeout.secs": "-1", 
+            "tez.cluster.additional.classpath.prefix": 
"/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
 
+            "tez.am.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc 
-XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC", 
+            "tez.staging-dir": "/tmp/${user.name}/staging", 
+            "tez.am.am-rm.heartbeat.interval-ms.max": "250", 
+            "tez.am.maxtaskfailures.per.node": "10", 
+            "tez.am.container.reuse.non-local-fallback.enabled": "false", 
+            "tez.am.container.reuse.locality.delay-allocation-millis": "250", 
+            "tez.am.container.reuse.rack-fallback.enabled": "true", 
+            "tez.grouping.split-waves": "1.7", 
+            "tez.shuffle-vertex-manager.min-src-fraction": "0.2", 
+            "tez.am.resource.memory.mb": "1364"
+        }, 
+        "spark-env": {
+            "content": "\n#!/usr/bin/env bash\n\n# This file is sourced when 
running various Spark programs.\n# Copy it as spark-env.sh and edit that to 
configure Spark for your site.\n\n# Options read in YARN client 
mode\n#SPARK_EXECUTOR_INSTANCES=\"2\" #Number of workers to start (Default: 
2)\n#SPARK_EXECUTOR_CORES=\"1\" #Number of cores for the workers (Default: 
1).\n#SPARK_EXECUTOR_MEMORY=\"1G\" #Memory per Worker (e.g. 1000M, 2G) 
(Default: 1G)\n#SPARK_DRIVER_MEMORY=\"512 Mb\" #Memory for Master (e.g. 1000M, 
2G) (Default: 512 Mb)\n#SPARK_YARN_APP_NAME=\"spark\" #The name of your 
application (Default: Spark)\n#SPARK_YARN_QUEUE=\"~@~Xdefault~@~Y\" #The hadoop 
queue to use for allocation requests (Default: 
@~Xdefault~@~Y)\n#SPARK_YARN_DIST_FILES=\"\" #Comma separated list of files to 
be distributed with the job.\n#SPARK_YARN_DIST_ARCHIVES=\"\" #Comma separated 
list of archives to be distributed with the job.\n\n# Generic options for the 
daemons used in the standalone deploy mode\n\
 n# Alternate conf dir. (Default: ${SPARK_HOME}/conf)\nexport 
SPARK_CONF_DIR=${SPARK_HOME:-{{spark_home}}}/conf\n\n# Where log files are 
stored.(Default:${SPARK_HOME}/logs)\n#export 
SPARK_LOG_DIR=${SPARK_HOME:-{{spark_home}}}/logs\nexport 
SPARK_LOG_DIR={{spark_log_dir}}\n\n# Where the pid file is stored. (Default: 
/tmp)\nexport SPARK_PID_DIR={{spark_pid_dir}}\n\n# A string representing this 
instance of spark.(Default: $USER)\nSPARK_IDENT_STRING=$USER\n\n# The 
scheduling priority for daemons. (Default: 0)\nSPARK_NICENESS=0\n\nexport 
HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\nexport 
HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}", 
+            "spark_pid_dir": "/var/run/spark", 
+            "spark_log_dir": "/var/log/spark", 
+            "spark_group": "spark", 
+            "spark_user": "spark"
+        }, 
+        "hdfs-site": {
+            "dfs.namenode.avoid.write.stale.datanode": "true", 
+            "dfs.permissions.superusergroup": "hdfs", 
+            "dfs.ha.namenodes.ha": "nn1,nn2", 
+            "dfs.namenode.avoid.read.stale.datanode": "true", 
+            "dfs.namenode.kerberos.internal.spnego.principal": 
"HTTP/[email protected]", 
+            "dfs.namenode.checkpoint.txns": "1000000", 
+            "dfs.block.access.token.enable": "true", 
+            "dfs.support.append": "true", 
+            "dfs.datanode.address": "0.0.0.0:1019", 
+            "dfs.cluster.administrators": " hdfs", 
+            "dfs.journalnode.kerberos.principal": "jn/[email protected]", 
+            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
+            "dfs.namenode.safemode.threshold-pct": "0.99f", 
+            "dfs.namenode.checkpoint.edits.dir": 
"${dfs.namenode.checkpoint.dir}", 
+            "dfs.permissions.enabled": "true", 
+            "dfs.namenode.kerberos.principal": "nn/[email protected]", 
+            "dfs.namenode.name.dir": "/hadoop/hdfs/namenode", 
+            "dfs.https.port": "50470", 
+            "dfs.namenode.https-address": "c6407.ambari.apache.org:50470", 
+            "dfs.ha.automatic-failover.enabled": "true", 
+            "dfs.namenode.http-address.ha.nn2": 
"c6408.ambari.apache.org:50070", 
+            "dfs.blockreport.initialDelay": "120", 
+            "dfs.namenode.http-address.ha.nn1": 
"c6407.ambari.apache.org:50070", 
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journal", 
+            "dfs.blocksize": "134217728", 
+            "dfs.client.read.shortcircuit": "true", 
+            "dfs.datanode.max.transfer.threads": "4096", 
+            "dfs.heartbeat.interval": "3", 
+            "dfs.replication": "3", 
+            "dfs.namenode.handler.count": "100", 
+            "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary", 
+            "fs.permissions.umask-mode": "022", 
+            "dfs.namenode.stale.datanode.interval": "30000", 
+            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
+            "dfs.namenode.rpc-address.ha.nn2": "c6408.ambari.apache.org:8020", 
+            "dfs.namenode.rpc-address.ha.nn1": "c6407.ambari.apache.org:8020", 
+            "dfs.nameservices": "ha", 
+            "dfs.datanode.data.dir": "/hadoop/hdfs/data", 
+            "dfs.namenode.http-address": "c6407.ambari.apache.org:50070", 
+            "dfs.webhdfs.enabled": "true", 
+            "dfs.namenode.accesstime.precision": "0", 
+            "dfs.datanode.failed.volumes.tolerated": "0", 
+            "dfs.namenode.https-address.ha.nn2": 
"c6408.ambari.apache.org:50470", 
+            "dfs.namenode.https-address.ha.nn1": 
"c6407.ambari.apache.org:50470", 
+            "dfs.datanode.https.address": "0.0.0.0:50475", 
+            "dfs.web.authentication.kerberos.keytab": 
"/etc/security/keytabs/spnego.service.keytab", 
+            "dfs.datanode.kerberos.principal": "dn/[email protected]", 
+            "dfs.namenode.shared.edits.dir": 
"qjournal://c6407.ambari.apache.org:8485;c6408.ambari.apache.org:8485;c6409.ambari.apache.org:8485/ha",
 
+            "dfs.ha.fencing.methods": "shell(/bin/true)", 
+            "dfs.journalnode.keytab.file": 
"/etc/security/keytabs/jn.service.keytab", 
+            "dfs.datanode.http.address": "0.0.0.0:1022", 
+            "dfs.datanode.du.reserved": "1073741824", 
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
+            "dfs.web.authentication.kerberos.principal": 
"HTTP/[email protected]", 
+            "dfs.http.policy": "HTTP_ONLY", 
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
+            "dfs.datanode.keytab.file": 
"/etc/security/keytabs/dn.service.keytab", 
+            "dfs.namenode.keytab.file": 
"/etc/security/keytabs/nn.service.keytab", 
+            "dfs.journalnode.https-address": "0.0.0.0:8481", 
+            "dfs.journalnode.http-address": "0.0.0.0:8480", 
+            "dfs.journalnode.kerberos.internal.spnego.principal": 
"HTTP/[email protected]", 
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
+            "dfs.client.failover.proxy.provider.ha": 
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider", 
+            "dfs.datanode.data.dir.perm": "750", 
+            "dfs.namenode.name.dir.restore": "true", 
+            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600"
+        }, 
+        "ams-env": {
+            "ambari_metrics_user": "ams", 
+            "metrics_monitor_log_dir": "/var/log/ambari-metrics-monitor", 
+            "metrics_collector_log_dir": "/var/log/ambari-metrics-collector", 
+            "metrics_monitor_pid_dir": "/var/run/ambari-metrics-monitor", 
+            "content": "\n# Set environment variables here.\n\n# The java 
implementation to use. Java 1.6 required.\nexport 
JAVA_HOME={{java64_home}}\n\n# Collector Log directory for log4j\nexport 
AMS_COLLECTOR_LOG_DIR={{ams_collector_log_dir}}\n\n# Monitor Log directory for 
outfile\nexport AMS_MONITOR_LOG_DIR={{ams_monitor_log_dir}}\n\n# Collector pid 
directory\nexport AMS_COLLECTOR_PID_DIR={{ams_collector_pid_dir}}\n\n# Monitor 
pid directory\nexport AMS_MONITOR_PID_DIR={{ams_monitor_pid_dir}}\n\n# AMS 
HBase pid directory\nexport AMS_HBASE_PID_DIR={{hbase_pid_dir}}\n\n# AMS 
Collector options\nexport 
AMS_COLLECTOR_OPTS=\"-Djava.library.path=/usr/lib/ams-hbase/lib/hadoop-native\"\n{%
 if security_enabled %}\nexport AMS_COLLECTOR_OPTS=\"$AMS_COLLECTOR_OPTS 
-Djava.security.auth.login.config={{ams_collector_jaas_config_file}}\"\n{% 
endif %}", 
+            "metrics_collector_pid_dir": "/var/run/ambari-metrics-collector"
+        }, 
+        "ams-site": {
+            "timeline.metrics.host.aggregator.minute.ttl": "604800", 
+            
"timeline.metrics.cluster.aggregator.minute.checkpointCutOffMultiplier": "2", 
+            "timeline.metrics.cluster.aggregator.hourly.disabled": "false", 
+            "timeline.metrics.cluster.aggregator.minute.timeslice.interval": 
"15", 
+            "timeline.metrics.service.resultset.fetchSize": "2000", 
+            "timeline.metrics.service.checkpointDelay": "60", 
+            "timeline.metrics.cluster.aggregator.hourly.ttl": "31536000", 
+            "timeline.metrics.hbase.compression.scheme": "SNAPPY", 
+            "timeline.metrics.cluster.aggregator.hourly.interval": "3600", 
+            "timeline.metrics.host.aggregator.ttl": "86400", 
+            
"timeline.metrics.cluster.aggregator.hourly.checkpointCutOffMultiplier": "2", 
+            "timeline.metrics.service.webapp.address": "0.0.0.0:6188", 
+            "timeline.metrics.aggregator.checkpoint.dir": 
"/var/lib/ambari-metrics-collector/checkpoint", 
+            "timeline.metrics.host.aggregator.minute.disabled": "false", 
+            "timeline.metrics.cluster.aggregator.minute.ttl": "2592000", 
+            "timeline.metrics.service.operation.mode": "embedded", 
+            
"timeline.metrics.host.aggregator.minute.checkpointCutOffMultiplier": "2", 
+            "timeline.metrics.host.aggregator.hourly.disabled": "false", 
+            
"timeline.metrics.host.aggregator.hourly.checkpointCutOffMultiplier": "2", 
+            "timeline.metrics.service.rpc.address": "0.0.0.0:60200", 
+            "timeline.metrics.cluster.aggregator.minute.disabled": "false", 
+            "timeline.metrics.host.aggregator.hourly.ttl": "2592000", 
+            "timeline.metrics.host.aggregator.minute.interval": "300", 
+            "timeline.metrics.service.default.result.limit": "5760", 
+            "timeline.metrics.hbase.data.block.encoding": "FAST_DIFF", 
+            "timeline.metrics.cluster.aggregator.minute.interval": "120", 
+            "timeline.metrics.host.aggregator.hourly.interval": "3600"
+        }, 
+        "ams-hbase-policy": {
+            "security.admin.protocol.acl": "*", 
+            "security.masterregion.protocol.acl": "*", 
+            "security.client.protocol.acl": "*"
+        }, 
+        "hadoop-policy": {
+            "security.job.client.protocol.acl": "*", 
+            "security.job.task.protocol.acl": "*", 
+            "security.datanode.protocol.acl": "*", 
+            "security.namenode.protocol.acl": "*", 
+            "security.client.datanode.protocol.acl": "*", 
+            "security.inter.tracker.protocol.acl": "*", 
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
+            "security.client.protocol.acl": "*", 
+            "security.refresh.policy.protocol.acl": "hadoop", 
+            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.inter.datanode.protocol.acl": "*"
+        }, 
+        "hdfs-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation 
(ASF) under one\n# or more contributor license agreements.  See the NOTICE 
file\n# distributed with this work for additional information\n# regarding 
copyright ownership.  The ASF licenses this file\n# to you under the Apache 
License, Version 2.0 (the\n# \"License\"); you may not use this file except in 
compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable 
law or agreed to in writing,\n# software distributed under the License is 
distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 
ANY\n# KIND, either express or implied.  See the License for the\n# specific 
language governing permissions and limitations\n# under the License.\n#\n\n\n# 
Define some default values that can be overridden by system properties\n# To 
change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define 
the root logger to the system property 
\"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, 
EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling 
File 
Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n#
 Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day 
backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n#
 Pattern format: Date LogLevel LoggerName 
LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\n# Debugging Pattern 
format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if 
you want to use 
this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 
.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd
 HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default 
values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601}
 %p %c: %m%n\n\n#\n#Security audit 
appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 
er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n#
 hdfs audit 
logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 
false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# 
mapred audit 
logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling 
File 
Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.fi
 le}\n\n# Logfile size and and 30-day 
backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601}
 %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} 
%-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging 
levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n#
 Jets3t 
library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n#
 Null Appender\n# Trap security logger on the hadoop client 
side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n#
 Event Counter Appender\n# Sends counts of logging messages at different 
severity levels to Hadoop Metrics.\n#\n
 log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# 
Removes \"deprecated\" 
messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n#
 HDFS block state change log from block manager\n#\n# Uncomment the following 
to suppress normal block state change\n# messages from BlockManager in 
NameNode.\n#log4j.logger.BlockStateChange=WARN"
+        }, 
+        "kafka-broker": {
+            "kafka.ganglia.metrics.port": "8671", 
+            "socket.send.buffer.bytes": "102400", 
+            "num.network.threads": "3", 
+            "log.segment.bytes": "1073741824", 
+            "kafka.ganglia.metrics.host": "localhost", 
+            "message.max.bytes": "1000000", 
+            "replica.lag.time.max.ms": "10000", 
+            "kafka.ganglia.metrics.group": "kafka", 
+            "kafka.timeline.metrics.maxRowCacheSize": "10000", 
+            "fetch.purgatory.purge.interval.requests": "10000", 
+            "replica.lag.max.messages": "4000", 
+            "port": "6667", 
+            "kafka.metrics.reporters": "{{kafka_metrics_reporters}}", 
+            "log.retention.bytes": "-1", 
+            "producer.purgatory.purge.interval.requests": "10000", 
+            "log.flush.scheduler.interval.ms": "3000", 
+            "default.replication.factor": "1", 
+            "replica.high.watermark.checkpoint.interval.ms": "5000", 
+            "zookeeper.connect": 
"c6407.ambari.apache.org:2181,c6408.ambari.apache.org:2181,c6409.ambari.apache.org:2181",
 
+            "controlled.shutdown.retry.backoff.ms": "5000", 
+            "kafka.timeline.metrics.host": "{{metric_collector_host}}", 
+            "kafka.timeline.metrics.reporter.sendInterval": "5900", 
+            "num.partitions": "1", 
+            "log.flush.interval.messages": "10000", 
+            "replica.fetch.min.bytes": "1", 
+            "zookeeper.sync.time.ms": "2000", 
+            "kafka.timeline.metrics.reporter.enabled": "true", 
+            "controlled.shutdown.max.retries": "3", 
+            "replica.fetch.wait.max.ms": "500", 
+            "controlled.shutdown.enable": "false", 
+            "log.roll.hours": "168", 
+            "replica.socket.receive.buffer.bytes": "65536", 
+            "kafka.ganglia.metrics.reporter.enabled": "true", 
+            "zookeeper.connection.timeout.ms": "6000", 
+            "replica.fetch.max.bytes": "1048576", 
+            "num.replica.fetchers": "1", 
+            "socket.request.max.bytes": "104857600", 
+            "log.cleanup.interval.mins": "10", 
+            "controller.message.queue.size": "10", 
+            "queued.max.requests": "500", 
+            "socket.receive.buffer.bytes": "102400", 
+            "kafka.timeline.metrics.port": "{{metric_collector_port}}", 
+            "num.io.threads": "8", 
+            "log.flush.interval.ms": "3000", 
+            "log.dirs": "/kafka-logs", 
+            "controller.socket.timeout.ms": "30000", 
+            "replica.socket.timeout.ms": "30000", 
+            "zookeeper.session.timeout.ms": "30000", 
+            "auto.create.topics.enable": "true", 
+            "log.index.size.max.bytes": "10485760", 
+            "log.index.interval.bytes": "4096", 
+            "log.retention.hours": "168"
+        }, 
+        "mapred-env": {
+            "content": "\n# export 
JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport 
HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport 
HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export 
HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log 
files are stored.  $HADOOP_MAPRED_HOME/logs by default.\n#export 
HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export 
HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export 
HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. 
$USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for 
daemons. Defaults to 0.\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION 
$HADOOP_OPTS\"", 
+            "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce", 
+            "mapred_user": "mapred", 
+            "jobhistory_heapsize": "900", 
+            "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
+        }, 
+        "ranger-hdfs-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": 
"hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
 
+            "POLICY_USER": "ambari-qa", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": 
"__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "hadoop.rpc.protection": "-", 
+            "ranger-hdfs-plugin-enabled": "No", 
+            "SSL_KEYSTORE_FILE_PATH": 
"/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": 
"__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": 
"/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop"
+        }, 
+        "zoo.cfg": {
+            "clientPort": "2181", 
+            "autopurge.purgeInterval": "24", 
+            "syncLimit": "5", 
+            "dataDir": "/hadoop/zookeeper", 
+            "initLimit": "10", 
+            "tickTime": "2000", 
+            "autopurge.snapRetainCount": "30"
+        }, 
+        "tez-env": {
+            "content": "\n# Tez specific configuration\nexport 
TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop 
install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The 
java implementation to use.\nexport JAVA_HOME={{java64_home}}", 
+            "tez_user": "tez"
+        }, 
+        "spark-metrics-properties": {
+            "content": "\n# syntax: 
[instance].sink|source.[name].[options]=[value]\n\n# This file configures 
Spark's internal metrics system. The metrics system is\n# divided into 
instances which correspond to internal components.\n# Each instance can be 
configured to report its metrics to one or more sinks.\n# Accepted values for 
[instance] are \"master\", \"worker\", \"executor\", \"driver\",\n# and 
\"applications\". A wild card \"*\" can be used as an instance name, in\n# 
which case all instances will inherit the supplied property.\n#\n# Within an 
instance, a \"source\" specifies a particular set of grouped metrics.\n# there 
are two kinds of sources:\n# 1. Spark internal sources, like MasterSource, 
WorkerSource, etc, which will\n# collect a Spark component's internal state. 
Each instance is paired with a\n# Spark source that is added automatically.\n# 
2. Common sources, like JvmSource, which will collect low level state.\n# These 
can be added through configuration options and ar
 e then loaded\n# using reflection.\n#\n# A \"sink\" specifies where metrics 
are delivered to. Each instance can be\n# assigned one or more sinks.\n#\n# The 
sink|source field specifies whether the property relates to a sink or\n# 
source.\n#\n# The [name] field specifies the name of source or sink.\n#\n# The 
[options] field is the specific property of this source or sink. The\n# source 
or sink is responsible for parsing this property.\n#\n# Notes:\n# 1. To add a 
new sink, set the \"class\" option to a fully qualified class\n# name (see 
examples below).\n# 2. Some sinks involve a polling period. The minimum allowed 
polling period\n# is 1 second.\n# 3. Wild card properties can be overridden by 
more specific properties.\n# For example, master.sink.console.period takes 
precedence over\n# *.sink.console.period.\n# 4. A metrics specific 
configuration\n# \"spark.metrics.conf=${SPARK_HOME}/conf/metrics.properties\" 
should be\n# added to Java properties using -Dspark.metrics.conf=xxx if you wa
 nt to\n# customize metrics system. You can also put the file in 
${SPARK_HOME}/conf\n# and it will be loaded automatically.\n# 5. MetricsServlet 
is added by default as a sink in master, worker and client\n# driver, you can 
send http request \"/metrics/json\" to get a snapshot of all the\n# registered 
metrics in json format. For master, requests \"/metrics/master/json\" and\n# 
\"/metrics/applications/json\" can be sent seperately to get metrics snapshot 
of\n# instance master and applications. MetricsServlet may not be configured by 
self.\n#\n\n## List of available sinks and their properties.\n\n# 
org.apache.spark.metrics.sink.ConsoleSink\n# Name: Default: Description:\n# 
period 10 Poll period\n# unit seconds Units of poll period\n\n# 
org.apache.spark.metrics.sink.CSVSink\n# Name: Default: Description:\n# period 
10 Poll period\n# unit seconds Units of poll period\n# directory /tmp Where to 
store CSV files\n\n# org.apache.spark.metrics.sink.GangliaSink\n# Name: 
Default: Description:\n# 
 host NONE Hostname or multicast group of Ganglia server\n# port NONE Port of 
Ganglia server(s)\n# period 10 Poll period\n# unit seconds Units of poll 
period\n# ttl 1 TTL of messages sent by Ganglia\n# mode multicast Ganglia 
network mode ('unicast' or 'multicast')\n\n# 
org.apache.spark.metrics.sink.JmxSink\n\n# 
org.apache.spark.metrics.sink.MetricsServlet\n# Name: Default: Description:\n# 
path VARIES* Path prefix from the web server root\n# sample false Whether to 
show entire set of samples for histograms ('false' or 'true')\n#\n# * Default 
path is /metrics/json for all instances except the master. The master has two 
paths:\n# /metrics/aplications/json # App information\n# /metrics/master/json # 
Master information\n\n# org.apache.spark.metrics.sink.GraphiteSink\n# Name: 
Default: Description:\n# host NONE Hostname of Graphite server\n# port NONE 
Port of Graphite server\n# period 10 Poll period\n# unit seconds Units of poll 
period\n# prefix EMPTY STRING Prefix to prepend to metric name
 \n\n## Examples\n# Enable JmxSink for all instances by class 
name\n#*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink\n\n# Enable 
ConsoleSink for all instances by class 
name\n#*.sink.console.class=org.apache.spark.metrics.sink.ConsoleSink\n\n# 
Polling period for 
ConsoleSink\n#*.sink.console.period=10\n\n#*.sink.console.unit=seconds\n\n# 
Master instance overlap polling 
period\n#master.sink.console.period=15\n\n#master.sink.console.unit=seconds\n\n#
 Enable CsvSink for all 
instances\n#*.sink.csv.class=org.apache.spark.metrics.sink.CsvSink\n\n# Polling 
period for CsvSink\n#*.sink.csv.period=1\n\n#*.sink.csv.unit=minutes\n\n# 
Polling directory for CsvSink\n#*.sink.csv.directory=/tmp/\n\n# Worker instance 
overlap polling 
period\n#worker.sink.csv.period=10\n\n#worker.sink.csv.unit=minutes\n\n# Enable 
jvm source for instance master, worker, driver and 
executor\n#master.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#worker.source.jvm.class=org.apache.spark.metrics.source
 
.JvmSource\n\n#driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource"
+        }, 
+        "core-site": {
+            "io.serializations": 
"org.apache.hadoop.io.serializer.WritableSerialization", 
+            "hadoop.security.auth_to_local": 
"RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0]([email protected])s/.*/ams/\nRULE:[2:$1@$0]([email protected])s/.*/hdfs/\nRULE:[2:$1@$0]([email protected])s/.*/mapred/\nRULE:[2:$1@$0]([email protected])s/.*/hdfs/\nRULE:[2:$1@$0]([email protected])s/.*/yarn/\nRULE:[2:$1@$0]([email protected])s/.*/hdfs/\nRULE:[2:$1@$0]([email protected])s/.*/yarn/\nRULE:[2:$1@$0]([email protected])s/.*/spark/\nRULE:[2:$1@$0]([email protected])s/.*/yarn/\nRULE:[2:$1@$0]([email protected])s/.*/ams/\nDEFAULT",
 
+            "proxyuser_group": "users", 
+            "fs.trash.interval": "360", 
+            "ha.failover-controller.active-standby-elector.zk.op.retries": 
"120", 
+            "hadoop.security.authentication": "kerberos", 
+            "hadoop.rpc.protection": "authentication", 
+            "io.compression.codecs": 
"org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
 
+            "mapreduce.jobtracker.webinterface.trusted": "false", 
+            "hadoop.security.authorization": "true", 
+            "fs.defaultFS": "hdfs://ha", 
+            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
+            "ipc.client.connect.max.retries": "50", 
+            "ipc.client.idlethreshold": "8000", 
+            "io.file.buffer.size": "131072", 
+            "ipc.server.tcpnodelay": "true", 
+            "ha.zookeeper.quorum": 
"c6407.ambari.apache.org:2181,c6408.ambari.apache.org:2181,c6409.ambari.apache.org:2181",
 
+            "ipc.client.connection.maxidletime": "30000"
+        }, 
+        "ams-hbase-site": {
+            "hbase.master.info.bindAddress": "0.0.0.0", 
+            "hbase.zookeeper.property.dataDir": "${hbase.tmp.dir}/zookeeper", 
+            "hbase.rootdir": "file:///var/lib/ambari-metrics-collector/hbase", 
+            "hbase.replication": "false", 
+            "hbase.hregion.majorcompaction": "0", 
+            "hbase.hregion.memstore.block.multiplier": "4", 
+            "hbase.hregion.memstore.flush.size": "134217728", 
+            "hbase.regionserver.global.memstore.lowerLimit": "0.4", 
+            "hbase.zookeeper.property.clientPort": "61181", 
+            "hbase.client.scanner.timeout.period": "900000", 
+            "phoenix.groupby.maxCacheSize": "307200000", 
+            "hbase.snapshot.enabled": "false", 
+            "hbase.master.wait.on.regionservers.mintostart": "1", 
+            "hbase.regionserver.global.memstore.upperLimit": "0.5", 
+            "phoenix.query.spoolThresholdBytes": "12582912", 
+            "zookeeper.session.timeout": "120000", 
+            "hbase.tmp.dir": "/var/lib/ambari-metrics-collector/hbase-tmp", 
+            "hfile.block.cache.size": "0.3", 
+            "hbase.regionserver.port": "61320", 
+            "hbase.regionserver.thread.compaction.small": "3", 
+            "hbase.master.info.port": "61310", 
+            "hbase.zookeeper.quorum": "{{zookeeper_quorum_hosts}}", 
+            "hbase.regionserver.info.port": "61330", 
+            "hbase.hstore.blockingStoreFiles": "200", 
+            "hbase.master.port": "61300", 
+            "hbase.zookeeper.leaderport": "61388", 
+            "hbase.regionserver.thread.compaction.large": "2", 
+            "phoenix.query.timeoutMs": "1200000", 
+            "hbase.local.dir": "${hbase.tmp.dir}/local", 
+            "hbase.cluster.distributed": "false", 
+            "hbase.client.scanner.caching": "10000", 
+            "phoenix.sequence.saltBuckets": "2", 
+            "hbase.hstore.flusher.count": "2", 
+            "hbase.zookeeper.peerport": "61288"
+        }, 
+        "yarn-env": {
+            "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
+            "apptimelineserver_heapsize": "1024", 
+            "nodemanager_heapsize": "1024", 
+            "content": "\nexport HADOOP_YARN_HOME={{hadoop_yarn_home}}\nexport 
YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nexport 
YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\nexport 
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\nexport 
JAVA_HOME={{java64_home}}\n\n# User for YARN daemons\nexport 
HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n# resolve links - $0 may be a 
softlink\nexport 
YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n# some Java 
parameters\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\nif [ \"$JAVA_HOME\" 
!= \"\" ]; then\n  #echo \"run java in $JAVA_HOME\"\n  
JAVA_HOME=$JAVA_HOME\nfi\n\nif [ \"$JAVA_HOME\" = \"\" ]; then\n  echo \"Error: 
JAVA_HOME is not set.\"\n  exit 
1\nfi\n\nJAVA=$JAVA_HOME/bin/java\nJAVA_HEAP_MAX=-Xmx1000m\n\n# For setting 
YARN specific HEAP sizes please use this\n# Parameter and set 
appropriately\nYARN_HEAPSIZE={{yarn_heapsize}}\n\n# check envvars which might 
override default args\nif [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n  JAVA_HEAP_M
 AX=\"-Xmx\"\"$YARN_HEAPSIZE\"\"m\"\nfi\n\n# Resource Manager specific 
parameters\n\n# Specify the max Heapsize for the ResourceManager using a 
numerical value\n# in the scale of MB. For example, to specify an jvm option of 
-Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx 
setting specified in either YARN_OPTS\n# and/or YARN_RESOURCEMANAGER_OPTS.\n# 
If not specified, the default value will be picked from either YARN_HEAPMAX\n# 
or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport 
YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n# Specify the JVM 
options to be used when starting the ResourceManager.\n# These options will be 
appended to the options specified as YARN_OPTS\n# and therefore may override 
any similar flags set in YARN_OPTS\n#export YARN_RESOURCEMANAGER_OPTS=\n\n# 
Node Manager specific parameters\n\n# Specify the max Heapsize for the 
NodeManager using a numerical value\n# in the scale of MB. For example, to 
 specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value 
will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or 
YARN_NODEMANAGER_OPTS.\n# If not specified, the default value will be picked 
from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred 
option of the two.\nexport 
YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n# Specify the max 
Heapsize for the HistoryManager using a numerical value\n# in the scale of MB. 
For example, to specify an jvm option of -Xmx1000m, set\n# the value to 
1024.\n# This value will be overridden by an Xmx setting specified in either 
YARN_OPTS\n# and/or YARN_HISTORYSERVER_OPTS.\n# If not specified, the default 
value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with 
YARN_HEAPMAX as the preferred option of the two.\nexport 
YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n# Specify the JVM 
options to be used when starting the NodeManager.\n# These options will be
  appended to the options specified as YARN_OPTS\n# and therefore may override 
any similar flags set in YARN_OPTS\n#export YARN_NODEMANAGER_OPTS=\n\n# so that 
filenames w/ spaces are handled correctly in loops below\nIFS=\n\n\n# default 
log directory and file\nif [ \"$YARN_LOG_DIR\" = \"\" ]; then\n  
YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\nfi\nif [ \"$YARN_LOGFILE\" = \"\" ]; 
then\n  YARN_LOGFILE='yarn.log'\nfi\n\n# default policy file for service-level 
authorization\nif [ \"$YARN_POLICYFILE\" = \"\" ]; then\n  
YARN_POLICYFILE=\"hadoop-policy.xml\"\nfi\n\n# restore ordinary 
behaviour\nunset IFS\n\n\nYARN_OPTS=\"$YARN_OPTS 
-Dhadoop.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS 
-Dyarn.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS 
-Dhadoop.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS 
-Dyarn.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS 
-Dyarn.home.dir=$YARN_COMMON_HOME\"\nYARN_OPTS=\"$YARN_OPTS 
-Dyarn.id.str=$YARN_IDENT_STRING\"\nYARN_OPTS=\"$YARN_OPTS 
-Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nYARN_OPTS=\"$YARN_OPTS 
-Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nif [ 
\"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n  YARN_OPTS=\"$YARN_OPTS 
-Djava.library.path=$JAVA_LIBRARY_PATH\"\nfi\nYARN_OPTS=\"$YARN_OPTS 
-Dyarn.policy.file=$YARN_POLICYFILE\"", 
+            "yarn_heapsize": "1024", 
+            "min_user_id": "500", 
+            "yarn_user": "yarn", 
+            "resourcemanager_heapsize": "1024", 
+            "yarn_log_dir_prefix": "/var/log/hadoop-yarn"
+        }, 
+        "ams-hbase-log4j": {
+            "content": "\n# Licensed to the Apache Software Foundation (ASF) 
under one\n# or more contributor license agreements.  See the NOTICE file\n# 
distributed with this work for additional information\n# regarding copyright 
ownership.  The ASF licenses this file\n# to you under the Apache License, 
Version 2.0 (the\n# \"License\"); you may not use this file except in 
compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  
   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by 
applicable law or agreed to in writing, software\n# distributed under the 
License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the 
specific language governing permissions and\n# limitations under the 
License.\n\n\n# Define some default values that can be overridden by system 
properties\nhbase.root.logger=INFO,console\nhbase.security.logger=INFO,console\nhbase.log.dir=.\nhbase.log.file=hbase.log\n\n# Define the root logger to the system property 
\"hbase.root.logger\".\nlog4j.rootLogger=${hbase.root.logger}\n\n# Logging 
Threshold\nlog4j.threshold=ALL\n\n#\n# Daily Rolling File 
Appender\n#\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}\n\n#
 Rollover at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day 
backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n#
 Pattern format: Date LogLevel LoggerName 
LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] 
%c{2}: %m%n\n\n# Rolling File Appender 
properties\nhbase.log.maxfilesize=256MB\nhbase.log.maxbackupindex=20\n\n# 
Rolling File 
Appender\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}\n\nlog4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}\nlog4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601}
 %-5p [%t] %c{2}: %m%n\n\n#\n# Security audit 
appender\n#\nhbase.security.log.file=SecurityAuth.audit\nhbase.security.log.maxfilesize=256MB\nhbase.security.log.maxbackupindex=20\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}\nlog4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\nlog4j.category.SecurityLogger=${hbase.security.logger}\nlog4j.additivity.SecurityLogger=false\n#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE\n\n#\n#
 Null 
Appender\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use 
this\n#\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{ISO8601}
 %-5p [%t] %c{2}: %m%n\n\n# Custom Logging 
levels\n\nlog4j.logger.org.apache.zookeeper=INFO\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.hbase=INFO\n#
 Make these two classes INFO-level. Make them DEBUG to see more zk 
debug.\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO\n#log4j.logger.org.apache.hadoop.dfs=DEBUG\n#
 Set this class to log INFO only otherwise its OTT\n# Enable this to get 
detailed connection error/retry logging.\n# 
log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE\n\n\n#
 Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of 
output)\n#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG\n\n# 
Uncomment the below if you want to remove logging of client region caching'\n# 
and scan of .META. messages\n# 
log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO\n#
 log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO"
+        }, 
+        "hadoop-env": {
+            "dtnode_heapsize": "1024m", 
+            "namenode_opt_maxnewsize": "256m", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop", 
+            "namenode_heapsize": "1024m", 
+            "proxyuser_group": "users", 
+            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
+            "content": "\n# Set Hadoop-specific environment variables 
here.\n\n# The only required environment variable is JAVA_HOME.  All others 
are\n# optional.  When running a distributed configuration it is best to\n# set 
JAVA_HOME in this file, so that it is correctly defined on\n# remote 
nodes.\n\n# The java implementation to use.  Required.\nexport 
JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home 
directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop 
Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc 
required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The 
maximum amount of heap to use, in MB. Default is 1000.\nexport 
HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java 
runtime options.  Empty by default.\nexport 
HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command 
specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-XX:PermSize={{namenode_opt_permsize}} 
-XX:MaxPermSize={{namenode_opt_maxpermsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS 
-Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport 
HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m 
-XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m 
-Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m 
$HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as 
after dropping privileges\nexport 
HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# 
Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o 
ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  
$HADOOP_HOME/logs by default.\nexport 
HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport 
HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are 
stored in the secure data environment.\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export 
HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code 
should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave 
commands.  Unset by default.  This\n# can be useful in large clusters, where, 
e.g., slave rsyncs can\n# otherwise arrive faster than the master can service 
them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are 
stored. /tmp by default.\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
History server pid\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n#
 A string representing this instance of hadoop. $USER by default.\nexport 
HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  
See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard 
classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor 
jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by 
oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n 
 JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by 
nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# 
added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; 
then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, 
the tez-client will be a symlink to the current folder of tez in HDP.\n    
export 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n
  fi\nfi\n\n\n# Setting path to hdfs command line\nexport 
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 
2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport 
HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
+            "hdfs_user": "hdfs", 
+            "namenode_opt_newsize": "256m", 
+            "dfs.datanode.data.dir.mount.file": 
"/etc/hadoop/conf/dfs_data_dir_mount.hist", 
+            "hadoop_root_logger": "INFO,RFA", 
+            "hadoop_heapsize": "1024", 
+            "namenode_opt_maxpermsize": "256m", 
+            "namenode_opt_permsize": "128m", 
+            "hdfs_principal_name": "[email protected]"
+        }, 
+        "zookeeper-log4j": {
+            "content": "\n#\n#\n# Licensed to the Apache Software Foundation 
(ASF) under one\n# or more contributor license agreements.  See the NOTICE 
file\n# distributed with this work for additional information\n# regarding 
copyright ownership.  The ASF licenses this file\n# to you under the Apache 
License, Version 2.0 (the\n# \"License\"); you may not use this file except in 
compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  
 http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable 
law or agreed to in writing,\n# software distributed under the License is 
distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 
ANY\n# KIND, either express or implied.  See the License for the\n# specific 
language governing permissions and limitations\n# under the 
License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: 
console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling 
log file\n#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file 
and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# 
Log INFO level and above messages to the 
console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601}
 - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log 
file output\n#    Log DEBUG level and above messages to a log 
file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n#
 Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# 
uncomment the next line to limit number of backup 
files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - 
%m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log 
DEBUG level and above messages to a log 
file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n###
 Notice we are including log4j's NDC here 
(%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p 
[%t:%C{1}@%L][%x] - %m%n"
+        }, 
+        "yarn-site": {
+            "yarn.timeline-service.http-authentication.kerberos.keytab": 
"/etc/security/keytabs/spnego.service.keytab", 
+            "yarn.resourcemanager.webapp.address": 
"c6408.ambari.apache.org:8088", 
+            "yarn.resourcemanager.zk-num-retries": "1000", 
+            "yarn.timeline-service.bind-host": "0.0.0.0", 
+            "yarn.resourcemanager.ha.enabled": "false", 
+            "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": 
"hadoop-yarn", 
+            "yarn.timeline-service.webapp.address": 
"c6408.ambari.apache.org:8188", 
+            "yarn.nodemanager.principal": "nm/[email protected]", 
+            "yarn.timeline-service.enabled": "true", 
+            "yarn.nodemanager.recovery.enabled": "true", 
+            "yarn.timeline-service.http-authentication.type": "kerberos", 
+            "yarn.nodemanager.keytab": 
"/etc/security/keytabs/nm.service.keytab", 
+            "yarn.timeline-service.address": "c6408.ambari.apache.org:10200", 
+            "yarn.resourcemanager.hostname": "c6408.ambari.apache.org", 
+            "yarn.resourcemanager.webapp.spnego-principal": 
"HTTP/[email protected]", 
+            "yarn.resourcemanager.am.max-attempts": "2", 
+            "yarn.nodemanager.log-aggregation.debug-enabled": "false", 
+            "yarn.resourcemanager.system-metrics-publisher.enabled": "true", 
+            "yarn.nodemanager.vmem-pmem-ratio": "2.1", 
+            "yarn.resourcemanager.nodes.exclude-path": 
"/etc/hadoop/conf/yarn.exclude", 
+            "yarn.nodemanager.linux-container-executor.cgroups.mount": 
"false", 
+            
"yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10", 
+            "yarn.log.server.url": 
"http://c6408.ambari.apache.org:19888/jobhistory/logs";, 
+            "yarn.nodemanager.webapp.spnego-principal": 
"HTTP/[email protected]", 
+            "yarn.timeline-service.keytab": 
"/etc/security/keytabs/yarn.service.keytab", 
+            "yarn.application.classpath": 
"$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*",
 
+            
"yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false", 
+            "yarn.resourcemanager.keytab": 
"/etc/security/keytabs/rm.service.keytab", 
+            "yarn.resourcemanager.principal": "rm/[email protected]", 
+            "yarn.nodemanager.local-dirs": "/hadoop/yarn/local", 
+            
"yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage": 
"false", 
+            "yarn.nodemanager.remote-app-log-dir-suffix": "logs", 
+            "yarn.resourcemanager.connect.max-wait.ms": "900000", 
+            "yarn.resourcemanager.address": "c6408.ambari.apache.org:8050", 
+            "yarn.scheduler.maximum-allocation-mb": "2048", 
+            "yarn.nodemanager.container-monitor.interval-ms": "3000", 
+            "yarn.node-labels.fs-store.retry-policy-spec": "2000, 500", 
+            "yarn.resourcemanager.zk-acl": "world:anyone:rwcda", 
+            "yarn.resourcemanager.webapp.https.address": 
"c6408.ambari.apache.org:8090", 
+            "yarn.log-aggregation-enable": "true", 
+            "yarn.nodemanager.delete.debug-delay-sec": "0", 
+            "yarn.timeline-service.store-class": 
"org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore", 
+            "yarn.resourcemanager.webapp.spnego-keytab-file": 
"/etc/security/keytabs/spnego.service.keytab", 
+            "yarn.timeline-service.client.retry-interval-ms": "1000", 
+            "hadoop.registry.zk.quorum": 
"c6407.ambari.apache.org:2181,c6408.ambari.apache.org:2181,c6409.ambari.apache.org:2181",
 
+            "yarn.nodemanager.aux-services": "mapreduce_shuffle", 
+            "yarn.nodemanager.aux-services.mapreduce_shuffle.class": 
"org.apache.hadoop.mapred.ShuffleHandler", 
+            
"yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage":
 "90", 
+            "yarn.resourcemanager.zk-timeout-ms": "10000", 
+            "yarn.resourcemanager.fs.state-store.uri": " ", 
+            "yarn.nodemanager.linux-container-executor.group": "hadoop", 
+            "yarn.nodemanager.remote-app-log-dir": "/app-logs", 
+            
"yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000", 
+            "yarn.timeline-service.generic-application-history.store-class": 
"org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore",
 
+            "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", 
+            "yarn.resourcemanager.state-store.max-completed-applications": 
"${yarn.resourcemanager.max-completed-applications}", 
+            "yarn.resourcemanager.work-preserving-recovery.enabled": "true", 
+            "yarn.resourcemanager.resource-tracker.address": 
"c6408.ambari.apache.org:8025", 
+            "yarn.nodemanager.health-checker.script.timeout-ms": "60000", 
+            "yarn.resourcemanager.scheduler.class": 
"org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler",
 
+            "yarn.nodemanager.resource.memory-mb": "2048", 
+            "yarn.nodemanager.resource.cpu-vcores": "1", 
+            "yarn.timeline-service.ttl-ms": "2678400000", 
+            "yarn.nodemanager.resource.percentage-physical-cpu-limit": "100", 
+            "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": 
"1000", 
+            
"yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "-1", 
+            "yarn.nodemanager.log.retain-second": "604800", 
+            "yarn.timeline-service.principal": "yarn/[email protected]", 
+            "yarn.nodemanager.log-dirs": "/hadoop/yarn/log", 
+            "yarn.timeline-service.client.max-retries": "30", 
+            "yarn.nodemanager.health-checker.interval-ms": "135000", 
+            "yarn.nodemanager.admin-env": 
"MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", 
+            "yarn.nodemanager.vmem-check-enabled": "false", 
+            "yarn.acl.enable": "true", 
+            "yarn.node-labels.manager-class": 
"org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager",
 
+            "yarn.timeline-service.leveldb-timeline-store.read-cache-size": 
"104857600", 
+            
"yarn.nodemanager.linux-container-executor.resources-handler.class": 
"org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler", 
+            "yarn.client.nodemanager-connect.max-wait-ms": "60000", 
+            
"yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true", 
+            
"yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": 
"10000", 
+            "yarn.resourcemanager.bind-host": "0.0.0.0", 
+            "yarn.http.policy": "HTTP_ONLY", 
+            "yarn.resourcemanager.scheduler.address": 
"c6408.ambari.apache.org:8030", 
+            "yarn.nodemanager.recovery.dir": 
"{{yarn_log_dir_prefix}}/nodemanager/recovery-state", 
+            "yarn.nodemanager.container-executor.class": 
"org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor", 
+            "yarn.resourcemanager.store.class": 
"org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore", 
+            "yarn.nodemanager.webapp.spnego-keytab-file": 
"/etc/security/keytabs/spnego.service.keytab", 
+            "yarn.timeline-service.leveldb-timeline-store.path": 
"/hadoop/yarn/timeline", 
+            "yarn.scheduler.minimum-allocation-mb": "682", 
+            "yarn.timeline-service.ttl-enable": "true", 
+            "yarn.resourcemanager.zk-address": "c6407.ambari.apache.org:2181", 
+            "yarn.log-aggregation.retain-seconds": "2592000", 
+            "yarn.nodemanager.address": "0.0.0.0:45454", 
+            "hadoop.registry.rm.enabled": "false", 
+            "yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms": 
"300000", 
+            "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 
500", 
+            "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore", 
+            "yarn.nodemanager.log-aggregation.compression-type": "gz", 
+            "yarn.timeline-service.http-authentication.kerberos.principal": 
"HTTP/[email protected]", 
+            "yarn.nodemanager.log-aggregation.num-log-files-per-app": "30", 
+            "yarn.resourcemanager.recovery.enabled": "true", 
+            "yarn.nodemanager.bind-host": "0.0.0.0", 
+            "yarn.resourcemanager.zk-retry-interval-ms": "1000", 
+            "yarn.admin.acl": "", 
+            "yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels", 
+            "yarn.client.nodemanager-connect.retry-interval-ms": "10000", 
+            "yarn.resourcemanager.admin.address": 
"c6408.ambari.apache.org:8141", 
+            "yarn.timeline-service.webapp.https.address": 
"c6408.ambari.apache.org:8190", 
+            "yarn.resourcemanager.connect.retry-interval.ms": "30000", 
+            
"yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": 
"10000"
+        }, 
+        "capacity-scheduler": {
+            "yarn.scheduler.capacity.default.minimum-user-limit-percent": 
"100", 
+            "yarn.scheduler.capacity.root.default.maximum-capacity": "100", 
+            "yarn.scheduler.capacity.root.default.user-limit-factor": "1", 
+            "yarn.scheduler.capacity.root.accessible-node-labels": "*", 
+            "yarn.scheduler.capacity.root.default.state": "RUNNING", 
+            "yarn.scheduler.capacity.root.capacity": "100", 
+            "yarn.scheduler.capacity.root.default.capacity": "100", 
+            "yarn.scheduler.capacity.root.queues": "default", 
+            
"yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": 
"-1", 
+            "yarn.scheduler.capacity.root.default-node-label-expression": " ", 
+            "yarn.scheduler.capacity.node-locality-delay": "40", 
+            
"yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": "-1", 
+            "yarn.scheduler.capacity.root.default.acl_submit_applications": 
"*", 
+            "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2", 
+            "yarn.scheduler.capacity.root.acl_administer_queue": "*", 
+            "yarn.scheduler.capacity.maximum-applications": "10000", 
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*", 
+            "yarn.scheduler.capacity.resource-calculator": 
"org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator"
+        }, 
+        "ranger-site": {
+            "HTTPS_KEYSTORE_FILE": "/etc/ranger/admin/keys/server.jks", 
+            "HTTPS_CLIENT_AUTH": "want", 
+            "HTTPS_SERVICE_PORT": "6182", 
+            "HTTPS_KEY_ALIAS": "myKey", 
+            "HTTPS_KEYSTORE_PASS": "ranger", 
+            "HTTP_ENABLED": "true

<TRUNCATED>
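
For orientation only (this is not part of the commit): the fixture above is plain JSON, so it can be loaded and inspected with nothing but the Python standard library. The sketch below makes two assumptions that are not visible in the truncated diff: the relative file path, and that the per-service sections (core-site, yarn-site, ...) sit under a top-level "configurations" key.

    import json

    # Hypothetical path for illustration; point it at the fixture added by this commit.
    FIXTURE = "ranger-admin-upgrade.json"

    with open(FIXTURE) as fh:
        command = json.load(fh)

    # Assumption: the per-service sections shown in the diff live under "configurations".
    configs = command.get("configurations", {})
    core_site = configs.get("core-site", {})
    yarn_site = configs.get("yarn-site", {})

    print(core_site.get("fs.defaultFS"))                   # "hdfs://ha" per the diff above
    print(yarn_site.get("yarn.resourcemanager.hostname"))  # "c6408.ambari.apache.org" per the diff above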
