http://git-wip-us.apache.org/repos/asf/ambari/blob/83efcfea/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/files/templetonSmoke.sh ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/files/templetonSmoke.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/files/templetonSmoke.sh new file mode 100644 index 0000000..2d07b8b --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/files/templetonSmoke.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
#
# Smoke test for the WebHCat (Templeton) REST server.
#
# Arguments:
#   $1 - WebHCat server host
#   $2 - smoke test user
#   $3 - smoke user keytab path
#   $4 - "true" when Kerberos security is enabled
#   $5 - path to the local kinit binary
#
export ttonhost=$1
export smoke_test_user=$2
export smoke_user_keytab=$3
export security_enabled=$4
export kinit_path_local=$5
export ttonurl="http://${ttonhost}:50111/templeton/v1"

# Prefix every curl with a kinit when security is on so the smoke user
# has a fresh ticket for SPNEGO (--negotiate).
if [[ $security_enabled == "true" ]]; then
  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user}; "
else
  kinitcmd=""
fi

# Bypass any HTTP proxy for the WebHCat host.
export no_proxy=$ttonhost

# --- status check -----------------------------------------------------------
cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' $ttonurl/status 2>&1"
retVal=`su - ${smoke_test_user} -c "$cmd"`
httpExitCode=`echo $retVal | sed 's/.*http_code <\([0-9]*\)>.*/\1/'`

if [[ "$httpExitCode" -ne "200" ]] ; then
  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
  export TEMPLETON_EXIT_CODE=1
  exit 1
fi

# NOTE(review): this early exit deliberately(?) skips the ddl and pig checks
# below -- confirm the intent before removing it. The checks are kept (and
# fixed) so they work if it is ever dropped.
exit 0

# --- hcat ddl check (currently unreachable) ---------------------------------
# BUGFIX: destdir was never defined; the post files are written to /tmp.
destdir=/tmp
# BUGFIX: the '>' redirect was missing, so the POST body was never written.
echo "user.name=${smoke_test_user}&exec=show databases;" > ${destdir}/show_db.post.txt
cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d \@${destdir}/show_db.post.txt $ttonurl/ddl 2>&1"
retVal=`su - ${smoke_test_user} -c "$cmd"`
httpExitCode=`echo $retVal | sed 's/.*http_code <\([0-9]*\)>.*/\1/'`

if [[ "$httpExitCode" -ne "200" ]] ; then
  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
  export TEMPLETON_EXIT_CODE=1
  exit 1
fi

# Pig submission is not exercised in secure mode.
if [[ $security_enabled == "true" ]]; then
  echo "Templeton Pig Smoke Tests not run in secure mode"
  exit 0
fi

# --- pig check (currently unreachable) --------------------------------------
outname=${smoke_test_user}.`date +"%M%d%y"`.$$
ttonTestOutput="/tmp/idtest.${outname}.out"
ttonTestInput="/tmp/idtest.${outname}.in"
ttonTestScript="idtest.${outname}.pig"

# Build a trivial pig script that projects the first ':'-separated field.
echo "A = load '$ttonTestInput' using PigStorage(':');" > /tmp/$ttonTestScript
echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript

# Stage the pig script and its input (/etc/passwd) in HDFS.
su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"

# Create the POST args file for the pig submission.
echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > ${destdir}/pig_post.txt

# Submit the pig query.
cmd="curl -s -w 'http_code <%{http_code}>' -d \@${destdir}/pig_post.txt $ttonurl/pig 2>&1"
retVal=`su - ${smoke_test_user} -c "$cmd"`
httpExitCode=`echo $retVal | sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
if [[ "$httpExitCode" -ne "200" ]] ; then
  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
  export TEMPLETON_EXIT_CODE=1
  exit 1
fi

exit 0
http://git-wip-us.apache.org/repos/asf/ambari/blob/83efcfea/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/__init__.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/__init__.py new file mode 100644 index 0000000..35de4bb --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/__init__.py @@ -0,0 +1,20 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +Ambari Agent + +""" http://git-wip-us.apache.org/repos/asf/ambari/blob/83efcfea/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/params.py new file mode 100644 index 0000000..c1d55be --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/params.py @@ -0,0 +1,83 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +Ambari Agent + +""" + +from resource_management import * +import status_params + +# server configurations +config = Script.get_config() +tmp_dir = Script.get_tmp_dir() + +hcat_user = config['configurations']['hive-env']['hcat_user'] +webhcat_user = config['configurations']['hive-env']['webhcat_user'] + +if str(config['hostLevelParams']['stack_version']).startswith('2.0'): + config_dir = '/etc/hcatalog/conf' + webhcat_bin_dir = '/usr/lib/hcatalog/sbin' +# for newer versions +else: + config_dir = '/etc/hive-webhcat/conf' + webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin' + +webhcat_env_sh_template = config['configurations']['webhcat-env']['content'] +templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir'] +templeton_pid_dir = status_params.templeton_pid_dir + +pid_file = status_params.pid_file + +hadoop_conf_dir = config['configurations']['webhcat-site']['templeton.hadoop.conf.dir'] +templeton_jar = config['configurations']['webhcat-site']['templeton.jar'] + +hadoop_home = '/usr/lib/hadoop' +user_group = config['configurations']['cluster-env']['user_group'] + +webhcat_server_host = config['clusterHostInfo']['webhcat_server_host'] + +webhcat_apps_dir = "/apps/webhcat" +smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab'] +smokeuser = config['configurations']['cluster-env']['smokeuser'] +security_enabled = config['configurations']['cluster-env']['security_enabled'] +kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]) + +hcat_hdfs_user_dir = format("/user/{hcat_user}") +hcat_hdfs_user_mode = 0755 +webhcat_hdfs_user_dir = format("/user/{webhcat_user}") +webhcat_hdfs_user_mode = 0755 +webhcat_apps_dir = "/apps/webhcat" +#for create_hdfs_directory +hostname = config["hostname"] +hadoop_conf_dir = "/etc/hadoop/conf" +security_param = "true" if security_enabled else "false" +hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] +hdfs_user = 
config['configurations']['hadoop-env']['hdfs_user'] +hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] +kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]) +import functools +#create partial functions with common arguments for every HdfsDirectory call +#to create hdfs directory we need to call params.HdfsDirectory in code +HdfsDirectory = functools.partial( + HdfsDirectory, + conf_dir=hadoop_conf_dir, + hdfs_user=hdfs_user, + security_enabled = security_enabled, + keytab = hdfs_user_keytab, + kinit_path_local = kinit_path_local +) http://git-wip-us.apache.org/repos/asf/ambari/blob/83efcfea/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/service_check.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/service_check.py new file mode 100644 index 0000000..0e3c0f0 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/service_check.py @@ -0,0 +1,45 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. + +Ambari Agent + +""" +from resource_management import * + +class WebHCatServiceCheck(Script): + def service_check(self, env): + import params + + env.set_params(params) + + File(format("{tmp_dir}/templetonSmoke.sh"), + content= StaticFile('templetonSmoke.sh'), + mode=0755 + ) + + cmd = format("{tmp_dir}/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {smokeuser_keytab}" + " {security_param} {kinit_path_local}", + smokeuser_keytab=params.smoke_user_keytab if params.security_enabled else "no_keytab") + + Execute(cmd, + tries=3, + try_sleep=5, + path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', + logoutput=True) + +if __name__ == "__main__": + WebHCatServiceCheck().execute() http://git-wip-us.apache.org/repos/asf/ambari/blob/83efcfea/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/status_params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/status_params.py new file mode 100644 index 0000000..23823e6 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/status_params.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +""" + +from resource_management import * + +config = Script.get_config() + +templeton_pid_dir = config['configurations']['hive-env']['hcat_pid_dir'] +pid_file = format('{templeton_pid_dir}/webhcat.pid') http://git-wip-us.apache.org/repos/asf/ambari/blob/83efcfea/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/webhcat.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/webhcat.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/webhcat.py new file mode 100644 index 0000000..67ae839 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/webhcat.py @@ -0,0 +1,93 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +Ambari Agent + +""" +from resource_management import * +import sys + + +def webhcat(): + import params + + params.HdfsDirectory(params.webhcat_apps_dir, + action="create_delayed", + owner=params.webhcat_user, + mode=0755 + ) + if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir: + params.HdfsDirectory(params.hcat_hdfs_user_dir, + action="create_delayed", + owner=params.hcat_user, + mode=params.hcat_hdfs_user_mode + ) + params.HdfsDirectory(params.webhcat_hdfs_user_dir, + action="create_delayed", + owner=params.webhcat_user, + mode=params.webhcat_hdfs_user_mode + ) + params.HdfsDirectory(None, action="create") + + Directory(params.templeton_pid_dir, + owner=params.webhcat_user, + mode=0755, + group=params.user_group, + recursive=True) + + Directory(params.templeton_log_dir, + owner=params.webhcat_user, + mode=0755, + group=params.user_group, + recursive=True) + + Directory(params.config_dir, + owner=params.webhcat_user, + group=params.user_group) + + XmlConfig("webhcat-site.xml", + conf_dir=params.config_dir, + configurations=params.config['configurations']['webhcat-site'], + configuration_attributes=params.config['configuration_attributes']['webhcat-site'], + owner=params.webhcat_user, + group=params.user_group, + ) + + File(format("{config_dir}/webhcat-env.sh"), + owner=params.webhcat_user, + group=params.user_group, + content=InlineTemplate(params.webhcat_env_sh_template) + ) + + if params.security_enabled: + kinit_if_needed = format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name};") + else: + kinit_if_needed = "" + + if kinit_if_needed: + Execute(kinit_if_needed, + user=params.webhcat_user, + path='/bin' + ) + + CopyFromLocal('/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar', + owner=params.webhcat_user, + mode=0755, + dest_dir=params.webhcat_apps_dir, + kinnit_if_needed=kinit_if_needed, + hdfs_user=params.hdfs_user + ) 
http://git-wip-us.apache.org/repos/asf/ambari/blob/83efcfea/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/webhcat_server.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/webhcat_server.py new file mode 100644 index 0000000..2111fa4 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/webhcat_server.py @@ -0,0 +1,53 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +Ambari Agent + +""" +import sys +from resource_management import * + +from webhcat import webhcat +from webhcat_service import webhcat_service + +class WebHCatServer(Script): + def install(self, env): + self.install_packages(env) + def configure(self, env): + import params + env.set_params(params) + webhcat() + + def start(self, env): + import params + env.set_params(params) + self.configure(env) # FOR SECURITY + webhcat_service(action = 'start') + + def stop(self, env): + import params + env.set_params(params) + + webhcat_service(action = 'stop') + + def status(self, env): + import status_params + env.set_params(status_params) + check_process_status(status_params.pid_file) + +if __name__ == "__main__": + WebHCatServer().execute() http://git-wip-us.apache.org/repos/asf/ambari/blob/83efcfea/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/webhcat_service.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/webhcat_service.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/webhcat_service.py new file mode 100644 index 0000000..a92446d --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/webhcat_service.py @@ -0,0 +1,40 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Ambari Agent + +""" +from resource_management import * + +def webhcat_service(action='start'): + import params + + cmd = format('env HADOOP_HOME={hadoop_home} {webhcat_bin_dir}/webhcat_server.sh') + + if action == 'start': + demon_cmd = format('{cmd} start') + no_op_test = format('ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1') + Execute(demon_cmd, + user=params.webhcat_user, + not_if=no_op_test + ) + elif action == 'stop': + demon_cmd = format('{cmd} stop') + Execute(demon_cmd, + user=params.webhcat_user + ) + Execute(format('rm -f {pid_file}')) http://git-wip-us.apache.org/repos/asf/ambari/blob/83efcfea/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-env.xml new file mode 100644 index 0000000..14ae20b --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-env.xml @@ -0,0 +1,65 @@ +<?xml version="1.0"?> +<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> +<!-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--> + +<configuration> + <property> + <name>mapred_log_dir_prefix</name> + <value>/var/log/hadoop-mapreduce</value> + <description>Mapreduce Log Dir Prefix</description> + </property> + <property> + <name>mapred_pid_dir_prefix</name> + <value>/var/run/hadoop-mapreduce</value> + <description>Mapreduce PID Dir Prefix</description> + </property> + <property> + <name>mapred_user</name> + <value>mapred</value> + <property-type>USER</property-type> + <description>Mapreduce User</description> + </property> + <property> + <name>jobhistory_heapsize</name> + <value>900</value> + <description>Value for JobHistoryServer heap_size variable in hadoop-env.sh</description> + </property> + + <!-- mapred-env.sh --> + <property> + <name>content</name> + <description>This is the jinja template for mapred-env.sh file</description> + <value> +# export JAVA_HOME=/home/y/libexec/jdk1.6.0/ + +export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}} + +export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA + +#export HADOOP_JOB_HISTORYSERVER_OPTS= +#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default. +#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger. +#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default. +#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. 
$USER by default +#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0. + </value> + </property> +</configuration> http://git-wip-us.apache.org/repos/asf/ambari/blob/83efcfea/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-site.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-site.xml new file mode 100644 index 0000000..f8d7b89 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration-mapred/mapred-site.xml @@ -0,0 +1,366 @@ +<?xml version="1.0"?> +<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> + +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> + +<!-- Put site-specific property overrides in this file. --> + +<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude"> + +<!-- i/o properties --> + + <property> + <name>mapreduce.task.io.sort.mb</name> + <value>200</value> + <description> + The total amount of buffer memory to use while sorting files, in megabytes. 
+ By default, gives each merge stream 1MB, which should minimize seeks. + </description> + </property> + + <property> + <name>mapreduce.map.sort.spill.percent</name> + <value>0.7</value> + <description> + The soft limit in the serialization buffer. Once reached, a thread will + begin to spill the contents to disk in the background. Note that + collection will not block if this threshold is exceeded while a spill + is already in progress, so spills may be larger than this threshold when + it is set to less than .5 + </description> + </property> + + <property> + <name>mapreduce.task.io.sort.factor</name> + <value>100</value> + <description> + The number of streams to merge at once while sorting files. + This determines the number of open file handles. + </description> + </property> + +<!-- map/reduce properties --> + <property> + <name>mapreduce.cluster.administrators</name> + <value> hadoop</value> + <description> + Administrators for MapReduce applications. + </description> + </property> + + <property> + <name>mapreduce.reduce.shuffle.parallelcopies</name> + <value>30</value> + <description> + The default number of parallel transfers run by reduce during + the copy(shuffle) phase. + </description> + </property> + + <property> + <name>mapreduce.map.speculative</name> + <value>false</value> + <description> + If true, then multiple instances of some map tasks + may be executed in parallel. + </description> + </property> + + <property> + <name>mapreduce.reduce.speculative</name> + <value>false</value> + <description> + If true, then multiple instances of some reduce tasks may be + executed in parallel. + </description> + </property> + + <property> + <name>mapreduce.job.reduce.slowstart.completedmaps</name> + <value>0.05</value> + <description> + Fraction of the number of maps in the job which should be complete before + reduces are scheduled for the job. 
+ </description> + </property> + + <property> + <name>mapreduce.reduce.shuffle.merge.percent</name> + <value>0.66</value> + <description> + The usage threshold at which an in-memory merge will be + initiated, expressed as a percentage of the total memory allocated to + storing in-memory map outputs, as defined by + mapreduce.reduce.shuffle.input.buffer.percent. + </description> + </property> + + <property> + <name>mapreduce.reduce.shuffle.input.buffer.percent</name> + <value>0.7</value> + <description> + The percentage of memory to be allocated from the maximum heap + size to storing map outputs during the shuffle. + </description> + </property> + + <property> + <name>mapreduce.output.fileoutputformat.compress.type</name> + <value>BLOCK</value> + <description> + If the job outputs are to compressed as SequenceFiles, how should + they be compressed? Should be one of NONE, RECORD or BLOCK. + </description> + </property> + + <property> + <name>mapreduce.reduce.input.buffer.percent</name> + <value>0.0</value> + <description> + The percentage of memory- relative to the maximum heap size- to + retain map outputs during the reduce. When the shuffle is concluded, any + remaining map outputs in memory must consume less than this threshold before + the reduce can begin. + </description> + </property> + + <!-- copied from kryptonite configuration --> + <property> + <name>mapreduce.map.output.compress</name> + <value>false</value> + <description> + Should the outputs of the maps be compressed before being sent across the network. Uses SequenceFile compression. + </description> + </property> + + <property> + <name>mapreduce.task.timeout</name> + <value>300000</value> + <description> + The number of milliseconds before a task will be + terminated if it neither reads an input, writes an output, nor + updates its status string. 
+ </description> + </property> + + <property> + <name>mapreduce.map.memory.mb</name> + <value>1024</value> + <description>Virtual memory for single Map task</description> + </property> + + <property> + <name>mapreduce.reduce.memory.mb</name> + <value>1024</value> + <description>Virtual memory for single Reduce task</description> + </property> + + <property> + <name>mapreduce.jobhistory.keytab.file</name> + <value>/etc/security/keytabs/jhs.service.keytab</value> + <description>The keytab for the job history server principal.</description> + </property> + + <property> + <name>mapreduce.shuffle.port</name> + <value>13562</value> + <description> + Default port that the ShuffleHandler will run on. + ShuffleHandler is a service run at the NodeManager to facilitate + transfers of intermediate Map outputs to requesting Reducers. + </description> + </property> + + <property> + <name>mapreduce.jobhistory.intermediate-done-dir</name> + <value>/mr-history/tmp</value> + <description> + Directory where history files are written by MapReduce jobs. + </description> + </property> + + <property> + <name>mapreduce.jobhistory.done-dir</name> + <value>/mr-history/done</value> + <description> + Directory where history files are managed by the MR JobHistory Server. + </description> + </property> + + <property> + <name>mapreduce.jobhistory.address</name> + <value>localhost:10020</value> + <description>Enter your JobHistoryServer hostname.</description> + </property> + + <property> + <name>mapreduce.jobhistory.webapp.address</name> + <value>localhost:19888</value> + <description>Enter your JobHistoryServer hostname.</description> + </property> + + <property> + <name>mapreduce.framework.name</name> + <value>yarn</value> + <description> + The runtime framework for executing MapReduce jobs. Can be one of local, + classic or yarn. 
+ </description> + </property> + + <property> + <name>yarn.app.mapreduce.am.staging-dir</name> + <value>/user</value> + <description> + The staging dir used while submitting jobs. + </description> + </property> + + <property> + <name>yarn.app.mapreduce.am.resource.mb</name> + <value>512</value> + <description>The amount of memory the MR AppMaster needs.</description> + </property> + + <property> + <name>yarn.app.mapreduce.am.command-opts</name> + <value>-Xmx312m</value> + <description> + Java opts for the MR App Master processes. + The following symbol, if present, will be interpolated: @taskid@ is replaced + by current TaskID. Any other occurrences of '@' will go unchanged. + For example, to enable verbose gc logging to a file named for the taskid in + /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of: + -Xmx1024m -verbose:gc -Xloggc:/tmp/@[email protected] + + Usage of -Djava.library.path can cause programs to no longer function if + hadoop native libraries are used. These values should instead be set as part + of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and + mapreduce.reduce.env config settings. + </description> + </property> + + <property> + <name>yarn.app.mapreduce.am.admin-command-opts</name> + <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value> + <description> + Java opts for the MR App Master processes for admin purposes. + It will appears before the opts set by yarn.app.mapreduce.am.command-opts and + thus its options can be overridden user. + + Usage of -Djava.library.path can cause programs to no longer function if + hadoop native libraries are used. These values should instead be set as part + of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and + mapreduce.reduce.env config settings. 
+ </description> + </property> + + <property> + <name>yarn.app.mapreduce.am.log.level</name> + <value>INFO</value> + <description>MR App Master process log level.</description> + </property> + + <property> + <name>mapreduce.admin.map.child.java.opts</name> + <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value> + <description>This property stores Java options for map tasks.</description> + </property> + + <property> + <name>mapreduce.admin.reduce.child.java.opts</name> + <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value> + <description>This property stores Java options for reduce tasks.</description> + </property> + + <property> + <name>mapreduce.application.classpath</name> + <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value> + <description> + CLASSPATH for MR applications. A comma-separated list of CLASSPATH + entries. + </description> + </property> + + <property> + <name>mapreduce.am.max-attempts</name> + <value>2</value> + <description> + The maximum number of application attempts. It is an + application-specific setting. It should not be larger than the global number + set by the resourcemanager. Otherwise, it will be overridden. The default number is + set to 2, to allow at least one retry for AM. + </description> + </property> + + + + <property> + <name>mapreduce.map.java.opts</name> + <value>-Xmx756m</value> + <description> + Larger heap-size for child jvms of maps. + </description> + </property> + + + <property> + <name>mapreduce.reduce.java.opts</name> + <value>-Xmx756m</value> + <description> + Larger heap-size for child jvms of reduces. + </description> + </property> + + <property> + <name>mapreduce.map.log.level</name> + <value>INFO</value> + <description> + The logging level for the map task. The allowed levels are: + OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL. 
+ </description> + </property> + + <property> + <name>mapreduce.reduce.log.level</name> + <value>INFO</value> + <description> + The logging level for the reduce task. The allowed levels are: + OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL. + </description> + </property> + + <property> + <name>mapreduce.admin.user.env</name> + <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/Linux-amd64-64</value> + <description> + Additional execution environment entries for map and reduce task processes. + This is not an additive property. You must preserve the original value if + you want your map and reduce tasks to have access to native libraries (compression, etc) + </description> + </property> + + <property> + <name>mapreduce.output.fileoutputformat.compress</name> + <value>false</value> + <description> + Should the job outputs be compressed? + </description> + </property> + +</configuration> http://git-wip-us.apache.org/repos/asf/ambari/blob/83efcfea/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/capacity-scheduler.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/capacity-scheduler.xml new file mode 100644 index 0000000..261d872 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/capacity-scheduler.xml @@ -0,0 +1,124 @@ +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> + +<configuration supports_final="true"> + + <property> + <name>yarn.scheduler.capacity.maximum-applications</name> + <value>10000</value> + <description> + Maximum number of applications that can be pending and running. + </description> + </property> + + <property> + <name>yarn.scheduler.capacity.maximum-am-resource-percent</name> + <value>0.2</value> + <description> + Maximum percent of resources in the cluster which can be used to run + application masters i.e. controls number of concurrent running + applications. + </description> + </property> + + <property> + <name>yarn.scheduler.capacity.root.queues</name> + <value>default</value> + <description> + The queues at this level (root is the root queue). + </description> + </property> + + <property> + <name>yarn.scheduler.capacity.root.capacity</name> + <value>100</value> + <description> + The total capacity as a percentage out of 100 for this queue. + If it has child queues then this includes their capacity as well. + The child queues capacity should add up to their parent queue's capacity + or less. + </description> + </property> + + <property> + <name>yarn.scheduler.capacity.root.default.capacity</name> + <value>100</value> + <description>Default queue target capacity.</description> + </property> + + <property> + <name>yarn.scheduler.capacity.root.default.user-limit-factor</name> + <value>1</value> + <description> + Default queue user limit, a percentage from 0.0 to 1.0. 
+ </description> + </property> + + <property> + <name>yarn.scheduler.capacity.root.default.maximum-capacity</name> + <value>100</value> + <description> + The maximum capacity of the default queue. + </description> + </property> + + <property> + <name>yarn.scheduler.capacity.root.default.state</name> + <value>RUNNING</value> + <description> + The state of the default queue. State can be one of RUNNING or STOPPED. + </description> + </property> + + <property> + <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name> + <value>*</value> + <description> + The ACL of who can submit jobs to the default queue. + </description> + </property> + + <property> + <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name> + <value>*</value> + <description> + The ACL of who can administer jobs on the default queue. + </description> + </property> + + <property> + <name>yarn.scheduler.capacity.root.acl_administer_queue</name> + <value>*</value> + <description> + The ACL for who can administer this queue i.e. change sub-queue + allocations. + </description> + </property> + + <property> + <name>yarn.scheduler.capacity.node-locality-delay</name> + <value>40</value> + <description> + Number of missed scheduling opportunities after which the CapacityScheduler + attempts to schedule rack-local containers. + Typically this should be set to number of nodes in the cluster, By default is setting + approximately number of nodes in one rack which is 40. 
+ </description> + </property> + + +</configuration> http://git-wip-us.apache.org/repos/asf/ambari/blob/83efcfea/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml new file mode 100644 index 0000000..5730d4a --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-env.xml @@ -0,0 +1,184 @@ +<?xml version="1.0"?> +<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> +<!-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +--> + +<configuration> + <property> + <name>yarn_log_dir_prefix</name> + <value>/var/log/hadoop-yarn</value> + <description>YARN Log Dir Prefix</description> + </property> + <property> + <name>yarn_pid_dir_prefix</name> + <value>/var/run/hadoop-yarn</value> + <description>YARN PID Dir Prefix</description> + </property> + <property> + <name>yarn_user</name> + <value>yarn</value> + <property-type>USER</property-type> + <description>YARN User</description> + </property> + <property> + <name>yarn_heapsize</name> + <value>1024</value> + <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description> + </property> + <property> + <name>resourcemanager_heapsize</name> + <value>1024</value> + <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description> + </property> + <property> + <name>nodemanager_heapsize</name> + <value>1024</value> + <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description> + </property> + <property> + <name>min_user_id</name> + <value>1000</value> + <description>Set to 0 to disallow root from submitting jobs. 
Set to 1000 to disallow all superusers from submitting jobs</description> + </property> + <property> + <name>apptimelineserver_heapsize</name> + <value>1024</value> + <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description> + </property> + + <!-- yarn-env.sh --> + <property> + <name>content</name> + <description>This is the jinja template for yarn-env.sh file</description> + <value> +export HADOOP_YARN_HOME={{hadoop_yarn_home}} +export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER +export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER +export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}} +export JAVA_HOME={{java64_home}} + +# User for YARN daemons +export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn} + +# resolve links - $0 may be a softlink +export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}" + +# some Java parameters +# export JAVA_HOME=/home/y/libexec/jdk1.6.0/ +if [ "$JAVA_HOME" != "" ]; then + #echo "run java in $JAVA_HOME" + JAVA_HOME=$JAVA_HOME +fi + +if [ "$JAVA_HOME" = "" ]; then + echo "Error: JAVA_HOME is not set." + exit 1 +fi + +JAVA=$JAVA_HOME/bin/java +JAVA_HEAP_MAX=-Xmx1000m + +# For setting YARN specific HEAP sizes please use this +# Parameter and set appropriately +YARN_HEAPSIZE={{yarn_heapsize}} + +# check envvars which might override default args +if [ "$YARN_HEAPSIZE" != "" ]; then + JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m" +fi + +# Resource Manager specific parameters + +# Specify the max Heapsize for the ResourceManager using a numerical value +# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set +# the value to 1000. +# This value will be overridden by an Xmx setting specified in either YARN_OPTS +# and/or YARN_RESOURCEMANAGER_OPTS. +# If not specified, the default value will be picked from either YARN_HEAPMAX +# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. 
+export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}} + +# Specify the JVM options to be used when starting the ResourceManager. +# These options will be appended to the options specified as YARN_OPTS +# and therefore may override any similar flags set in YARN_OPTS +#export YARN_RESOURCEMANAGER_OPTS= + +# Node Manager specific parameters + +# Specify the max Heapsize for the NodeManager using a numerical value +# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set +# the value to 1000. +# This value will be overridden by an Xmx setting specified in either YARN_OPTS +# and/or YARN_NODEMANAGER_OPTS. +# If not specified, the default value will be picked from either YARN_HEAPMAX +# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. +export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}} + +# Specify the max Heapsize for the HistoryManager using a numerical value +# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set +# the value to 1024. +# This value will be overridden by an Xmx setting specified in either YARN_OPTS +# and/or YARN_HISTORYSERVER_OPTS. +# If not specified, the default value will be picked from either YARN_HEAPMAX +# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. +export YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}} + +# Specify the JVM options to be used when starting the NodeManager. 
+# These options will be appended to the options specified as YARN_OPTS +# and therefore may override any similar flags set in YARN_OPTS +#export YARN_NODEMANAGER_OPTS= + +# so that filenames w/ spaces are handled correctly in loops below +IFS= + + +# default log directory and file +if [ "$YARN_LOG_DIR" = "" ]; then + YARN_LOG_DIR="$HADOOP_YARN_HOME/logs" +fi +if [ "$YARN_LOGFILE" = "" ]; then + YARN_LOGFILE='yarn.log' +fi + +# default policy file for service-level authorization +if [ "$YARN_POLICYFILE" = "" ]; then + YARN_POLICYFILE="hadoop-policy.xml" +fi + +# restore ordinary behaviour +unset IFS + + +YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR" +YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR" +YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE" +YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE" +YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME" +YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING" +YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" +YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}" +if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then + YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" +fi +YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE" + </value> + </property> + +</configuration> http://git-wip-us.apache.org/repos/asf/ambari/blob/83efcfea/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml new file mode 100644 index 0000000..8c44b9e --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-log4j.xml @@ -0,0 +1,71 @@ +<?xml version="1.0"?> +<?xml-stylesheet type="text/xsl" 
href="configuration.xsl"?> +<!-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--> + +<configuration supports_final="false"> + + <property> + <name>content</name> + <description>Custom log4j.properties</description> + <value> +#Relative to Yarn Log Dir Prefix +yarn.log.dir=. 
+# +# Job Summary Appender +# +# Use following logger to send summary to separate file defined by +# hadoop.mapreduce.jobsummary.log.file rolled daily: +# hadoop.mapreduce.jobsummary.logger=INFO,JSA +# +hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger} +hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log +log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender +# Set the ResourceManager summary log filename +yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log +# Set the ResourceManager summary log level and appender +yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger} +#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY + +# To enable AppSummaryLogging for the RM, +# set yarn.server.resourcemanager.appsummary.logger to +# LEVEL,RMSUMMARY in hadoop-env.sh + +# Appender for ResourceManager Application Summary Log +# Requires the following properties to be set +# - hadoop.log.dir (Hadoop Log directory) +# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename) +# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender) +log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender +log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file} +log4j.appender.RMSUMMARY.MaxFileSize=256MB +log4j.appender.RMSUMMARY.MaxBackupIndex=20 +log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout +log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n +log4j.appender.JSA.layout=org.apache.log4j.PatternLayout +log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n +log4j.appender.JSA.DatePattern=.yyyy-MM-dd +log4j.appender.JSA.layout=org.apache.log4j.PatternLayout +log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger} 
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false + </value> + </property> + +</configuration> http://git-wip-us.apache.org/repos/asf/ambari/blob/83efcfea/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml new file mode 100644 index 0000000..20052de --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/configuration/yarn-site.xml @@ -0,0 +1,413 @@ +<?xml version="1.0"?> +<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> + +<!-- Put site-specific property overrides in this file. 
--> + +<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude"> + + <!-- ResourceManager --> + + <property> + <name>yarn.resourcemanager.hostname</name> + <value>localhost</value> + <description>The hostname of the RM.</description> + </property> + + <property> + <name>yarn.resourcemanager.resource-tracker.address</name> + <value>localhost:8025</value> + <description> The address of ResourceManager. </description> + </property> + + <property> + <name>yarn.resourcemanager.scheduler.address</name> + <value>localhost:8030</value> + <description>The address of the scheduler interface.</description> + </property> + + <property> + <name>yarn.resourcemanager.address</name> + <value>localhost:8050</value> + <description> + The address of the applications manager interface in the + RM. + </description> + </property> + + <property> + <name>yarn.resourcemanager.admin.address</name> + <value>localhost:8141</value> + <description>The address of the RM admin interface.</description> + </property> + + <property> + <name>yarn.resourcemanager.scheduler.class</name> + <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value> + <description>The class to use as the resource scheduler.</description> + </property> + + <property> + <name>yarn.scheduler.minimum-allocation-mb</name> + <value>512</value> + <description> + The minimum allocation for every container request at the RM, + in MBs. Memory requests lower than this won't take effect, + and the specified value will get allocated at minimum. + </description> + </property> + + <property> + <name>yarn.scheduler.maximum-allocation-mb</name> + <value>2048</value> + <description> + The maximum allocation for every container request at the RM, + in MBs. Memory requests higher than this won't take effect, + and will get capped to this value. + </description> + </property> + + <property> + <name>yarn.acl.enable</name> + <value>false</value> + <description> Are acls enabled. 
</description> + </property> + + <property> + <name>yarn.admin.acl</name> + <value></value> + <description> ACL of who can be admin of the YARN cluster. </description> + </property> + + <!-- NodeManager --> + + <property> + <name>yarn.nodemanager.address</name> + <value>0.0.0.0:45454</value> + <description>The address of the container manager in the NM.</description> + </property> + + <property> + <name>yarn.nodemanager.resource.memory-mb</name> + <value>5120</value> + <description>Amount of physical memory, in MB, that can be allocated + for containers.</description> + </property> + + <property> + <name>yarn.application.classpath</name> + <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value> + <description>Classpath for typical applications.</description> + </property> + + <property> + <name>yarn.nodemanager.vmem-pmem-ratio</name> + <value>2.1</value> + <description>Ratio between virtual memory to physical memory when + setting memory limits for containers. Container allocations are + expressed in terms of physical memory, and virtual memory usage + is allowed to exceed this allocation by this ratio. + </description> + </property> + + <property> + <name>yarn.nodemanager.container-executor.class</name> + <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value> + <description>ContainerExecutor for launching containers</description> + </property> + + <property> + <name>yarn.nodemanager.linux-container-executor.group</name> + <value>hadoop</value> + <description>Unix group of the NodeManager</description> + </property> + + <property> + <name>yarn.nodemanager.aux-services</name> + <value>mapreduce_shuffle</value> + <description>Auxilliary services of NodeManager. 
A valid service name should only contain a-zA-Z0-9_ and can + not start with numbers</description> + </property> + + <property> + <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name> + <value>org.apache.hadoop.mapred.ShuffleHandler</value> + <description>The auxiliary service class to use </description> + </property> + + <property> + <name>yarn.nodemanager.log-dirs</name> + <value>/hadoop/yarn/log</value> + <description> + Where to store container logs. An application's localized log directory + will be found in ${yarn.nodemanager.log-dirs}/application_${appid}. + Individual containers' log directories will be below this, in directories + named container_{$contid}. Each container directory will contain the files + stderr, stdin, and syslog generated by that container. + </description> + </property> + + <property> + <name>yarn.nodemanager.local-dirs</name> + <value>/hadoop/yarn/local</value> + <description> + List of directories to store localized files in. An + application's localized file directory will be found in: + ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}. + Individual containers' work directories, called container_${contid}, will + be subdirectories of this. + </description> + </property> + + <property> + <name>yarn.nodemanager.container-monitor.interval-ms</name> + <value>3000</value> + <description> + The interval, in milliseconds, for which the node manager + waits between two cycles of monitoring its containers' memory usage. 
+ </description> + </property> + + <!-- + <property> + <name>yarn.nodemanager.health-checker.script.path</name> + <value>/etc/hadoop/conf/health_check_nodemanager</value> + <description>The health check script to run.</description> + </property> + --> + + <property> + <name>yarn.nodemanager.health-checker.interval-ms</name> + <value>135000</value> + <description>Frequency of running node health script.</description> + </property> + + <property> + <name>yarn.nodemanager.health-checker.script.timeout-ms</name> + <value>60000</value> + <description>Script time out period.</description> + </property> + + <property> + <name>yarn.nodemanager.log.retain-second</name> + <value>604800</value> + <description> + Time in seconds to retain user logs. Only applicable if + log aggregation is disabled. + </description> + </property> + + <property> + <name>yarn.log-aggregation-enable</name> + <value>true</value> + <description>Whether to enable log aggregation. </description> + </property> + + <property> + <name>yarn.nodemanager.remote-app-log-dir</name> + <value>/app-logs</value> + <description>Location to aggregate logs to. </description> + </property> + + <property> + <name>yarn.nodemanager.remote-app-log-dir-suffix</name> + <value>logs</value> + <description> + The remote log dir will be created at + {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}. + </description> + </property> + + <property> + <name>yarn.nodemanager.log-aggregation.compression-type</name> + <value>gz</value> + <description> + T-file compression types used to compress aggregated logs. + </description> + </property> + + <property> + <name>yarn.nodemanager.delete.debug-delay-sec</name> + <value>0</value> + <description> + Number of seconds after an application finishes before the nodemanager's + DeletionService will delete the application's localized file directory + and log directory. 
+ + To diagnose Yarn application problems, set this property's value large + enough (for example, to 600 = 10 minutes) to permit examination of these + directories. After changing the property's value, you must restart the + nodemanager in order for it to have an effect. + + The roots of Yarn applications' work directories is configurable with + the yarn.nodemanager.local-dirs property (see below), and the roots + of the Yarn applications' log directories is configurable with the + yarn.nodemanager.log-dirs property (see also below). + </description> + </property> + + <property> + <name>yarn.log-aggregation.retain-seconds</name> + <value>2592000</value> + <description> + How long to keep aggregation logs before deleting them. -1 disables. + Be careful set this too small and you will spam the name node. + </description> + </property> + + <property> + <name>yarn.nodemanager.admin-env</name> + <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value> + <description> + Environment variables that should be forwarded from the NodeManager's + environment to the container's. + </description> + </property> + + <property> + <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name> + <value>0.25</value> + <description> + The minimum fraction of number of disks to be healthy for the nodemanager + to launch new containers. This correspond to both + yarn-nodemanager.local-dirs and yarn.nodemanager.log-dirs. i.e. + If there are less number of healthy local-dirs (or log-dirs) available, + then new containers will not be launched on this node. + </description> + </property> + + <property> + <name>yarn.resourcemanager.am.max-attempts</name> + <value>2</value> + <description> + The maximum number of application attempts. It's a global + setting for all application masters. Each application master can specify + its individual maximum number of application attempts via the API, but the + individual number cannot be more than the global upper bound. 
If it is, + the resourcemanager will override it. The default number is set to 2, to + allow at least one retry for AM. + </description> + </property> + + <property> + <name>yarn.resourcemanager.webapp.address</name> + <value>localhost:8088</value> + <description> + The address of the RM web application. + </description> + </property> + + <property> + <name>yarn.nodemanager.vmem-check-enabled</name> + <value>false</value> + <description> + Whether virtual memory limits will be enforced for containers. + </description> + </property> + + <property> + <name>yarn.log.server.url</name> + <value>http://localhost:19888/jobhistory/logs</value> + <description> + URI for the HistoryServer's log resource + </description> + </property> + + <property> + <name>yarn.resourcemanager.nodes.exclude-path</name> + <value>/etc/hadoop/conf/yarn.exclude</value> + <description> + Names a file that contains a list of hosts that are + not permitted to connect to the resource manager. The full pathname of the + file must be specified. If the value is empty, no hosts are + excluded. + </description> + </property> + + <property> + <name>yarn.timeline-service.enabled</name> + <value>true</value> + <description>Indicate to clients whether timeline service is enabled or not. + If enabled, clients will put entities and events to the timeline server. 
+ </description> + </property> + + <property> + <name>yarn.timeline-service.store-class</name> + <value>org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.LeveldbTimelineStore</value> + <description> + Store class name for timeline store + </description> + </property> + + <property> + <name>yarn.timeline-service.generic-application-history.store-class</name> + <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value> + <description> + Store class name for history store, defaulting to file system store + </description> + </property> + + <property> + <name>yarn.timeline-service.leveldb-timeline-store.path</name> + <value>/var/log/hadoop-yarn/timeline</value> + <description> + Store file name for leveldb timeline store + </description> + </property> + + <property> + <name>yarn.timeline-service.webapp.address</name> + <value>0.0.0.0:8188</value> + <description> + The http address of the timeline service web application. + </description> + </property> + + <property> + <name>yarn.timeline-service.webapp.https.address</name> + <value>0.0.0.0:8190</value> + <description> + The https address of the timeline service web application. + </description> + </property> + + <property> + <name>yarn.timeline-service.address</name> + <value>0.0.0.0:10200</value> + <description> + This is the default address for the timeline server to start + the RPC server. 
+ </description> + </property> + <property> + <description>Enable age off of timeline store data.</description> + <name>yarn.timeline-service.ttl-enable</name> + <value>true</value> + </property> + <property> + <description>Time to live for timeline store data in milliseconds.</description> + <name>yarn.timeline-service.ttl-ms</name> + <value>2678400000</value> + </property> + <property> + <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description> + <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name> + <value>300000</value> + </property> +</configuration> http://git-wip-us.apache.org/repos/asf/ambari/blob/83efcfea/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/metainfo.xml new file mode 100644 index 0000000..b9d4236 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/metainfo.xml @@ -0,0 +1,250 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+--> + +<metainfo> + <schemaVersion>2.0</schemaVersion> + <services> + <service> + <name>YARN</name> + <displayName>YARN</displayName> + <comment>Apache Hadoop NextGen MapReduce (YARN)</comment> + <version>2.4.0.724</version> + <components> + + <component> + <name>APP_TIMELINE_SERVER</name> + <displayName>App Timeline Server</displayName> + <category>MASTER</category> + <cardinality>0-1</cardinality> + <commandScript> + <script>scripts/application_timeline_server.py</script> + <scriptType>PYTHON</scriptType> + <timeout>600</timeout> + </commandScript> + </component> + + <component> + <name>RESOURCEMANAGER</name> + <displayName>ResourceManager</displayName> + <category>MASTER</category> + <cardinality>1-2</cardinality> + <commandScript> + <script>scripts/resourcemanager.py</script> + <scriptType>PYTHON</scriptType> + <timeout>600</timeout> + </commandScript> + <customCommands> + <customCommand> + <name>DECOMMISSION</name> + <commandScript> + <script>scripts/resourcemanager.py</script> + <scriptType>PYTHON</scriptType> + <timeout>600</timeout> + </commandScript> + </customCommand> + <customCommand> + <name>REFRESHQUEUES</name> + <commandScript> + <script>scripts/resourcemanager.py</script> + <scriptType>PYTHON</scriptType> + <timeout>600</timeout> + </commandScript> + </customCommand> + </customCommands> + <configuration-dependencies> + <config-type>capacity-scheduler</config-type> + </configuration-dependencies> + </component> + + <component> + <name>NODEMANAGER</name> + <displayName>NodeManager</displayName> + <category>SLAVE</category> + <cardinality>1+</cardinality> + <commandScript> + <script>scripts/nodemanager.py</script> + <scriptType>PYTHON</scriptType> + <timeout>600</timeout> + </commandScript> + </component> + + <component> + <name>YARN_CLIENT</name> + <displayName>Yarn Client</displayName> + <category>CLIENT</category> + <cardinality>1+</cardinality> + <commandScript> + <script>scripts/yarn_client.py</script> + <scriptType>PYTHON</scriptType> + 
<timeout>600</timeout> + </commandScript> + <configFiles> + <configFile> + <type>xml</type> + <fileName>yarn-site.xml</fileName> + <dictionaryName>yarn-site</dictionaryName> + </configFile> + <configFile> + <type>xml</type> + <fileName>core-site.xml</fileName> + <dictionaryName>core-site</dictionaryName> + </configFile> + <configFile> + <type>env</type> + <fileName>yarn-env.sh</fileName> + <dictionaryName>yarn-env</dictionaryName> + </configFile> + <configFile> + <type>env</type> + <fileName>log4j.properties</fileName> + <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName> + </configFile> + <configFile> + <type>xml</type> + <fileName>capacity-scheduler.xml</fileName> + <dictionaryName>capacity-scheduler</dictionaryName> + </configFile> + </configFiles> + </component> + </components> + + <osSpecifics> + <osSpecific> + <osFamily>any</osFamily> + <packages> + <package> + <name>hadoop-yarn</name> + </package> + <package> + <name>hadoop-mapreduce</name> + </package> + </packages> + </osSpecific> + </osSpecifics> + + <commandScript> + <script>scripts/service_check.py</script> + <scriptType>PYTHON</scriptType> + <timeout>300</timeout> + </commandScript> + + <requiredServices> + <service>HDFS</service> + </requiredServices> + + <configuration-dependencies> + <config-type>yarn-site</config-type> + <config-type>yarn-env</config-type> + <config-type>core-site</config-type> + <config-type>yarn-log4j</config-type> + </configuration-dependencies> + </service> + + <service> + <name>MAPREDUCE2</name> + <displayName>MapReduce2</displayName> + <comment>Apache Hadoop NextGen MapReduce (YARN)</comment> + <version>2.4.0.724</version> + <components> + <component> + <name>HISTORYSERVER</name> + <displayName>History Server</displayName> + <category>MASTER</category> + <cardinality>1</cardinality> + <auto-deploy> + <enabled>true</enabled> + <co-locate>YARN/RESOURCEMANAGER</co-locate> + </auto-deploy> + <dependencies> + <dependency> + <name>HDFS/HDFS_CLIENT</name> + <scope>host</scope> + 
<auto-deploy> + <enabled>true</enabled> + </auto-deploy> + </dependency> + </dependencies> + <commandScript> + <script>scripts/historyserver.py</script> + <scriptType>PYTHON</scriptType> + <timeout>600</timeout> + </commandScript> + </component> + + <component> + <name>MAPREDUCE2_CLIENT</name> + <displayName>MapReduce2 Client</displayName> + <category>CLIENT</category> + <cardinality>0+</cardinality> + <commandScript> + <script>scripts/mapreduce2_client.py</script> + <scriptType>PYTHON</scriptType> + <timeout>600</timeout> + </commandScript> + <configFiles> + <configFile> + <type>xml</type> + <fileName>mapred-site.xml</fileName> + <dictionaryName>mapred-site</dictionaryName> + </configFile> + <configFile> + <type>xml</type> + <fileName>core-site.xml</fileName> + <dictionaryName>core-site</dictionaryName> + </configFile> + <configFile> + <type>env</type> + <fileName>mapred-env.sh</fileName> + <dictionaryName>mapred-env</dictionaryName> + </configFile> + </configFiles> + </component> + </components> + + <osSpecifics> + <osSpecific> + <osFamily>any</osFamily> + <packages> + <package> + <name>hadoop-mapreduce</name> + </package> + </packages> + </osSpecific> + </osSpecifics> + + <commandScript> + <script>scripts/mapred_service_check.py</script> + <scriptType>PYTHON</scriptType> + <timeout>300</timeout> + </commandScript> + + <requiredServices> + <service>YARN</service> + </requiredServices> + + <configuration-dir>configuration-mapred</configuration-dir> + + <configuration-dependencies> + <config-type>core-site</config-type> + <config-type>mapred-site</config-type> + <config-type>mapred-env</config-type> + </configuration-dependencies> + </service> + + </services> +</metainfo>
