http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/shared_initialization.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/shared_initialization.py new file mode 100644 index 0000000..f70eee8 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/shared_initialization.py @@ -0,0 +1,177 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +""" + +import os + +from resource_management import * + +def setup_hadoop(): + """ + Setup hadoop files and directories + """ + import params + + Execute("/bin/echo 0 > /selinux/enforce", + only_if="test -f /selinux/enforce" + ) + + install_snappy() + + #directories + if params.has_namenode: + Directory(params.hdfs_log_dir_prefix, + recursive=True, + owner='root', + group=params.user_group, + mode=0775 + ) + Directory(params.hadoop_pid_dir_prefix, + recursive=True, + owner='root', + group='root' + ) + #this doesn't needed with stack 1 + Directory(params.hadoop_tmp_dir, + recursive=True, + owner=params.hdfs_user, + ) + #files + if params.security_enabled: + tc_owner = "root" + else: + tc_owner = params.hdfs_user + + File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'), + owner=tc_owner, + content=Template('commons-logging.properties.j2') + ) + + health_check_template = "health_check-v2" #for stack 1 use 'health_check' + File(os.path.join(params.hadoop_conf_dir, "health_check"), + owner=tc_owner, + content=Template(health_check_template + ".j2") + ) + + log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties") + if (params.log4j_props != None): + File(log4j_filename, + mode=0644, + group=params.user_group, + owner=params.hdfs_user, + content=params.log4j_props + ) + elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))): + File(log4j_filename, + mode=0644, + group=params.user_group, + owner=params.hdfs_user, + ) + + File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"), + owner=params.hdfs_user, + content=Template("hadoop-metrics2.properties.j2") + ) + +def setup_database(): + """ + Load DB + """ + import params + db_driver_dload_cmd = "" + environment = { + "no_proxy": format("{ambari_server_hostname}") + } + if params.server_db_name == 'oracle' and params.oracle_driver_url != "": + db_driver_dload_cmd = format( + "curl -kf -x \"\" \ + --retry 5 {oracle_driver_symlink_url} -o 
{hadoop_lib_home}/{db_driver_filename}",) + elif params.server_db_name == 'mysql' and params.mysql_driver_url != "": + db_driver_dload_cmd = format( + "curl -kf -x \"\" \ + --retry 5 {mysql_driver_symlink_url} -o {hadoop_lib_home}/{db_driver_filename}") + + if db_driver_dload_cmd: + Execute(db_driver_dload_cmd, + not_if =format("test -e {hadoop_lib_home}/{db_driver_filename}"), + environment = environment + ) + + +def setup_configs(): + """ + Creates configs for services HDFS mapred + """ + import params + + if params.has_namenode: + File(params.task_log4j_properties_location, + content=StaticFile("task-log4j.properties"), + mode=0755 + ) + + if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')): + File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'), + owner=params.hdfs_user, + group=params.user_group + ) + if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')): + File(os.path.join(params.hadoop_conf_dir, 'masters'), + owner=params.hdfs_user, + group=params.user_group + ) + + generate_include_file() + + +def generate_include_file(): + import params + + if params.has_namenode and params.dfs_hosts and params.has_slaves: + include_hosts_list = params.slave_hosts + File(params.dfs_hosts, + content=Template("include_hosts_list.j2"), + owner=params.hdfs_user, + group=params.user_group + ) + + +def install_snappy(): + import params + + snappy_so = "libsnappy.so" + so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32") + so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64") + so_target_x86 = format("{so_target_dir_x86}/{snappy_so}") + so_target_x64 = format("{so_target_dir_x64}/{snappy_so}") + so_src_dir_x86 = format("{hadoop_home}/lib") + so_src_dir_x64 = format("{hadoop_home}/lib64") + so_src_x86 = format("{so_src_dir_x86}/{snappy_so}") + so_src_x64 = format("{so_src_dir_x64}/{snappy_so}") + if params.has_namenode: + Execute( + format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} 
{so_target_x86}")) + Execute( + format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}")) + + +def create_javahome_symlink(): + if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"): + Execute("mkdir -p /usr/jdk64/") + Execute("ln -s /usr/jdk/jdk1.6.0_31 /usr/jdk64/jdk1.6.0_31") +
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/commons-logging.properties.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/commons-logging.properties.j2 new file mode 100644 index 0000000..2197ba5 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/commons-logging.properties.j2 @@ -0,0 +1,43 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#} + +#/* +# * Licensed to the Apache Software Foundation (ASF) under one +# * or more contributor license agreements. See the NOTICE file +# * distributed with this work for additional information +# * regarding copyright ownership. The ASF licenses this file +# * to you under the Apache License, Version 2.0 (the +# * "License"); you may not use this file except in compliance +# * with the License. 
You may obtain a copy of the License at +# * +# * http://www.apache.org/licenses/LICENSE-2.0 +# * +# * Unless required by applicable law or agreed to in writing, software +# * distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. +# */ + +#Logging Implementation + +#Log4J +org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger + +#JDK Logger +#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/exclude_hosts_list.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/exclude_hosts_list.j2 new file mode 100644 index 0000000..1adba80 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/exclude_hosts_list.j2 @@ -0,0 +1,21 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +#} + +{% for host in hdfs_exclude_file %} +{{host}} +{% endfor %} http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 new file mode 100644 index 0000000..c4759f4 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 @@ -0,0 +1,65 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#} + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# syntax: [prefix].[source|sink|jmx].[instance].[options] +# See package.html for org.apache.hadoop.metrics2 for details + +{% if has_ganglia_server %} +*.period=60 + +*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31 +*.sink.ganglia.period=10 + +# default for supportsparse is false +*.sink.ganglia.supportsparse=true + +.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both +.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40 + +# Hook up to the server +namenode.sink.ganglia.servers={{ganglia_server_host}}:8661 +datanode.sink.ganglia.servers={{ganglia_server_host}}:8659 +jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662 +tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658 +maptask.sink.ganglia.servers={{ganglia_server_host}}:8660 +reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660 +resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664 +nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657 +historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666 +journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654 +nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649 +supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650 + +resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue + +{% endif %} 
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/health_check-v2.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/health_check-v2.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/health_check-v2.j2 new file mode 100644 index 0000000..0a03d17 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/health_check-v2.j2 @@ -0,0 +1,81 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#} + +#!/bin/bash +# +#/* +# * Licensed to the Apache Software Foundation (ASF) under one +# * or more contributor license agreements. See the NOTICE file +# * distributed with this work for additional information +# * regarding copyright ownership. The ASF licenses this file +# * to you under the Apache License, Version 2.0 (the +# * "License"); you may not use this file except in compliance +# * with the License. 
You may obtain a copy of the License at +# * +# * http://www.apache.org/licenses/LICENSE-2.0 +# * +# * Unless required by applicable law or agreed to in writing, software +# * distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. +# */ + +err=0; + +function check_disks { + + for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do + fsdev="" + fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`; + if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then + msg_="$msg_ $m(u)" + else + msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`" + fi + done + + if [ -z "$msg_" ] ; then + echo "disks ok" ; exit 0 + else + echo "$msg_" ; exit 2 + fi + +} + +# Run all checks +for check in disks ; do + msg=`check_${check}` ; + if [ $? -eq 0 ] ; then + ok_msg="$ok_msg$msg," + else + err_msg="$err_msg$msg," + fi +done + +if [ ! -z "$err_msg" ] ; then + echo -n "ERROR $err_msg " +fi +if [ ! -z "$ok_msg" ] ; then + echo -n "OK: $ok_msg" +fi + +echo + +# Success! +exit 0 http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/health_check.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/health_check.j2 new file mode 100644 index 0000000..ff17b19 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/health_check.j2 @@ -0,0 +1,109 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#} + +#!/bin/bash +# +#/* +# * Licensed to the Apache Software Foundation (ASF) under one +# * or more contributor license agreements. See the NOTICE file +# * distributed with this work for additional information +# * regarding copyright ownership. The ASF licenses this file +# * to you under the Apache License, Version 2.0 (the +# * "License"); you may not use this file except in compliance +# * with the License. You may obtain a copy of the License at +# * +# * http://www.apache.org/licenses/LICENSE-2.0 +# * +# * Unless required by applicable law or agreed to in writing, software +# * distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
+# */ + +err=0; + +function check_disks { + + for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do + fsdev="" + fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`; + if [ -z "$fsdev" ] ; then + msg_="$msg_ $m(u)" + else + msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`" + fi + done + + if [ -z "$msg_" ] ; then + echo "disks ok" ; exit 0 + else + echo "$msg_" ; exit 2 + fi + +} + +function check_taskcontroller { + if [ "<%=scope.function_phd_template_var("::phd::params::security_enabled")%>" == "true" ]; then + perm=`stat -c %a:%U:%G <%=scope.function_phd_template_var("task_bin_exe")%> 2>/dev/null` + if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then + echo "taskcontroller ok" + else + echo 'check taskcontroller' ; exit 1 + fi + fi +} + +function check_jetty { + hname=`hostname` + jmx=`curl -s -S -m 5 "http://$hname:<%=scope.function_phd_template_var("::phd::tasktracker_port")%>/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ; + if [ $? -eq 0 ] ; then + e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ; + e=${e:-0} # no jmx servlet ? + if [ $e -gt 10 ] ; then + echo "check jetty: shuffle_exceptions=$e" ; exit 1 + else + echo "jetty ok" + fi + else + echo "check jetty: ping failed" ; exit 1 + fi +} + + +# Run all checks +for check in disks taskcontroller jetty; do + msg=`check_${check}` ; + if [ $? -eq 0 ] ; then + ok_msg="$ok_msg$msg," + else + err_msg="$err_msg$msg," + fi +done + +if [ ! -z "$err_msg" ] ; then + echo -n "ERROR $err_msg " +fi +if [ ! -z "$ok_msg" ] ; then + echo -n "OK: $ok_msg" +fi + +echo + +# Success! 
+exit 0 http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/include_hosts_list.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/include_hosts_list.j2 new file mode 100644 index 0000000..4a9e713 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/templates/include_hosts_list.j2 @@ -0,0 +1,21 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#} + +{% for host in slave_hosts %} +{{host}} +{% endfor %} http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/metainfo.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/metainfo.xml new file mode 100644 index 0000000..ca45822 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/metainfo.xml @@ -0,0 +1,22 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+--> +<metainfo> + <versions> + <active>true</active> + </versions> +</metainfo> http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/repos/repoinfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/repos/repoinfo.xml new file mode 100644 index 0000000..4ca3b26 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/repos/repoinfo.xml @@ -0,0 +1,33 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+--> +<reposinfo> + + <os family="redhat6"> + <repo> + <baseurl>http://localhost/PHD</baseurl> + <repoid>PHD-3.0.0.0</repoid> + <reponame>PHD</reponame> + </repo> + <repo> + + <baseurl>http://localhost/PHD-UTILS</baseurl> + <repoid>PHD-UTILS-1.0</repoid> + <reponame>PHD-UTILS</reponame> + </repo> + </os> +</reposinfo> http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/role_command_order.json ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/role_command_order.json b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/role_command_order.json new file mode 100644 index 0000000..b52c4d2 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/role_command_order.json @@ -0,0 +1,75 @@ +{ + "_comment" : "Record format:", + "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]", + "general_deps" : { + "_comment" : "dependencies for all cases", + "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL", + "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"], + "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"], + "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"], + "OOZIE_SERVER-START": ["NODEMANAGER-START", "RESOURCEMANAGER-START"], + "WEBHCAT_SERVER-START": ["NODEMANAGER-START", "HIVE_SERVER-START"], + "HIVE_METASTORE-START": ["MYSQL_SERVER-START", "POSTGRESQL_SERVER-START"], + "HIVE_SERVER-START": ["NODEMANAGER-START", "MYSQL_SERVER-START", "POSTGRESQL_SERVER-START"], + "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"], + "FLUME_HANDLER-START": ["OOZIE_SERVER-START"], + "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START", + "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START", + "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START", + "ZOOKEEPER_SERVER-START", "NODEMANAGER-START", 
"RESOURCEMANAGER-START", + "MYSQL_SERVER-START", "POSTGRESQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START", + "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"], + "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"], + "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"], + "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"], + "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START"], + "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"], + "PIG_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"], + "MAHOUT_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"], + "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"], + "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"], + "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"], + "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP"], + "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"] + }, + "_comment" : "GLUSTERFS-specific dependencies", + "optional_glusterfs": { + "HBASE_MASTER-START": ["PEERSTATUS-START"], + "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"] + }, + "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster", + "optional_no_glusterfs": { + "SECONDARY_NAMENODE-START": ["NAMENODE-START"], + "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"], + "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"], + "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"], + "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"], + "HIVE_SERVER-START": ["DATANODE-START"], + "WEBHCAT_SERVER-START": ["DATANODE-START"], + "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START", + "DATANODE-START", 
"RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"], + "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START", + "SECONDARY_NAMENODE-START"], + "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", + "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"], + "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"], + "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"], + "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"], + "MAHOUT_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"], + "NAMENODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP", + "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"], + "DATANODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP", + "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"] + }, + "_comment" : "Dependencies that are used in HA NameNode cluster", + "namenode_optional_ha": { + "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"], + "ZKFC-START": ["ZOOKEEPER_SERVER-START"], + "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"] + }, + "_comment" : "Dependencies that are used in ResourceManager HA cluster", + "resourcemanager_optional_ha" : { + "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"] + } +} + http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-conf.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-conf.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-conf.xml new file mode 100644 index 0000000..74a4c15 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-conf.xml @@ -0,0 +1,31 @@ +<?xml version="1.0"?> 
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> +<!-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--> + +<configuration supports_final="false"> + <property> + <name>content</name> + <description>Describe all the Flume agent configurations</description> + <value> +# Flume agent config + </value> + </property> +</configuration> http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-env.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-env.xml new file mode 100644 index 0000000..902b3ca --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-env.xml @@ -0,0 +1,78 @@ +<?xml version="1.0"?> +<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> +<!-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--> + +<configuration> + <property> + <name>flume_conf_dir</name> + <value>/etc/flume/conf</value> + <description>Location to save configuration files</description> + </property> + <property> + <name>flume_log_dir</name> + <value>/var/log/flume</value> + <description>Location to save log files</description> + </property> + <property> + <name>flume_user</name> + <value>flume</value> + <property-type>USER</property-type> + <description>Flume User</description> + </property> + + <!-- flume-env.sh --> + <property> + <name>content</name> + <description>This is the jinja template for flume-env.sh file</description> + <value> +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# If this file is placed at FLUME_CONF_DIR/flume-env.sh, it will be sourced +# during Flume startup. + +# Enviroment variables can be set here. + +export JAVA_HOME={{java_home}} + +# Give Flume more memory and pre-allocate, enable remote monitoring via JMX +# export JAVA_OPTS="-Xms100m -Xmx2000m -Dcom.sun.management.jmxremote" + +# Note that the Flume conf directory is always included in the classpath. +#FLUME_CLASSPATH="" + +# export HIVE_HOME=/usr/lib/hive +# export HCAT_HOME=/usr/lib/hive-hcatalog + </value> + </property> +</configuration> http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-log4j.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-log4j.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-log4j.xml new file mode 100644 index 0000000..8c6ac27 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/configuration/flume-log4j.xml @@ -0,0 +1,31 @@ +<?xml version="1.0"?> +<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> +<!-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--> + +<configuration supports_final="false"> + <property> + <name>content</name> + <description>Custom log4j.properties</description> + <value> +# Flume log4j config + </value> + </property> +</configuration> http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/metainfo.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/metainfo.xml new file mode 100644 index 0000000..7421be1 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/metainfo.xml @@ -0,0 +1,69 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> +<metainfo> + <schemaVersion>2.0</schemaVersion> + <services> + <service> + <name>FLUME</name> + <displayName>Flume</displayName> + <comment>Data management and processing platform</comment> + <version>1.5.0.1.phd.3.0.0.0</version> + <components> + <component> + <name>FLUME_HANDLER</name> + <displayName>Flume</displayName> + <category>SLAVE</category> + <cardinality>1+</cardinality> + <commandScript> + <script>scripts/flume_handler.py</script> + <scriptType>PYTHON</scriptType> + <timeout>600</timeout> + </commandScript> + </component> + </components> + + <osSpecifics> + <osSpecific> + <osFamily>any</osFamily> + <packages> + <package> + <name>flume</name> + </package> + </packages> + </osSpecific> + </osSpecifics> + + <commandScript> + <script>scripts/flume_check.py</script> + <scriptType>PYTHON</scriptType> + <timeout>300</timeout> + </commandScript> + + <requiredServices> + <service>HDFS</service> + </requiredServices> + + <configuration-dependencies> + <config-type>flume-env</config-type> + <config-type>flume-conf</config-type> + <config-type>flume-log4j</config-type> + </configuration-dependencies> + + </service> + </services> +</metainfo> http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/metrics.json ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/metrics.json b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/metrics.json new file mode 100644 index 
0000000..2114c12 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/metrics.json @@ -0,0 +1,716 @@ +{ + "FLUME_HANDLER": { + "Component": [ + { + "type": "ganglia", + "metrics": { + "metrics/boottime":{ + "metric":"boottime", + "pointInTime":true, + "temporal":true + }, + "metrics/cpu/cpu_aidle":{ + "metric":"cpu_aidle", + "pointInTime":true, + "temporal":true + }, + "metrics/cpu/cpu_idle":{ + "metric":"cpu_idle", + "pointInTime":true, + "temporal":true + }, + "metrics/cpu/cpu_nice":{ + "metric":"cpu_nice", + "pointInTime":true, + "temporal":true + }, + "metrics/cpu/cpu_num":{ + "metric":"cpu_num", + "pointInTime":true, + "temporal":true + }, + "metrics/cpu/cpu_speed":{ + "metric":"cpu_speed", + "pointInTime":true, + "temporal":true + }, + "metrics/cpu/cpu_system":{ + "metric":"cpu_system", + "pointInTime":true, + "temporal":true + }, + "metrics/cpu/cpu_user":{ + "metric":"cpu_user", + "pointInTime":true, + "temporal":true + }, + "metrics/cpu/cpu_wio":{ + "metric":"cpu_wio", + "pointInTime":true, + "temporal":true + }, + "metrics/disk/disk_free":{ + "metric":"disk_free", + "pointInTime":true, + "temporal":true + }, + "metrics/disk/disk_total":{ + "metric":"disk_total", + "pointInTime":true, + "temporal":true + }, + "metrics/disk/part_max_used":{ + "metric":"part_max_used", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/gcCount":{ + "metric":"jvm.metrics.gcCount", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/gcTimeMillis":{ + "metric":"jvm.metrics.gcTimeMillis", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/logError":{ + "metric":"jvm.metrics.logError", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/logFatal":{ + "metric":"jvm.metrics.logFatal", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/logInfo":{ + "metric":"jvm.metrics.logInfo", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/logWarn":{ + "metric":"jvm.metrics.logWarn", + "pointInTime":true, + 
"temporal":true + }, + "metrics/jvm/maxMemoryM":{ + "metric":"jvm.metrics.maxMemoryM", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/memHeapCommittedM":{ + "metric":"jvm.metrics.memHeapCommittedM", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/memHeapUsedM":{ + "metric":"jvm.metrics.memHeapUsedM", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/memNonHeapCommittedM":{ + "metric":"jvm.metrics.memNonHeapCommittedM", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/memNonHeapUsedM":{ + "metric":"jvm.metrics.memNonHeapUsedM", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/threadsBlocked":{ + "metric":"jvm.metrics.threadsBlocked", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/threadsNew":{ + "metric":"jvm.metrics.threadsNew", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/threadsRunnable":{ + "metric":"jvm.metrics.threadsRunnable", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/threadsTerminated":{ + "metric":"jvm.metrics.threadsTerminated", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/threadsTimedWaiting":{ + "metric":"jvm.metrics.threadsTimedWaiting", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/threadsWaiting":{ + "metric":"jvm.metrics.threadsWaiting", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/CHANNEL/$2/ChannelCapacity":{ + "metric":"(\\w+).CHANNEL.(\\w+).ChannelCapacity", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/CHANNEL/$2/StartTime":{ + "metric":"(\\w+).CHANNEL.(\\w+).StartTime", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/CHANNEL/$2/EventTakeAttemptCount":{ + "metric":"(\\w+).CHANNEL.(\\w+).EventTakeAttemptCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/CHANNEL/$2/EventTakeSuccessCount":{ + "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount", + "pointInTime":true, + "temporal":true + }, + 
"metrics/flume/$1/CHANNEL/$2/EventPutAttemptCount":{ + "metric":"(\\w+).CHANNEL.(\\w+).EventPutAttemptCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/CHANNEL/$2/StopTime":{ + "metric":"(\\w+).CHANNEL.(\\w+).StopTime", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/CHANNEL/$2/ChannelFillPercentage":{ + "metric":"(\\w+).CHANNEL.(\\w+).ChannelFillPercentage", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/CHANNEL/$2/ChannelSize":{ + "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/CHANNEL/$2/EventPutSuccessCount":{ + "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/ConnectionCreatedCount":{ + "metric":"(\\w+).SINK.(\\w+).ConnectionCreatedCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/BatchCompleteCount":{ + "metric":"(\\w+).SINK.(\\w+).BatchCompleteCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/EventDrainSuccessCount":{ + "metric":"(\\w+).SINK.(\\w+).EventDrainSuccessCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/StartTime":{ + "metric":"(\\w+).SINK.(\\w+).StartTime", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/EventDrainAttemptCount":{ + "metric":"(\\w+).SINK.(\\w+).EventDrainAttemptCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/ConnectionFailedCount":{ + "metric":"(\\w+).SINK.(\\w+).ConnectionFailedCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/BatchUnderflowCount":{ + "metric":"(\\w+).SINK.(\\w+).BatchUnderflowCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/ConnectionClosedCount":{ + "metric":"(\\w+).SINK.(\\w+).ConnectionClosedCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/StopTime":{ + 
"metric":"(\\w+).SINK.(\\w+).StopTime", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/BatchEmptyCount":{ + "metric":"(\\w+).SINK.(\\w+).BatchEmptyCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SOURCE/$2/AppendBatchReceivedCount":{ + "metric":"(\\w+).SOURCE.(\\w+).AppendBatchReceivedCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SOURCE/$2/AppendAcceptedCount":{ + "metric":"(\\w+).SOURCE.(\\w+).AppendAcceptedCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SOURCE/$2/StartTime":{ + "metric":"(\\w+).SOURCE.(\\w+).StartTime", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SOURCE/$2/OpenConnectionCount":{ + "metric":"(\\w+).SOURCE.(\\w+).OpenConnectionCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SOURCE/$2/AppendBatchAcceptedCount":{ + "metric":"(\\w+).SOURCE.(\\w+).AppendBatchAcceptedCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SOURCE/$2/AppendReceivedCount":{ + "metric":"(\\w+).SOURCE.(\\w+).AppendReceivedCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SOURCE/$2/EventReceivedCount":{ + "metric":"(\\w+).SOURCE.(\\w+).EventReceivedCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SOURCE/$2/StopTime":{ + "metric":"(\\w+).SOURCE.(\\w+).StopTime", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SOURCE/$2/EventAcceptedCount":{ + "metric":"(\\w+).SOURCE.(\\w+).EventAcceptedCount", + "pointInTime":true, + "temporal":true + }, + + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/min": { + "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._min", + "pointInTime":false, + "temporal":true + }, + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/max": { + "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._max", + "pointInTime":false, + 
"temporal":true + }, + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/avg": { + "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._avg", + "pointInTime":false, + "temporal":true + }, + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/sum": { + "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._sum", + "pointInTime":false, + "temporal":true + }, + + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/avg": { + "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._avg", + "pointInTime":false, + "temporal":true + }, + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/max": { + "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._max", + "pointInTime":false, + "temporal":true + }, + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/min": { + "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._min", + "pointInTime":false, + "temporal":true + }, + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/sum": { + "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._sum", + "pointInTime":false, + "temporal":true + }, + + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/avg": { + "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._avg", + "pointInTime":false, + "temporal":true + }, + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/max": { + "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._max", + "pointInTime":false, + "temporal":true + }, + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/min": { + "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._min", + "pointInTime":false, + "temporal":true + }, + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/sum": { + 
"metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._sum", + "pointInTime":false, + "temporal":true + } + + } + } + ], + "HostComponent": [ + { + "type": "ganglia", + "metrics": { + "metrics/boottime":{ + "metric":"boottime", + "pointInTime":true, + "temporal":true + }, + "metrics/cpu/cpu_aidle":{ + "metric":"cpu_aidle", + "pointInTime":true, + "temporal":true + }, + "metrics/cpu/cpu_idle":{ + "metric":"cpu_idle", + "pointInTime":true, + "temporal":true + }, + "metrics/cpu/cpu_nice":{ + "metric":"cpu_nice", + "pointInTime":true, + "temporal":true + }, + "metrics/cpu/cpu_num":{ + "metric":"cpu_num", + "pointInTime":true, + "temporal":true + }, + "metrics/cpu/cpu_speed":{ + "metric":"cpu_speed", + "pointInTime":true, + "temporal":true + }, + "metrics/cpu/cpu_system":{ + "metric":"cpu_system", + "pointInTime":true, + "temporal":true + }, + "metrics/cpu/cpu_user":{ + "metric":"cpu_user", + "pointInTime":true, + "temporal":true + }, + "metrics/cpu/cpu_wio":{ + "metric":"cpu_wio", + "pointInTime":true, + "temporal":true + }, + "metrics/disk/disk_free":{ + "metric":"disk_free", + "pointInTime":true, + "temporal":true + }, + "metrics/disk/disk_total":{ + "metric":"disk_total", + "pointInTime":true, + "temporal":true + }, + "metrics/disk/part_max_used":{ + "metric":"part_max_used", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/gcCount":{ + "metric":"jvm.metrics.gcCount", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/gcTimeMillis":{ + "metric":"jvm.metrics.gcTimeMillis", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/logError":{ + "metric":"jvm.metrics.logError", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/logFatal":{ + "metric":"jvm.metrics.logFatal", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/logInfo":{ + "metric":"jvm.metrics.logInfo", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/logWarn":{ + "metric":"jvm.metrics.logWarn", + "pointInTime":true, + "temporal":true + }, + 
"metrics/jvm/maxMemoryM":{ + "metric":"jvm.metrics.maxMemoryM", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/memHeapCommittedM":{ + "metric":"jvm.metrics.memHeapCommittedM", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/memHeapUsedM":{ + "metric":"jvm.metrics.memHeapUsedM", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/memNonHeapCommittedM":{ + "metric":"jvm.metrics.memNonHeapCommittedM", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/memNonHeapUsedM":{ + "metric":"jvm.metrics.memNonHeapUsedM", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/threadsBlocked":{ + "metric":"jvm.metrics.threadsBlocked", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/threadsNew":{ + "metric":"jvm.metrics.threadsNew", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/threadsRunnable":{ + "metric":"jvm.metrics.threadsRunnable", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/threadsTerminated":{ + "metric":"jvm.metrics.threadsTerminated", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/threadsTimedWaiting":{ + "metric":"jvm.metrics.threadsTimedWaiting", + "pointInTime":true, + "temporal":true + }, + "metrics/jvm/threadsWaiting":{ + "metric":"jvm.metrics.threadsWaiting", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/CHANNEL/$2/ChannelCapacity":{ + "metric":"(\\w+).CHANNEL.(\\w+).ChannelCapacity", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/CHANNEL/$2/StartTime":{ + "metric":"(\\w+).CHANNEL.(\\w+).StartTime", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/CHANNEL/$2/EventTakeAttemptCount":{ + "metric":"(\\w+).CHANNEL.(\\w+).EventTakeAttemptCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/CHANNEL/$2/EventTakeSuccessCount":{ + "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/CHANNEL/$2/EventPutAttemptCount":{ + 
"metric":"(\\w+).CHANNEL.(\\w+).EventPutAttemptCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/CHANNEL/$2/StopTime":{ + "metric":"(\\w+).CHANNEL.(\\w+).StopTime", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/CHANNEL/$2/ChannelFillPercentage":{ + "metric":"(\\w+).CHANNEL.(\\w+).ChannelFillPercentage", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/CHANNEL/$2/ChannelSize":{ + "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/CHANNEL/$2/EventPutSuccessCount":{ + "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/ConnectionCreatedCount":{ + "metric":"(\\w+).SINK.(\\w+).ConnectionCreatedCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/BatchCompleteCount":{ + "metric":"(\\w+).SINK.(\\w+).BatchCompleteCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/EventDrainSuccessCount":{ + "metric":"(\\w+).SINK.(\\w+).EventDrainSuccessCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/StartTime":{ + "metric":"(\\w+).SINK.(\\w+).StartTime", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/EventDrainAttemptCount":{ + "metric":"(\\w+).SINK.(\\w+).EventDrainAttemptCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/ConnectionFailedCount":{ + "metric":"(\\w+).SINK.(\\w+).ConnectionFailedCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/BatchUnderflowCount":{ + "metric":"(\\w+).SINK.(\\w+).BatchUnderflowCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/ConnectionClosedCount":{ + "metric":"(\\w+).SINK.(\\w+).ConnectionClosedCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SINK/$2/StopTime":{ + "metric":"(\\w+).SINK.(\\w+).StopTime", + "pointInTime":true, + 
"temporal":true + }, + "metrics/flume/$1/SINK/$2/BatchEmptyCount":{ + "metric":"(\\w+).SINK.(\\w+).BatchEmptyCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SOURCE/$2/AppendBatchReceivedCount":{ + "metric":"(\\w+).SOURCE.(\\w+).AppendBatchReceivedCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SOURCE/$2/AppendAcceptedCount":{ + "metric":"(\\w+).SOURCE.(\\w+).AppendAcceptedCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SOURCE/$2/StartTime":{ + "metric":"(\\w+).SOURCE.(\\w+).StartTime", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SOURCE/$2/OpenConnectionCount":{ + "metric":"(\\w+).SOURCE.(\\w+).OpenConnectionCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SOURCE/$2/AppendBatchAcceptedCount":{ + "metric":"(\\w+).SOURCE.(\\w+).AppendBatchAcceptedCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SOURCE/$2/AppendReceivedCount":{ + "metric":"(\\w+).SOURCE.(\\w+).AppendReceivedCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SOURCE/$2/EventReceivedCount":{ + "metric":"(\\w+).SOURCE.(\\w+).EventReceivedCount", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SOURCE/$2/StopTime":{ + "metric":"(\\w+).SOURCE.(\\w+).StopTime", + "pointInTime":true, + "temporal":true + }, + "metrics/flume/$1/SOURCE/$2/EventAcceptedCount":{ + "metric":"(\\w+).SOURCE.(\\w+).EventAcceptedCount", + "pointInTime":true, + "temporal":true + }, + + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/avg": { + "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._avg", + "pointInTime":false, + "temporal":true + }, + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/max": { + "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._max", + "pointInTime":false, + "temporal":true + }, + 
"metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/min": { + "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._min", + "pointInTime":false, + "temporal":true + }, + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/sum": { + "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._sum", + "pointInTime":false, + "temporal":true + }, + + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/avg": { + "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._avg", + "pointInTime":false, + "temporal":true + }, + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/max": { + "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._max", + "pointInTime":false, + "temporal":true + }, + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/min": { + "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._min", + "pointInTime":false, + "temporal":true + }, + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/sum": { + "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._sum", + "pointInTime":false, + "temporal":true + }, + + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/avg": { + "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._avg", + "pointInTime":false, + "temporal":true + }, + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/max": { + "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._max", + "pointInTime":false, + "temporal":true + }, + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/min": { + "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._min", + "pointInTime":false, + "temporal":true + }, + "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/sum": { + 
"metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._sum", + "pointInTime":false, + "temporal":true + } + + } + } + ] + } +} http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume.py new file mode 100644 index 0000000..2db4039 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume.py @@ -0,0 +1,255 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +""" + +import glob +import json +import os +from resource_management import * + +def flume(action = None): + import params + + if action == 'config': + # remove previously defined meta's + for n in find_expected_agent_names(): + os.unlink(os.path.join(params.flume_conf_dir, n, 'ambari-meta.json')) + + Directory(params.flume_conf_dir, recursive=True) + Directory(params.flume_log_dir, owner=params.flume_user) + + File(format("{flume_conf_dir}/flume-env.sh"), + owner=params.flume_user, + content=InlineTemplate(params.flume_env_sh_template) + ) + + flume_agents = {} + if params.flume_conf_content is not None: + flume_agents = build_flume_topology(params.flume_conf_content) + + for agent in flume_agents.keys(): + flume_agent_conf_dir = os.path.join(params.flume_conf_dir, agent) + flume_agent_conf_file = os.path.join(flume_agent_conf_dir, 'flume.conf') + flume_agent_meta_file = os.path.join(flume_agent_conf_dir, 'ambari-meta.json') + flume_agent_log4j_file = os.path.join(flume_agent_conf_dir, 'log4j.properties') + + Directory(flume_agent_conf_dir) + + PropertiesFile(flume_agent_conf_file, + properties=flume_agents[agent], + mode = 0644) + + File(flume_agent_log4j_file, + content=Template('log4j.properties.j2', agent_name = agent), + mode = 0644) + + File(flume_agent_meta_file, + content = json.dumps(ambari_meta(agent, flume_agents[agent])), + mode = 0644) + + elif action == 'start': + # desired state for service should be STARTED + if len(params.flume_command_targets) == 0: + _set_desired_state('STARTED') + + flume_base = format('su -s /bin/bash {flume_user} -c "export JAVA_HOME={java_home}; ' + '{flume_bin} agent --name {{0}} --conf {{1}} --conf-file {{2}} {{3}}"') + + for agent in cmd_target_names(): + flume_agent_conf_dir = params.flume_conf_dir + os.sep + agent + flume_agent_conf_file = flume_agent_conf_dir + os.sep + "flume.conf" + flume_agent_pid_file = params.flume_run_dir + os.sep + agent + ".pid" + + if not os.path.isfile(flume_agent_conf_file): + continue + 
+ if not is_live(flume_agent_pid_file): + # TODO someday make the ganglia ports configurable + extra_args = '' + if params.ganglia_server_host is not None: + extra_args = '-Dflume.monitoring.type=ganglia -Dflume.monitoring.hosts={0}:{1}' + extra_args = extra_args.format(params.ganglia_server_host, '8655') + + flume_cmd = flume_base.format(agent, flume_agent_conf_dir, + flume_agent_conf_file, extra_args) + + Execute(flume_cmd, wait_for_finish=False) + + # sometimes startup spawns a couple of threads - so only the first line may count + pid_cmd = format('pgrep -o -u {flume_user} -f ^{java_home}.*{agent}.* > {flume_agent_pid_file}') + Execute(pid_cmd, logoutput=True, tries=10, try_sleep=6) + + pass + elif action == 'stop': + # desired state for service should be INSTALLED + if len(params.flume_command_targets) == 0: + _set_desired_state('INSTALLED') + + pid_files = glob.glob(params.flume_run_dir + os.sep + "*.pid") + + if 0 == len(pid_files): + return + + agent_names = cmd_target_names() + + + for agent in agent_names: + pid_file = params.flume_run_dir + os.sep + agent + '.pid' + pid = format('`cat {pid_file}` > /dev/null 2>&1') + Execute(format('kill {pid}'), ignore_failures=True) + File(pid_file, action = 'delete') + + +def ambari_meta(agent_name, agent_conf): + res = {} + + sources = agent_conf[agent_name + '.sources'].split(' ') + res['sources_count'] = len(sources) + + sinks = agent_conf[agent_name + '.sinks'].split(' ') + res['sinks_count'] = len(sinks) + + channels = agent_conf[agent_name + '.channels'].split(' ') + res['channels_count'] = len(channels) + + return res + +# define a map of dictionaries, where the key is agent name +# and the dictionary is the name/value pair +def build_flume_topology(content): + + result = {} + agent_names = [] + + for line in content.split('\n'): + rline = line.strip() + if 0 != len(rline) and not rline.startswith('#'): + pair = rline.split('=') + lhs = pair[0].strip() + rhs = pair[1].strip() + + part0 = lhs.split('.')[0] + + 
if lhs.endswith(".sources"): + agent_names.append(part0) + + if not result.has_key(part0): + result[part0] = {} + + result[part0][lhs] = rhs + + # trim out non-agents + for k in result.keys(): + if not k in agent_names: + del result[k] + + + return result + +def is_live(pid_file): + live = False + + try: + check_process_status(pid_file) + live = True + except ComponentIsNotRunning: + pass + + return live + +def live_status(pid_file): + import params + + pid_file_part = pid_file.split(os.sep).pop() + + res = {} + res['name'] = pid_file_part + + if pid_file_part.endswith(".pid"): + res['name'] = pid_file_part[:-4] + + res['status'] = 'RUNNING' if is_live(pid_file) else 'NOT_RUNNING' + res['sources_count'] = 0 + res['sinks_count'] = 0 + res['channels_count'] = 0 + + flume_agent_conf_dir = params.flume_conf_dir + os.sep + res['name'] + flume_agent_meta_file = flume_agent_conf_dir + os.sep + 'ambari-meta.json' + + try: + with open(flume_agent_meta_file) as fp: + meta = json.load(fp) + res['sources_count'] = meta['sources_count'] + res['sinks_count'] = meta['sinks_count'] + res['channels_count'] = meta['channels_count'] + except: + pass + + return res + +def flume_status(): + import params + + meta_files = find_expected_agent_names() + pid_files = [] + for agent_name in meta_files: + pid_files.append(os.path.join(params.flume_run_dir, agent_name + '.pid')) + + procs = [] + for pid_file in pid_files: + procs.append(live_status(pid_file)) + + return procs + +# these are what Ambari believes should be running +def find_expected_agent_names(): + import params + + files = glob.glob(params.flume_conf_dir + os.sep + "*/ambari-meta.json") + expected = [] + + for f in files: + expected.append(os.path.dirname(f).split(os.sep).pop()) + + return expected + +def cmd_target_names(): + import params + + if len(params.flume_command_targets) > 0: + return params.flume_command_targets + else: + return find_expected_agent_names() + +def _set_desired_state(state): + import params + try: + 
with open(os.path.join(params.flume_run_dir, 'ambari-state.txt'), 'w') as fp: + fp.write(state) + except: + pass + +def get_desired_state(): + import params + + try: + with open(os.path.join(params.flume_run_dir, 'ambari-state.txt'), 'r') as fp: + return fp.read() + except: + return 'INSTALLED' + http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume_check.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume_check.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume_check.py new file mode 100644 index 0000000..b93b8e8 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume_check.py @@ -0,0 +1,40 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +""" + +from resource_management import * + + +class FlumeServiceCheck(Script): + + def service_check(self, env): + import params + + env.set_params(params) + if params.security_enabled: + principal_replaced = params.http_principal.replace("_HOST", params.hostname) + Execute(format("{kinit_path_local} -kt {http_keytab} {principal_replaced}"), + user=params.smoke_user) + + Execute(format('env JAVA_HOME={java_home} {flume_bin} version'), + logoutput=True, + tries = 3, + try_sleep = 20) + +if __name__ == "__main__": + FlumeServiceCheck().execute() http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume_handler.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume_handler.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume_handler.py new file mode 100644 index 0000000..42ac560 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/flume_handler.py @@ -0,0 +1,121 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +""" + +from resource_management import * +from flume import flume +from flume import flume_status +from flume import find_expected_agent_names +from flume import get_desired_state + +class FlumeHandler(Script): + def install(self, env): + import params + + self.install_packages(env) + env.set_params(params) + + def start(self, env): + import params + + env.set_params(params) + self.configure(env) + + flume(action='start') + + def stop(self, env): + import params + + env.set_params(params) + + flume(action='stop') + + def configure(self, env): + import params + + env.set_params(params) + + flume(action='config') + + def status(self, env): + import params + + env.set_params(params) + + processes = flume_status() + expected_agents = find_expected_agent_names() + + json = {} + json['processes'] = processes + json['alerts'] = [] + + alert = {} + alert['name'] = 'flume_agent' + alert['label'] = 'Flume Agent process' + + if len(processes) == 0 and len(expected_agents) == 0: + alert['state'] = 'OK' + + if not params.hostname is None: + alert['text'] = 'No agents defined on ' + params.hostname + else: + alert['text'] = 'No agents defined' + + else: + crit = [] + ok = [] + + for proc in processes: + if not proc.has_key('status') or proc['status'] == 'NOT_RUNNING': + crit.append(proc['name']) + else: + ok.append(proc['name']) + + text_arr = [] + + if len(crit) > 0: + text_arr.append("{0} {1} NOT running".format(", ".join(crit), + "is" if len(crit) == 1 else "are")) + + if len(ok) > 0: + text_arr.append("{0} {1} running".format(", ".join(ok), + "is" if len(ok) == 1 else "are")) + + plural = len(crit) > 1 or len(ok) > 1 + alert['text'] = "Agent{0} {1} {2}".format( + "s" if plural else "", + " and ".join(text_arr), + "" if params.hostname is None else "on " + str(params.hostname)) + + alert['state'] = 'CRITICAL' if len(crit) > 0 else 'OK' + + json['alerts'].append(alert) + self.put_structured_out(json) + + # only throw an exception if there are agents defined and there is a + 
# problem with the processes; if there are no agents defined, then + # the service should report STARTED (green) ONLY if the desired state is started. otherwise, INSTALLED (red) + if len(expected_agents) > 0: + for proc in processes: + if not proc.has_key('status') or proc['status'] == 'NOT_RUNNING': + raise ComponentIsNotRunning() + elif len(expected_agents) == 0 and 'INSTALLED' == get_desired_state(): + raise ComponentIsNotRunning() + +if __name__ == "__main__": + FlumeHandler().execute() http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/params.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/params.py new file mode 100644 index 0000000..227bf8a --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/scripts/params.py @@ -0,0 +1,70 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +""" + +from resource_management import * + +config = Script.get_config() + +user_group = config['configurations']['cluster-env']['user_group'] +proxyuser_group = config['configurations']['hadoop-env']['proxyuser_group'] + +security_enabled = False + +#RPM versioning support +rpm_version = default("/configurations/cluster-env/rpm_version", None) + +#hadoop params +if rpm_version: + flume_bin = '/usr/phd/current/flume-client/bin/flume-ng' +else: + flume_bin = '/usr/bin/flume-ng' + +flume_conf_dir = '/etc/flume/conf' +java_home = config['hostLevelParams']['java_home'] +flume_log_dir = '/var/log/flume' +flume_run_dir = '/var/run/flume' +flume_user = 'flume' +flume_group = 'flume' + +if 'flume-env' in config['configurations'] and 'flume_user' in config['configurations']['flume-env']: + flume_user = config['configurations']['flume-env']['flume_user'] + +if (('flume-conf' in config['configurations']) and('content' in config['configurations']['flume-conf'])): + flume_conf_content = config['configurations']['flume-conf']['content'] +else: + flume_conf_content = None + +if (('flume-log4j' in config['configurations']) and ('content' in config['configurations']['flume-log4j'])): + flume_log4j_content = config['configurations']['flume-log4j']['content'] +else: + flume_log4j_content = None + +targets = default('/commandParams/flume_handler', None) +flume_command_targets = [] if targets is None else targets.split(',') + +flume_env_sh_template = config['configurations']['flume-env']['content'] + +ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) +ganglia_server_host = None +if 0 != len(ganglia_server_hosts): + ganglia_server_host = ganglia_server_hosts[0] + +hostname = None +if config.has_key('hostname'): + hostname = config['hostname'] http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/templates/flume.conf.j2 
---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/templates/flume.conf.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/templates/flume.conf.j2 new file mode 100644 index 0000000..70e495c --- /dev/null +++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/templates/flume.conf.j2 @@ -0,0 +1,24 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +# flume.conf: Add your flume configuration here and start flume +# Note if you are using the Windows service or Unix service +# provided by the PHD distribution, they will assume the +# agent's name in this file to be 'a1' +# +{{flume_agent_conf_content}}