Andrew McLeod has proposed merging lp:~bigdata-dev/charms/trusty/apache-hadoop-yarn-master/ganglia_metrics into lp:~bigdata-dev/charms/trusty/apache-hadoop-yarn-master/trunk.
Requested reviews:
  Juju Big Data Development (bigdata-dev)

For more details, see:
https://code.launchpad.net/~bigdata-dev/charms/trusty/apache-hadoop-yarn-master/ganglia_metrics/+merge/268538

Added capability to send ganglia metrics if a ganglia relation exists.
-- 
Your team Juju Big Data Development is requested to review the proposed merge of lp:~bigdata-dev/charms/trusty/apache-hadoop-yarn-master/ganglia_metrics into lp:~bigdata-dev/charms/trusty/apache-hadoop-yarn-master/trunk.
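To exercise the new relation on a deployed cluster, something along these lines should work (assuming a ganglia charm providing the monitor interface is available; the service names below are illustrative only):

    juju deploy ganglia
    juju add-relation ganglia apache-hadoop-yarn-master

Joining the relation drives callbacks.conf_ganglia_metrics through the new 'ganglia' entry in hooks/common.py; breaking it runs callbacks.purge_ganglia_metrics.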
=== modified file 'DEV-README.md'
--- DEV-README.md	2015-07-24 15:49:14 +0000
+++ DEV-README.md	2015-08-19 19:25:13 +0000
@@ -78,7 +78,7 @@
 ## Manual Deployment
 
 The easiest way to deploy the core Apache Hadoop platform is to use one of
-the [apache bundles](https://jujucharms.com/u/bigdata-dev/#bundles).
+the [apache bundles](https://jujucharms.com/u/bigdata-charmers/#bundles).
 However, to manually deploy the base Apache Hadoop platform without using one
 of the bundles, you can use the following:
 
=== modified file 'README.md'
--- README.md	2015-08-07 21:32:44 +0000
+++ README.md	2015-08-19 19:25:13 +0000
@@ -11,10 +11,10 @@
 ## Usage
 
 This charm is intended to be deployed via one of the
-[apache bundles](https://jujucharms.com/u/bigdata-dev/#bundles).
+[apache bundles](https://jujucharms.com/u/bigdata-charmers/#bundles).
 For example:
 
-    juju quickstart u/bigdata-dev/apache-analytics-sql
+    juju quickstart apache-analytics-sql
 
 This will deploy the Apache Hadoop platform with Apache Hive available to
 perform SQL-like queries against your data.
=== modified file 'hooks/callbacks.py'
--- hooks/callbacks.py	2015-08-10 22:59:54 +0000
+++ hooks/callbacks.py	2015-08-19 19:25:13 +0000
@@ -18,7 +18,10 @@
 
 from charmhelpers.core import hookenv
 from charmhelpers.core import unitdata
-from jujubigdata.relations import NameNode, NodeManager
+from jujubigdata.relations import NameNode, NodeManager, Ganglia
+from charmhelpers.core.templating import render
+from functools import partial
+
 
 
 def update_blocked_status():
@@ -50,3 +53,27 @@
 
 def clear_active_flag():
     unitdata.kv().set('charm.active', False)
+
+
+def conf_ganglia_metrics(purgeConf=False):
+    """
+    Send hadoop specific metrics to a ganglia server
+    """
+    if purgeConf:
+        ganglia_sink_str = '# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31'
+        ganglia_host = 'UNSET_BY_JUJU'
+    else:
+        ganglia_sink_str = '*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31'
+        ganglia_host = Ganglia().host()
+
+    hookenv.log("Configuring ganglia sink in /etc/hadoop/conf/hadoop-metrics2.properties", level=None)
+    render(
+        source='hadoop-metrics2.properties.j2',
+        target='/etc/hadoop/conf/hadoop-metrics2.properties',
+        context={
+            'ganglia_host': ganglia_host,
+            'ganglia_sink_str': ganglia_sink_str,
+        },
+    )
+
+purge_ganglia_metrics = partial(conf_ganglia_metrics, purgeConf=True)
=== modified file 'hooks/common.py'
--- hooks/common.py	2015-08-10 22:59:54 +0000
+++ hooks/common.py	2015-08-19 19:25:13 +0000
@@ -116,6 +116,21 @@
                 callbacks.update_blocked_status,
             ],
         },
+        {
+            'name': 'ganglia',
+            'requires': [
+                hadoop.is_installed,
+                jujubigdata.relations.Ganglia,
+            ],
+            'callbacks': [
+                callbacks.conf_ganglia_metrics,
+            ],
+            'cleanup': [
+                callbacks.purge_ganglia_metrics,
+            ],
+
+        },
+
     ])
     manager.manage()
 
=== added file 'hooks/ganglia-relation-broken'
--- hooks/ganglia-relation-broken	1970-01-01 00:00:00 +0000
+++ hooks/ganglia-relation-broken	2015-08-19 19:25:13 +0000
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+All hooks in this charm are managed by the Charm Framework.
+The framework helps manage dependencies and preconditions to ensure that
+steps are only executed when they can be successful. As such, no additional
+code should be added to this hook; instead, please integrate new functionality
+into the 'callbacks' list in hooks/common.py. New callbacks can be placed
+in hooks/callbacks.py, if necessary.
+
+See http://big-data-charm-helpers.readthedocs.org/en/latest/examples/framework.html
+for more information.
+"""
+import common
+common.manage()
=== added file 'hooks/ganglia-relation-changed'
--- hooks/ganglia-relation-changed	1970-01-01 00:00:00 +0000
+++ hooks/ganglia-relation-changed	2015-08-19 19:25:13 +0000
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+All hooks in this charm are managed by the Charm Framework.
+The framework helps manage dependencies and preconditions to ensure that
+steps are only executed when they can be successful. As such, no additional
+code should be added to this hook; instead, please integrate new functionality
+into the 'callbacks' list in hooks/common.py. New callbacks can be placed
+in hooks/callbacks.py, if necessary.
+
+See http://big-data-charm-helpers.readthedocs.org/en/latest/examples/framework.html
+for more information.
+"""
+import common
+common.manage()
=== modified file 'metadata.yaml'
--- metadata.yaml	2015-04-23 20:48:28 +0000
+++ metadata.yaml	2015-08-19 19:25:13 +0000
@@ -10,6 +10,8 @@
 provides:
   resourcemanager:
     interface: mapred
+  ganglia:
+    interface: monitor
 requires:
   namenode:
     interface: dfs
=== added directory 'templates'
=== added file 'templates/hadoop-metrics2.properties.j2'
--- templates/hadoop-metrics2.properties.j2	1970-01-01 00:00:00 +0000
+++ templates/hadoop-metrics2.properties.j2	2015-08-19 19:25:13 +0000
@@ -0,0 +1,69 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# syntax: [prefix].[source|sink].[instance].[options]
+# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
+
+*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
+# default sampling period, in seconds
+*.period=10
+
+# Defining sink for Ganglia 3.1
+{{ ganglia_sink_str }}
+
+# Default polling period for GangliaSink
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+# Directing output to ganglia servers
+
+*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+namenode.sink.ganglia.servers={{ ganglia_host }}:8649
+datanode.sink.ganglia.servers={{ ganglia_host }}:8649
+jobtracker.sink.ganglia.servers={{ ganglia_host }}:8649
+tasktracker.sink.ganglia.servers={{ ganglia_host }}:8649
+maptask.sink.ganglia.servers={{ ganglia_host }}:8649
+reducetask.sink.ganglia.servers={{ ganglia_host }}:8649
+resourcemanager.sink.ganglia.servers={{ ganglia_host }}:8649
+nodemanager.sink.ganglia.servers={{ ganglia_host }}:8649
+historyserver.sink.ganglia.servers={{ ganglia_host }}:8649
+journalnode.sink.ganglia.servers={{ ganglia_host }}:8649
+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
+
+# The namenode-metrics.out will contain metrics from all context
+#namenode.sink.file.filename=namenode-metrics.out
+# Specifying a special sampling period for namenode:
+#namenode.sink.*.period=8
+
+#datanode.sink.file.filename=datanode-metrics.out
+
+# the following example split metrics of different
+# context to different sinks (in this case files)
+#jobtracker.sink.file_jvm.context=jvm
+#jobtracker.sink.file_jvm.filename=jobtracker-jvm-metrics.out
+#jobtracker.sink.file_mapred.context=mapred
+#jobtracker.sink.file_mapred.filename=jobtracker-mapred-metrics.out
+
+#tasktracker.sink.file.filename=tasktracker-metrics.out
+
+#maptask.sink.file.filename=maptask-metrics.out
+
+#reducetask.sink.file.filename=reducetask-metrics.out
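For reference, with a related ganglia unit advertising its address over the relation (10.0.3.4 below is only a made-up example), the rendered /etc/hadoop/conf/hadoop-metrics2.properties would contain lines such as:

    *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
    resourcemanager.sink.ganglia.servers=10.0.3.4:8649
    nodemanager.sink.ganglia.servers=10.0.3.4:8649

On ganglia-relation-broken, purge_ganglia_metrics re-renders the same file with the sink class line commented out and the host reset to UNSET_BY_JUJU.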

