Kevin W Monroe has proposed merging lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/trunk into lp:charms/trusty/apache-hadoop-compute-slave.
Requested reviews:
  Kevin W Monroe (kwmonroe)

For more details, see:
https://code.launchpad.net/~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/trunk/+merge/268667

--
Your team Juju Big Data Development is subscribed to branch lp:~bigdata-dev/charms/trusty/apache-hadoop-compute-slave/trunk.
=== modified file 'DEV-README.md'
--- DEV-README.md	2015-06-29 14:15:27 +0000
+++ DEV-README.md	2015-08-20 23:11:02 +0000
@@ -49,10 +49,17 @@
 
 ## Manual Deployment
 
+<<<<<<< TREE
 The easiest way to deploy the core Apache Hadoop platform is to use one of
 the [apache bundles](https://jujucharms.com/u/bigdata-charmers/#bundles).
 However, to manually deploy the base Apache Hadoop platform without using one
 of the bundles, you can use the following:
+=======
+The easiest way to deploy an Apache Hadoop platform is to use one of
+the [apache bundles](https://jujucharms.com/u/bigdata-charmers/#bundles).
+However, to manually deploy the base Apache Hadoop platform without using one
+of the bundles, you can use the following:
+>>>>>>> MERGE-SOURCE
 
     juju deploy apache-hadoop-hdfs-master hdfs-master
     juju deploy apache-hadoop-hdfs-secondary secondary-namenode

=== modified file 'README.md'
--- README.md	2015-06-29 14:15:27 +0000
+++ README.md	2015-08-20 23:11:02 +0000
@@ -59,17 +59,19 @@
 of these resources:
 
     sudo pip install jujuresources
-    juju resources fetch --all apache-hadoop-compute-slave/resources.yaml -d /tmp/resources
-    juju resources serve -d /tmp/resources
+    juju-resources fetch --all /path/to/resources.yaml -d /tmp/resources
+    juju-resources serve -d /tmp/resources
 
 This will fetch all of the resources needed by this charm and serve them via a
-simple HTTP server. You can then set the `resources_mirror` config option to
-have the charm use this server for retrieving resources.
+simple HTTP server. The output from `juju-resources serve` will give you a
+URL that you can set as the `resources_mirror` config option for this charm.
+Setting this option will cause all resources required by this charm to be
+downloaded from the configured URL.
 
 You can fetch the resources for all of the Apache Hadoop charms
 (`apache-hadoop-hdfs-master`, `apache-hadoop-yarn-master`,
 `apache-hadoop-hdfs-secondary`, `apache-hadoop-plugin`, etc) into a single
-directory and serve them all with a single `juju resources serve` instance.
+directory and serve them all with a single `juju-resources serve` instance.
 
 ## Contact Information
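For context on the README change above: once `juju-resources serve` prints its URL, the mirror is wired in through charm config, e.g. `juju set apache-hadoop-compute-slave resources_mirror=http://<server>:<port>`. A minimal sketch of how the charm side consumes that option via the jujuresources API; the function name and status message here are illustrative, and the real bootstrap logic lives in hooks/common.py:

    # Illustrative sketch; fetch_resources() is a hypothetical name.
    import jujuresources
    from charmhelpers.core import hookenv

    def fetch_resources():
        # resources_mirror holds the URL printed by `juju-resources serve`;
        # when unset, jujuresources falls back to the URLs in resources.yaml.
        mirror_url = hookenv.config('resources_mirror')
        if not jujuresources.fetch(mirror_url=mirror_url):
            hookenv.status_set('blocked', 'Unable to fetch required resources')
            return False
        return True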
=== modified file 'dist.yaml'
--- dist.yaml	2015-04-17 22:24:50 +0000
+++ dist.yaml	2015-08-20 23:11:02 +0000
@@ -73,44 +73,16 @@
     # Only expose ports serving a UI or external API (i.e., namenode and
     # resourcemanager). Communication among units within the cluster does
     # not need ports to be explicitly opened.
-    # If adding a port here, you will need to update
-    # charmhelpers.contrib.bigdata.handlers.apache or hooks/callbacks.py
-    # to ensure that it is supported.
-    namenode:
-        port: 8020
-        exposed_on: 'hdfs-master'
-    nn_webapp_http:
-        port: 50070
-        exposed_on: 'hdfs-master'
     dn_webapp_http:
         port: 50075
         exposed_on: 'compute-slave-hdfs'
-    resourcemanager:
-        port: 8032
-        exposed_on: 'yarn-master'
-    rm_webapp_http:
-        port: 8088
-        exposed_on: 'yarn-master'
-    rm_log:
-        port: 19888
     nm_webapp_http:
         port: 8042
         exposed_on: 'compute-slave-yarn'
-    jobhistory:
-        port: 10020
-    jh_webapp_http:
-        port: 19888
-        exposed_on: 'yarn-master'
     # TODO: support SSL
-    #nn_webapp_https:
-    #    port: 50470
-    #    exposed_on: 'hdfs-master'
     #dn_webapp_https:
     #    port: 50475
     #    exposed_on: 'compute-slave-hdfs'
-    #rm_webapp_https:
-    #    port: 8090
-    #    exposed_on: 'yarn-master'
     #nm_webapp_https:
     #    port: 8044
     #    exposed_on: 'compute-slave-yarn'

=== modified file 'hooks/callbacks.py'
--- hooks/callbacks.py	2015-06-24 22:12:57 +0000
+++ hooks/callbacks.py	2015-08-20 23:11:02 +0000
@@ -24,37 +24,53 @@
 
 def update_blocked_status():
     if unitdata.kv().get('charm.active', False):
         return
-    rels = (
-        ('Yarn', 'ResourceManager', ResourceManagerMaster()),
+    rels = [
         ('HDFS', 'NameNode', NameNodeMaster()),
-    )
+    ]
     missing_rel = [rel for rel, res, impl in rels if not impl.connected_units()]
-    missing_hosts = [rel for rel, res, impl in rels if not impl.am_i_registered()]
-    not_ready = [(rel, res) for rel, res, impl in rels if not impl.is_ready()]
+    rels.append(('Yarn', 'ResourceManager', ResourceManagerMaster()))
+    not_ready = [(rel, res) for rel, res, impl in rels if impl.connected_units() and not impl.is_ready()]
+    missing_hosts = [rel for rel, res, impl in rels if impl.connected_units() and not impl.am_i_registered()]
     if missing_rel:
         hookenv.status_set('blocked', 'Waiting for relation to %s master%s' % (
             ' and '.join(missing_rel),
             's' if len(missing_rel) > 1 else '',
         )),
-    elif missing_hosts:
-        hookenv.status_set('waiting', 'Waiting for /etc/hosts registration on %s' % (
-            ' and '.join(missing_hosts),
-        ))
     elif not_ready:
         unready_rels, unready_ress = zip(*not_ready)
         hookenv.status_set('waiting', 'Waiting for %s to provide %s' % (
             ' and '.join(unready_rels),
             ' and '.join(unready_ress),
         ))
+    elif missing_hosts:
+        hookenv.status_set('waiting', 'Waiting for /etc/hosts registration on %s' % (
+            ' and '.join(missing_hosts),
+        ))
 
 
 def update_working_status():
     if unitdata.kv().get('charm.active', False):
         hookenv.status_set('maintenance', 'Updating configuration')
         return
-    hookenv.status_set('maintenance', 'Setting up NodeManager and DataNode')
+    yarn_connected = ResourceManagerMaster().connected_units()
+    hookenv.status_set('maintenance', 'Setting up DataNode%s' % (
+        ' and NodeManager' if yarn_connected else '',
+    ))
 
 
 def update_active_status():
-    unitdata.kv().set('charm.active', True)
-    hookenv.status_set('active', 'Ready')
+    hdfs_ready = NameNodeMaster().is_ready()
+    yarn_connected = ResourceManagerMaster().connected_units()
+    yarn_ready = ResourceManagerMaster().is_ready()
+    if hdfs_ready and (not yarn_connected or yarn_ready):
+        unitdata.kv().set('charm.active', True)
+        hookenv.status_set('active', 'Ready%s' % (
+            '' if yarn_ready else ' (HDFS only)'
+        ))
+    else:
+        clear_active_flag()
+        update_blocked_status()
+
+
+def clear_active_flag():
+    unitdata.kv().set('charm.active', False)
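The reordered checks in update_blocked_status and the new update_active_status above combine into a small decision table: only HDFS can block the unit, YARN matters only once it is connected, and a connected-but-unready YARN downgrades "Ready" to a wait. A plain-Python illustration of the intended outcomes (an editorial sketch, not charm code; the /etc/hosts registration wait state is omitted for brevity):

    # Sketch: expected unit status for each combination of relation flags.
    def expected_status(hdfs_connected, hdfs_ready, yarn_connected, yarn_ready):
        if not hdfs_connected:
            return ('blocked', 'Waiting for relation to HDFS master')
        if not hdfs_ready:
            return ('waiting', 'Waiting for HDFS to provide NameNode')
        if yarn_connected and not yarn_ready:
            return ('waiting', 'Waiting for Yarn to provide ResourceManager')
        if yarn_connected:
            return ('active', 'Ready')
        return ('active', 'Ready (HDFS only)')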
=== modified file 'hooks/common.py'
--- hooks/common.py	2015-06-24 22:12:57 +0000
+++ hooks/common.py	2015-08-20 23:11:02 +0000
@@ -71,40 +71,61 @@
             ],
         },
         {
-            'name': 'compute-slave',
+            'name': 'datanode',
             'provides': [
                 jujubigdata.relations.DataNode(),
+            ],
+            'requires': [
+                hadoop.is_installed,
+                hdfs_relation,
+                hdfs_relation.am_i_registered,
+            ],
+            'callbacks': [
+                callbacks.update_working_status,
+                hdfs_relation.register_provided_hosts,
+                jujubigdata.utils.manage_etc_hosts,
+                hdfs_relation.install_ssh_keys,
+                hdfs.configure_datanode,
+                hdfs.start_datanode,
+                charmframework.helpers.open_ports(
+                    dist_config.exposed_ports('compute-slave-hdfs')),
+                callbacks.update_active_status,
+            ],
+            'cleanup': [
+                callbacks.clear_active_flag,
+                charmframework.helpers.close_ports(
+                    dist_config.exposed_ports('compute-slave-hdfs')),
+                hdfs.stop_datanode,
+                callbacks.update_blocked_status,
+            ],
+        },
+        {
+            'name': 'nodemanager',
+            'provides': [
                 jujubigdata.relations.NodeManager(),
             ],
             'requires': [
                 hadoop.is_installed,
-                hdfs_relation,
                 yarn_relation,
-                hdfs_relation.am_i_registered,
                 yarn_relation.am_i_registered,
             ],
             'callbacks': [
                 callbacks.update_working_status,
-                hdfs_relation.register_provided_hosts,
                 yarn_relation.register_provided_hosts,
                 jujubigdata.utils.manage_etc_hosts,
-                hdfs_relation.install_ssh_keys,
                 yarn_relation.install_ssh_keys,
-                hdfs.configure_datanode,
                 yarn.configure_nodemanager,
-                hdfs.start_datanode,
                 yarn.start_nodemanager,
                 charmframework.helpers.open_ports(
-                    dist_config.exposed_ports('compute-slave-hdfs') +
                     dist_config.exposed_ports('compute-slave-yarn')),
                 callbacks.update_active_status,
             ],
             'cleanup': [
+                callbacks.clear_active_flag,
                 charmframework.helpers.close_ports(
-                    dist_config.exposed_ports('compute-slave-hdfs') +
                     dist_config.exposed_ports('compute-slave-yarn')),
-                hdfs.stop_datanode,
                 yarn.stop_nodemanager,
+                callbacks.update_active_status,  # might still be active if HDFS-only
             ],
         },
     ])

=== added file 'hooks/datanode-relation-departed'
--- hooks/datanode-relation-departed	1970-01-01 00:00:00 +0000
+++ hooks/datanode-relation-departed	2015-08-20 23:11:02 +0000
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import common
+
+common.manage()

=== added file 'hooks/nodemanager-relation-departed'
--- hooks/nodemanager-relation-departed	1970-01-01 00:00:00 +0000
+++ hooks/nodemanager-relation-departed	2015-08-20 23:11:02 +0000
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import common
+
+common.manage()
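Splitting the old compute-slave service into separate datanode and nodemanager blocks is what makes YARN optional: each block's 'requires' list now gates only its own callbacks, and its 'cleanup' list runs when a requirement goes away. The new *-relation-departed hooks simply call common.manage() so that a departing relation re-evaluates every block; note that the nodemanager cleanup ends with update_active_status because the unit may legitimately stay active in HDFS-only mode after YARN departs. A stripped-down sketch of that evaluate/cleanup cycle (illustrative only; the real dispatch belongs to the charmframework module used above):

    # Editorial sketch of the service-block life cycle; not the actual
    # charmframework implementation.
    def manage(services):
        for service in services:
            if all(req() for req in service['requires']):
                for callback in service['callbacks']:
                    callback()   # e.g. hdfs.configure_datanode, hdfs.start_datanode
            else:
                for cleanup in service['cleanup']:
                    cleanup()    # e.g. hdfs.stop_datanode, callbacks.update_blocked_status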
=== modified file 'resources.yaml'
--- resources.yaml	2015-07-24 15:25:29 +0000
+++ resources.yaml	2015-08-20 23:11:02 +0000
@@ -4,7 +4,11 @@
     pathlib:
         pypi: path.py>=7.0
     jujubigdata:
+<<<<<<< TREE
         pypi: jujubigdata>=2.0.2,<3.0.0
+=======
+        pypi: jujubigdata>=4.0.0,<5.0.0
+>>>>>>> MERGE-SOURCE
     java-installer:
         # This points to a script which manages installing Java.
         # If replaced with an alternate implementation, it must output *only* two

=== added file 'resources/python/jujuresources-0.2.9.tar.gz'
Binary files resources/python/jujuresources-0.2.9.tar.gz	1970-01-01 00:00:00 +0000 and resources/python/jujuresources-0.2.9.tar.gz	2015-08-20 23:11:02 +0000 differ

=== renamed file 'resources/python/jujuresources-0.2.9.tar.gz' => 'resources/python/jujuresources-0.2.9.tar.gz.moved'

