Cory Johns has proposed merging lp:~johnsca/charm-helpers/test-fixes into lp:~bigdata-dev/charm-helpers/framework.
Requested reviews:
  Juju Big Data Development (bigdata-dev)

For more details, see:
https://code.launchpad.net/~johnsca/charm-helpers/test-fixes/+merge/279937

Fixed or deleted failing tests
--
Your team Juju Big Data Development is requested to review the proposed merge of lp:~johnsca/charm-helpers/test-fixes into lp:~bigdata-dev/charm-helpers/framework.
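The only non-deletion change in the diff below is the reworked test_adds_a_user_to_a_group in tests/core/test_host.py, which now expects host.add_user_to_group to skip the gpasswd call when the user already belongs to the group. A minimal sketch of the behaviour that test assumes (an illustration only, not the actual charmhelpers.core.host implementation) is:

    import grp
    import pwd
    import subprocess

    def add_user_to_group(username, group):
        """Add `username` to `group`, doing nothing if already a member."""
        pw = pwd.getpwnam(username)   # user's primary gid
        gr = grp.getgrnam(group)      # target group's gid and member list
        if pw.pw_gid == gr.gr_gid or username in gr.gr_mem:
            return  # already a member (primary or supplementary); no-op
        subprocess.check_call(['gpasswd', '-a', username, group])

Under that behaviour the three scenarios in the updated test (different gid, matching primary gid, user already listed in gr_mem) map to one gpasswd call followed by two no-ops.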
=== removed directory 'charmhelpers/contrib/bigdata' === removed file 'charmhelpers/contrib/bigdata/__init__.py' --- charmhelpers/contrib/bigdata/__init__.py 2015-01-23 21:35:56 +0000 +++ charmhelpers/contrib/bigdata/__init__.py 1970-01-01 00:00:00 +0000 @@ -1,19 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -from . import utils # noqa -from . import relations # noqa -from . import handlers # noqa === removed directory 'charmhelpers/contrib/bigdata/handlers' === removed file 'charmhelpers/contrib/bigdata/handlers/__init__.py' --- charmhelpers/contrib/bigdata/handlers/__init__.py 2015-01-23 21:35:56 +0000 +++ charmhelpers/contrib/bigdata/handlers/__init__.py 1970-01-01 00:00:00 +0000 @@ -1,17 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -from . import apache # noqa === removed file 'charmhelpers/contrib/bigdata/handlers/apache.py' --- charmhelpers/contrib/bigdata/handlers/apache.py 2015-12-07 20:40:11 +0000 +++ charmhelpers/contrib/bigdata/handlers/apache.py 1970-01-01 00:00:00 +0000 @@ -1,479 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. 
- -import re -from subprocess import check_call, check_output -import time - -from path import Path - -import jujuresources - -from charmhelpers.core import host -from charmhelpers.core import hookenv -from charmhelpers.core import unitdata -from charmhelpers.core.charmframework import helpers -from charmhelpers.contrib.bigdata import utils - - -class HadoopBase(object): - def __init__(self, dist_config): - self.dist_config = dist_config - self.charm_config = hookenv.config() - self.cpu_arch = host.cpu_arch() - - # dist_config will have simple validation done on primary keys in the - # dist.yaml, but we need to ensure deeper values are present. - required_dirs = ['hadoop', 'hadoop_conf', 'hdfs_log_dir', - 'yarn_log_dir'] - missing_dirs = set(required_dirs) - set(self.dist_config.dirs.keys()) - if missing_dirs: - raise ValueError('dirs option in {} is missing required entr{}: {}'.format( - self.dist_config.yaml_file, - 'ies' if len(missing_dirs) > 1 else 'y', - ', '.join(missing_dirs))) - - self.client_spec = { - 'hadoop': self.dist_config.hadoop_version, - } - self.verify_conditional_resources = utils.verify_resources('hadoop-%s' % self.cpu_arch) - - def spec(self): - """ - Generate the full spec for keeping charms in sync. - - NB: This has to be a callback instead of a plain property because it is - passed to the relations during construction of the Manager but needs to - properly reflect the Java version in the same hook invocation that installs - Java. - """ - java_version = unitdata.kv().get('java.version') - if java_version: - return { - 'vendor': self.dist_config.vendor, - 'hadoop': self.dist_config.hadoop_version, - 'java': java_version, - 'arch': self.cpu_arch, - } - else: - return None - - def is_installed(self): - return unitdata.kv().get('hadoop.base.installed') - - def install(self, force=False): - if not force and self.is_installed(): - return - self.configure_hosts_file() - self.dist_config.add_users() - self.dist_config.add_dirs() - self.dist_config.add_packages() - self.install_base_packages() - self.setup_hadoop_config() - self.configure_hadoop() - unitdata.kv().set('hadoop.base.installed', True) - unitdata.kv().flush(True) - - def configure_hosts_file(self): - """ - Add the unit's private-address to /etc/hosts to ensure that Java - can resolve the hostname of the server to its real IP address. - We derive our hostname from the unit_id, replacing / with -. - """ - private_address = hookenv.unit_get('private-address') - hostname = hookenv.local_unit().replace('/', '-') - - etc_hostname = Path('/etc/hostname') - etc_hostname.write_text(hostname) - check_call(["hostname", "-F", "/etc/hostname"]) - - etc_hosts = Path('/etc/hosts') - hosts = etc_hosts.lines() - line = '%s %s' % (private_address, hostname) - IP_pat = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}' - if not re.match(IP_pat, private_address): - line = '# %s # private-address did not return an IP' % line - if hosts[0] != line: - hosts.insert(0, line) - etc_hosts.write_lines(hosts) - - def install_base_packages(self): - with utils.disable_firewall(): - self.install_java() - self.install_hadoop() - - def install_java(self): - """ - Run the java-installer resource to install Java and determine - the JAVA_HOME and Java version. - - The java-installer must be idempotent and its only output (on stdout) - should be two lines: the JAVA_HOME path, and the Java version, respectively. - - If there is an error installing Java, the installer should exit - with a non-zero exit code. 
- """ - env = utils.read_etc_env() - java_installer = Path(jujuresources.resource_path('java-installer')) - java_installer.chmod(0o755) - output = check_output([java_installer], env=env) - java_home, java_version = map(str.strip, output.strip().split('\n')) - unitdata.kv().set('java.home', java_home) - unitdata.kv().set('java.version', java_version) - - def install_hadoop(self): - jujuresources.install('hadoop-%s' % - self.cpu_arch, - destination=self.dist_config.path('hadoop'), - skip_top_level=True) - - def setup_hadoop_config(self): - # copy default config into alternate dir - conf_dir = self.dist_config.path('hadoop') / 'etc/hadoop' - self.dist_config.path('hadoop_conf').rmtree_p() - conf_dir.copytree(self.dist_config.path('hadoop_conf')) - (self.dist_config.path('hadoop_conf') / 'slaves').remove_p() - mapred_site = self.dist_config.path('hadoop_conf') / 'mapred-site.xml' - if not mapred_site.exists(): - (self.dist_config.path('hadoop_conf') / 'mapred-site.xml.template').copy(mapred_site) - - def configure_hadoop(self): - java_home = Path(unitdata.kv().get('java.home')) - java_bin = java_home / 'bin' - hadoop_bin = self.dist_config.path('hadoop') / 'bin' - hadoop_sbin = self.dist_config.path('hadoop') / 'sbin' - with utils.environment_edit_in_place('/etc/environment') as env: - env['JAVA_HOME'] = java_home - if java_bin not in env['PATH']: - env['PATH'] = ':'.join([env['PATH'], java_bin]) - if hadoop_bin not in env['PATH']: - env['PATH'] = ':'.join([env['PATH'], hadoop_bin]) - if hadoop_sbin not in env['PATH']: - env['PATH'] = ':'.join([env['PATH'], hadoop_sbin]) - env['HADOOP_LIBEXEC_DIR'] = self.dist_config.path('hadoop') / 'libexec' - env['HADOOP_INSTALL'] = self.dist_config.path('hadoop') - env['HADOOP_HOME'] = self.dist_config.path('hadoop') - env['HADOOP_COMMON_HOME'] = self.dist_config.path('hadoop') - env['HADOOP_HDFS_HOME'] = self.dist_config.path('hadoop') - env['HADOOP_MAPRED_HOME'] = self.dist_config.path('hadoop') - env['HADOOP_YARN_HOME'] = self.dist_config.path('hadoop') - env['YARN_HOME'] = self.dist_config.path('hadoop') - env['HADOOP_CONF_DIR'] = self.dist_config.path('hadoop_conf') - env['YARN_CONF_DIR'] = self.dist_config.path('hadoop_conf') - env['YARN_LOG_DIR'] = self.dist_config.path('yarn_log_dir') - env['HDFS_LOG_DIR'] = self.dist_config.path('hdfs_log_dir') - env['HADOOP_LOG_DIR'] = self.dist_config.path('hdfs_log_dir') # for hadoop 2.2.0 only - env['MAPRED_LOG_DIR'] = '/var/log/hadoop/mapred' # should be moved to config, but could - env['MAPRED_PID_DIR'] = '/var/run/hadoop/mapred' # be destructive for mapreduce operation - - hadoop_env = self.dist_config.path('hadoop_conf') / 'hadoop-env.sh' - utils.re_edit_in_place(hadoop_env, { - r'export JAVA_HOME *=.*': 'export JAVA_HOME=%s' % java_home, - }) - - def register_slaves(self, relation): - """ - Add slaves to a hdfs or yarn master, determined by the relation name. - - :param str relation: 'datanode' for registering HDFS slaves; - 'nodemanager' for registering YARN slaves. - """ - slaves = helpers.all_ready_units(relation) - slaves_file = self.dist_config.path('hadoop_conf') / 'slaves' - slaves_file.write_lines( - [ - '# DO NOT EDIT', - '# This file is automatically managed by Juju', - ] + [ - data['hostname'] for slave, data in slaves - ] - ) - slaves_file.chown('ubuntu', 'hadoop') - - def run(self, user, command, *args, **kwargs): - """ - Run a Hadoop command as the `hdfs` user. 
- - :param str command: Command to run, prefixed with `bin/` or `sbin/` - :param list args: Additional args to pass to the command - """ - return utils.run_as(user, - self.dist_config.path('hadoop') / command, - *args, **kwargs) - - -class HDFS(object): - def __init__(self, hadoop_base): - self.hadoop_base = hadoop_base - - def stop_namenode(self): - self._hadoop_daemon('stop', 'namenode') - - def start_namenode(self): - if not utils.jps('NameNode'): - self._hadoop_daemon('start', 'namenode') - # Some hadoop processes take a bit of time to start - # we need to let them get to a point where they are - # ready to accept connections - increase the value for hadoop 2.4.1 - time.sleep(30) - - def stop_secondarynamenode(self): - self._hadoop_daemon('stop', 'secondarynamenode') - - def start_secondarynamenode(self): - if not utils.jps('SecondaryNameNode'): - self._hadoop_daemon('start', 'secondarynamenode') - # Some hadoop processes take a bit of time to start - # we need to let them get to a point where they are - # ready to accept connections - increase the value for hadoop 2.4.1 - time.sleep(30) - - def stop_datanode(self): - self._hadoop_daemon('stop', 'datanode') - - def start_datanode(self): - if not utils.jps('DataNode'): - self._hadoop_daemon('start', 'datanode') - - def _remote(self, relation): - """ - Return the hostname of the unit on the other end of the given - relation (derived from that unit's name) and the port used to talk - to that unit. - :param str relation: Name of the relation, e.g. "datanode" or "namenode" - """ - unit, data = helpers.any_ready_unit(relation) - host = unit.replace('/', '-') - return host, data['port'] - - def _local(self): - """ - Return the local hostname (which we derive from our unit name), - and namenode port from our dist.yaml - """ - host = hookenv.local_unit().replace('/', '-') - port = self.hadoop_base.dist_config.port('namenode') - return host, port - - def configure_namenode(self): - self.configure_hdfs_base(*self._local()) - cfg = self.hadoop_base.charm_config - dc = self.hadoop_base.dist_config - hdfs_site = dc.path('hadoop_conf') / 'hdfs-site.xml' - with utils.xmlpropmap_edit_in_place(hdfs_site) as props: - props['dfs.replication'] = cfg['dfs_replication'] - props['dfs.blocksize'] = int(cfg['dfs_blocksize']) - props['dfs.namenode.datanode.registration.ip-hostname-check'] = 'true' - props['dfs.namenode.http-address'] = '0.0.0.0:{}'.format(dc.port('nn_webapp_http')) - # TODO: support SSL - # props['dfs.namenode.https-address'] = '0.0.0.0:{}'.format(dc.port('nn_webapp_https')) - - def configure_secondarynamenode(self): - """ - Configure the Secondary Namenode when the apache-hadoop-hdfs-secondary - charm is deployed and related to apache-hadoop-hdfs-master. - - The only purpose of the secondary namenode is to perform periodic - checkpoints. The secondary name-node periodically downloads current - namenode image and edits log files, joins them into new image and - uploads the new image back to the (primary and the only) namenode. 
- """ - self.configure_hdfs_base(*self._remote("namenode")) - - def configure_datanode(self): - self.configure_hdfs_base(*self._remote("datanode")) - dc = self.hadoop_base.dist_config - hdfs_site = dc.path('hadoop_conf') / 'hdfs-site.xml' - with utils.xmlpropmap_edit_in_place(hdfs_site) as props: - props['dfs.datanode.http.address'] = '0.0.0.0:{}'.format(dc.port('dn_webapp_http')) - # TODO: support SSL - # props['dfs.datanode.https.address'] = '0.0.0.0:{}'.format(dc.port('dn_webapp_https')) - - def configure_client(self): - self.configure_hdfs_base(*self._remote("namenode")) - - def configure_hdfs_base(self, host, port): - dc = self.hadoop_base.dist_config - core_site = dc.path('hadoop_conf') / 'core-site.xml' - with utils.xmlpropmap_edit_in_place(core_site) as props: - props['fs.defaultFS'] = "hdfs://{host}:{port}".format(host=host, port=port) - props['hadoop.proxyuser.hue.hosts'] = "*" - props['hadoop.proxyuser.hue.groups'] = "*" - props['hadoop.proxyuser.oozie.groups'] = '*' - props['hadoop.proxyuser.oozie.hosts'] = '*' - hdfs_site = dc.path('hadoop_conf') / 'hdfs-site.xml' - with utils.xmlpropmap_edit_in_place(hdfs_site) as props: - props['dfs.webhdfs.enabled'] = "true" - props['dfs.namenode.name.dir'] = dc.path('hdfs_dir_base') / 'cache/hadoop/dfs/name' - props['dfs.datanode.data.dir'] = dc.path('hdfs_dir_base') / 'cache/hadoop/dfs/name' - props['dfs.permissions'] = 'false' # TODO - secure this hadoop installation! - - def format_namenode(self): - if unitdata.kv().get('hdfs.namenode.formatted'): - return - self.stop_namenode() - # Run without prompting; this will fail if the namenode has already - # been formatted -- we do not want to reformat existing data! - self._hdfs('namenode', '-format', '-noninteractive') - unitdata.kv().set('hdfs.namenode.formatted', True) - unitdata.kv().flush(True) - - def create_hdfs_dirs(self): - if unitdata.kv().get('hdfs.namenode.dirs.created'): - return - self._hdfs('dfs', '-mkdir', '-p', '/tmp/hadoop/mapred/staging') - self._hdfs('dfs', '-chmod', '-R', '1777', '/tmp/hadoop/mapred/staging') - self._hdfs('dfs', '-mkdir', '-p', '/tmp/hadoop-yarn/staging') - self._hdfs('dfs', '-chmod', '-R', '1777', '/tmp/hadoop-yarn') - self._hdfs('dfs', '-mkdir', '-p', '/user/ubuntu') - self._hdfs('dfs', '-chown', '-R', 'ubuntu', '/user/ubuntu') - # for JobHistory - self._hdfs('dfs', '-mkdir', '-p', '/mr-history/tmp') - self._hdfs('dfs', '-chmod', '-R', '1777', '/mr-history/tmp') - self._hdfs('dfs', '-mkdir', '-p', '/mr-history/done') - self._hdfs('dfs', '-chmod', '-R', '1777', '/mr-history/done') - self._hdfs('dfs', '-chown', '-R', 'mapred:hdfs', '/mr-history') - self._hdfs('dfs', '-mkdir', '-p', '/app-logs') - self._hdfs('dfs', '-chmod', '-R', '1777', '/app-logs') - self._hdfs('dfs', '-chown', 'yarn', '/app-logs') - unitdata.kv().set('hdfs.namenode.dirs.created', True) - unitdata.kv().flush(True) - - def register_slaves(self): - self.hadoop_base.register_slaves('datanode') - - def _hadoop_daemon(self, command, service): - self.hadoop_base.run('hdfs', 'sbin/hadoop-daemon.sh', - '--config', - self.hadoop_base.dist_config.path('hadoop_conf'), - command, service) - - def _hdfs(self, command, *args): - self.hadoop_base.run('hdfs', 'bin/hdfs', command, *args) - - -class YARN(object): - def __init__(self, hadoop_base): - self.hadoop_base = hadoop_base - - def stop_resourcemanager(self): - self._yarn_daemon('stop', 'resourcemanager') - - def start_resourcemanager(self): - if not utils.jps('ResourceManager'): - self._yarn_daemon('start', 'resourcemanager') - - def 
stop_jobhistory(self): - self._jobhistory_daemon('stop', 'historyserver') - - def start_jobhistory(self): - if utils.jps('JobHistoryServer'): - self._jobhistory_daemon('stop', 'historyserver') - self._jobhistory_daemon('start', 'historyserver') - - def stop_nodemanager(self): - self._yarn_daemon('stop', 'nodemanager') - - def start_nodemanager(self): - if not utils.jps('NodeManager'): - self._yarn_daemon('start', 'nodemanager') - - def _remote(self, relation): - """ - Return the hostname of the unit on the other end of the given - relation (derived from that unit's name) and the port used to talk - to that unit. - :param str relation: Name of the relation, e.g. "resourcemanager" or "nodemanager" - """ - unit, data = helpers.any_ready_unit(relation) - host = unit.replace('/', '-') - return host, data['port'] - - def _local(self): - """ - Return the local hostname (which we derive from our unit name), - and resourcemanager port from our dist.yaml - """ - host = hookenv.local_unit().replace('/', '-') - port = self.hadoop_base.dist_config.port('resourcemanager') - return host, port - - def configure_resourcemanager(self): - self.configure_yarn_base(*self._local()) - dc = self.hadoop_base.dist_config - yarn_site = dc.path('hadoop_conf') / 'yarn-site.xml' - with utils.xmlpropmap_edit_in_place(yarn_site) as props: - # 0.0.0.0 will listen on all interfaces, which is what we want on the server - props['yarn.resourcemanager.webapp.address'] = '0.0.0.0:{}'.format(dc.port('rm_webapp_http')) - # TODO: support SSL - # props['yarn.resourcemanager.webapp.https.address'] = '0.0.0.0:{}'.format(dc.port('rm_webapp_https')) - - def configure_jobhistory(self): - self.configure_yarn_base(*self._local()) - dc = self.hadoop_base.dist_config - mapred_site = dc.path('hadoop_conf') / 'mapred-site.xml' - with utils.xmlpropmap_edit_in_place(mapred_site) as props: - # 0.0.0.0 will listen on all interfaces, which is what we want on the server - props["mapreduce.jobhistory.address"] = "0.0.0.0:{}".format(dc.port('jobhistory')) - props["mapreduce.jobhistory.webapp.address"] = "0.0.0.0:{}".format(dc.port('jh_webapp_http')) - - def configure_nodemanager(self): - self.configure_yarn_base(*self._remote("nodemanager")) - - def configure_client(self): - self.configure_yarn_base(*self._remote("resourcemanager")) - - def configure_yarn_base(self, host, port): - dc = self.hadoop_base.dist_config - yarn_site = dc.path('hadoop_conf') / 'yarn-site.xml' - with utils.xmlpropmap_edit_in_place(yarn_site) as props: - props['yarn.nodemanager.aux-services'] = 'mapreduce_shuffle' - props['yarn.resourcemanager.hostname'] = '{}'.format(host) - props['yarn.resourcemanager.address'] = '{}:{}'.format(host, port) - props["yarn.log.server.url"] = "{}:{}/jobhistory/logs/".format(host, dc.port('rm_log')) - mapred_site = dc.path('hadoop_conf') / 'mapred-site.xml' - with utils.xmlpropmap_edit_in_place(mapred_site) as props: - props["mapreduce.jobhistory.address"] = "{}:{}".format(host, dc.port('jobhistory')) - props["mapreduce.framework.name"] = 'yarn' - - def install_demo(self): - if unitdata.kv().get('yarn.client.demo.installed'): - return - # Copy our demo (TeraSort) to the target location and set mode/owner - demo_source = 'scripts/terasort.sh' - demo_target = '/home/ubuntu/terasort.sh' - - Path(demo_source).copy(demo_target) - Path(demo_target).chmod(0o755) - Path(demo_target).chown('ubuntu', 'hadoop') - unitdata.kv().set('yarn.client.demo.installed', True) - unitdata.kv().flush(True) - - def register_slaves(self): - 
self.hadoop_base.register_slaves('nodemanager') - - def _yarn_daemon(self, command, service): - self.hadoop_base.run('yarn', 'sbin/yarn-daemon.sh', - '--config', - self.hadoop_base.dist_config.path('hadoop_conf'), - command, service) - - def _jobhistory_daemon(self, command, service): - # TODO refactor job history to separate class - self.hadoop_base.run('mapred', 'sbin/mr-jobhistory-daemon.sh', - '--config', - self.hadoop_base.dist_config.path('hadoop_conf'), - command, service) === removed file 'charmhelpers/contrib/bigdata/relations.py' --- charmhelpers/contrib/bigdata/relations.py 2015-12-07 20:40:11 +0000 +++ charmhelpers/contrib/bigdata/relations.py 1970-01-01 00:00:00 +0000 @@ -1,269 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -import json - -from charmhelpers.core import hookenv -from charmhelpers.core.charmframework.helpers import Relation -from charmhelpers.contrib.bigdata import utils - - -class SpecMatchingRelation(Relation): - """ - Relation base class that validates that a version and environment - between two related charms match, to prevent interoperability issues. - - This class adds a ``spec`` key to the ``required_keys`` and populates it - in :meth:`provide`. The ``spec`` value must be passed in to :meth:`__init__`. - - The ``spec`` should be a mapping (or a callback that returns a mapping) - which describes all aspects of the charm's environment or configuration - that might affect its interoperability with the remote charm. The charm - on the requires side of the relation will verify that all of the keys in - its ``spec`` are present and exactly equal on the provides side of the - relation. This does mean that the requires side can be a subset of the - provides side, but not the other way around. - - An example spec string might be:: - - { - 'arch': 'x86_64', - 'vendor': 'apache', - 'version': '2.4', - } - """ - def __init__(self, spec=None, *args, **kwargs): - """ - Create a new relation handler instance. - - :param str spec: Spec string that should capture version or environment - particulars which can cause issues if mismatched. - """ - super(SpecMatchingRelation, self).__init__(*args, **kwargs) - self._spec = spec - - @property - def spec(self): - if callable(self._spec): - return self._spec() - return self._spec - - def provide(self, remote_service, all_ready): - """ - Provide the ``spec`` data to the remote service. - - Subclasses *must* either delegate to this method (e.g., via `super()`) - or include ``'spec': json.dumps(self.spec)`` in the provided data themselves. - """ - data = super(SpecMatchingRelation, self).provide(remote_service, all_ready) - if self.spec: - data['spec'] = json.dumps(self.spec) - return data - - def is_ready(self): - """ - Validate the ``spec`` data from the connected units to ensure that - it matches the local ``spec``. 
- """ - if not super(SpecMatchingRelation, self).is_ready(): - return False - if not self.spec: - return True - for unit, data in self.filtered_data().iteritems(): - remote_spec = json.loads(data.get('spec', '{}')) - for k, v in self.spec.items(): - if v != remote_spec.get(k): - # TODO XXX Once extended status reporting is available, - # we should use that instead of erroring. - raise ValueError( - 'Spec mismatch with related unit %s: ' - '%r != %r' % (unit, data.get('spec'), json.dumps(self.spec))) - return True - - -class NameNode(SpecMatchingRelation): - """ - Relation which communicates the NameNode (HDFS) connection & status info. - - This is the relation that clients should use. - """ - relation_name = 'namenode' - required_keys = ['private-address', 'etc_hosts', 'port', 'ready'] - - def __init__(self, spec=None, port=None): - self.port = port # only needed for provides - utils.initialize_kv_host() - super(NameNode, self).__init__(spec) - - def provide(self, remote_service, all_ready): - data = super(NameNode, self).provide(remote_service, all_ready) - if all_ready and DataNode().is_ready(): - data.update({ - 'ready': 'true', - 'etc_hosts': utils.get_kv_hosts(), - 'port': self.port, - }) - return data - - -class NameNodeMaster(NameNode): - """ - Alternate NameNode relation for DataNodes. - """ - relation_name = 'datanode' - required_keys = ['private-address', 'etc_hosts', 'port', 'ready', 'ssh-key'] - - def provide(self, remote_service, all_ready): - data = super(NameNodeMaster, self).provide(remote_service, all_ready) - data.update({ - 'ssh-key': utils.get_ssh_key('hdfs'), - }) - return data - - def install_ssh_keys(self): - relation_values = self.filtered_data().values()[0] - ssh_key = relation_values.get('ssh-key') - utils.install_ssh_key('hdfs', ssh_key) - - -class ResourceManager(SpecMatchingRelation): - """ - Relation which communicates the ResourceManager (YARN) connection & status info. - - This is the relation that clients should use. - """ - relation_name = 'resourcemanager' - required_keys = ['private-address', 'etc_hosts', 'port', 'ready'] - - def __init__(self, spec=None, port=None): - self.port = port # only needed for provides - utils.initialize_kv_host() - super(ResourceManager, self).__init__(spec) - - def provide(self, remote_service, all_ready): - data = super(ResourceManager, self).provide(remote_service, all_ready) - if all_ready: - data.update({ - 'ready': 'true', - 'etc_hosts': utils.get_kv_hosts(), - 'port': self.port, - }) - return data - - -class ResourceManagerMaster(ResourceManager): - """ - Alternate ResourceManager relation for NodeManagers. - """ - relation_name = 'nodemanager' - required_keys = ['private-address', 'etc_hosts', 'port', 'ready', 'ssh-key'] - - def provide(self, remote_service, all_ready): - data = super(ResourceManagerMaster, self).provide(remote_service, all_ready) - data.update({ - 'ssh-key': utils.get_ssh_key('yarn'), - }) - return data - - def install_ssh_keys(self): - relation_values = self.filtered_data().values()[0] - ssh_key = relation_values.get('ssh-key') - utils.install_ssh_key('yarn', ssh_key) - - -class DataNode(Relation): - """ - Relation which communicates DataNode info back to NameNodes. 
- """ - relation_name = 'datanode' - required_keys = ['private-address', 'hostname'] - - def provide(self, remote_service, all_ready): - data = super(DataNode, self).provide(remote_service, all_ready) - hostname = hookenv.local_unit().replace('/', '-') - data.update({ - 'hostname': hostname, - }) - return data - - -class NodeManager(Relation): - """ - Relation which communicates NodeManager info back to ResourceManagers. - """ - relation_name = 'nodemanager' - required_keys = ['private-address', 'hostname'] - - def provide(self, remote_service, all_ready): - data = super(NodeManager, self).provide(remote_service, all_ready) - hostname = hookenv.local_unit().replace('/', '-') - data.update({ - 'hostname': hostname, - }) - return data - - -class HadoopPlugin(Relation): - relation_name = 'hadoop-plugin' - required_keys = ['private-address', 'hdfs-ready'] - - def __init__(self, *args, **kwargs): - super(HadoopPlugin, self).__init__(*args, **kwargs) - - def provide(self, remote_service, all_ready): - if not all_ready: - return {} - utils.wait_for_hdfs(400) # will error if timeout - return {'hdfs-ready': True} - - -class MySQL(Relation): - relation_name = 'db' - required_keys = ['host', 'database', 'user', 'password'] - - -class FlumeAgent(Relation): - relation_name = 'flume-agent' - required_keys = ['private-address', 'port'] - - def provide(self, remote_service, all_ready): - data = super(FlumeAgent, self).provide(remote_service, all_ready) - flume_protocol = hookenv.config('protocol') - if (flume_protocol not in ['avro']): - hookenv.log('Invalid flume protocol {}'.format(flume_protocol), hookenv.ERROR) - return data - data.update({ - 'protocol': hookenv.config('protocol'), - }) - return data - - -class Hive(Relation): - relation_name = 'hive' - required_keys = ['private-address', 'port', 'ready'] - - def __init__(self, port=None): - self.port = port # only needed for provides - super(Hive, self).__init__() - - def provide(self, remote_service, all_ready): - data = super(Hive, self).provide(remote_service, all_ready) - if all_ready: - data.update({ - 'ready': 'true', - 'port': self.port, - }) - return data === removed file 'charmhelpers/contrib/bigdata/utils.py' --- charmhelpers/contrib/bigdata/utils.py 2015-05-11 02:51:38 +0000 +++ charmhelpers/contrib/bigdata/utils.py 1970-01-01 00:00:00 +0000 @@ -1,438 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. 
- -import ast -import re -import time -import yaml -from contextlib import contextmanager -from subprocess import check_call, check_output, CalledProcessError -from xml.etree import ElementTree as ET -from xml.dom import minidom -from distutils.util import strtobool -from path import Path -from json import dumps - -from charmhelpers.core import unitdata -from charmhelpers.core import hookenv -from charmhelpers.core import host -from charmhelpers import fetch - - -class DistConfig(object): - """ - This class processes distribution-specific configuration options. - - Some configuration options are specific to the Hadoop distribution, - (e.g. Apache, Hortonworks, MapR, etc). These options are immutable and - must not change throughout the charm deployment lifecycle. - - Helper methods are provided for keys that require action. Presently, this - includes adding/removing directories, dependent packages, and groups/users. - Other required keys may be listed when instantiating this class, but this - will only validate these keys exist in the yaml; it will not provide any - helper functionality for unkown keys. - - :param str filename: File to process (default dist.yaml) - :param list required_keys: A list of keys required to be present in the yaml - - Example dist.yaml with supported keys: - vendor: '<name>' - hadoop_version: '<version>' - packages: - - '<package 1>' - - '<package 2>' - groups: - - '<name>' - users: - <user 1>: - groups: ['<primary>', '<group>', '<group>'] - <user 2>: - groups: ['<primary>'] - dirs: - <dir 1>: - path: '</path/to/dir>' - perms: 0777 - <dir 2>: - path: '{config[<option>]}' # value comes from config option - owner: '<user>' - group: '<group>' - perms: 0755 - ports: - <name1>: - port: <port> - exposed_on: <service> # optional - <name2>: - port: <port> - exposed_on: <service> # optional - """ - def __init__(self, filename='dist.yaml', required_keys=None): - self.yaml_file = filename - self.dist_config = yaml.load(Path(self.yaml_file).text()) - - # validate dist.yaml - missing_keys = set(required_keys or []) - set(self.dist_config.keys()) - if missing_keys: - raise ValueError('{} is missing required option{}: {}'.format( - filename, - 's' if len(missing_keys) > 1 else '', - ', '.join(missing_keys))) - - for opt in required_keys: - setattr(self, opt, self.dist_config[opt]) - - def path(self, key): - config = hookenv.config() - dirs = {name: self.dirs[name]['path'] for name in self.dirs.keys()} - levels = 0 - old_path = None - path = self.dirs[key]['path'] - while '{' in path and path != old_path: - levels += 1 - if levels > 100: - raise ValueError('Maximum level of nested dirs references exceeded for: {}'.format(key)) - old_path = path - path = path.format(config=config, dirs=dirs) - return Path(path) - - def port(self, key): - return self.ports.get(key, {}).get('port') - - def exposed_ports(self, service): - exposed = [] - for port in self.ports.values(): - if port.get('exposed_on') == service: - exposed.append(port['port']) - return exposed - - def add_dirs(self): - for name, details in self.dirs.items(): - host.mkdir( - self.path(name), - owner=details.get('owner', 'root'), - group=details.get('group', 'root'), - perms=details.get('perms', 0o755)) - - def add_packages(self): - with disable_firewall(): - fetch.apt_update() - fetch.apt_install(self.packages) - - def add_users(self): - for group in self.groups: - host.add_group(group) - for username, details in self.users.items(): - primary_group = None - groups = details.get('groups', []) - if groups: - primary_group = 
groups[0] - host.adduser(username, group=primary_group) - for group in groups: - host.add_user_to_group(username, group) - - def remove_dirs(self): - # TODO: no removal function exists in CH, just log what we would do. - for name in self.dirs.items(): - hookenv.log('noop: remove directory {0}'.format(name)) - - def remove_packages(self): - # TODO: no removal function exists in CH, just log what we would do. - for name in self.packages.items(): - hookenv.log('noop: remove package {0}'.format(name)) - - def remove_users(self): - # TODO: no removal function exists in CH, just log what we would do. - for user in self.users.items(): - hookenv.log('noop: remove user {0}'.format(user)) - for group in self.groups: - hookenv.log('noop: remove group {0}'.format(group)) - - -@contextmanager -def disable_firewall(): - """ - Temporarily disable the firewall, via ufw. - """ - status = check_output(['ufw', 'status']) - already_disabled = 'inactive' in status - if not already_disabled: - check_call(['ufw', 'disable']) - try: - yield - finally: - if not already_disabled: - check_call(['ufw', 'enable']) - - -def re_edit_in_place(filename, subs): - """ - Perform a set of in-place edits to a file. - - :param str filename: Name of file to edit - :param dict subs: Mapping of patterns to replacement strings - """ - with Path(filename).in_place() as (reader, writer): - for line in reader: - for pat, repl in subs.iteritems(): - line = re.sub(pat, repl, line) - writer.write(line) - - -@contextmanager -def xmlpropmap_edit_in_place(filename): - """ - Edit an XML property map (configuration) file in-place. - - This helper acts as a context manager which edits an XML file of the form: - - <configuration> - <property> - <name>property-name</name> - <value>property-value</value> - <description>Optional property description</description> - </property> - ... - </configuration> - - This context manager yields a dict containing the existing name/value - mappings. Properties can then be modified, added, or removed, and the - changes will be reflected in the file. - - Example usage: - - with xmlpropmap_edit_in_place('my.xml') as props: - props['foo'] = 'bar' - del props['removed'] - - Note that the file is not locked during the edits. - """ - tree = ET.parse(filename) - root = tree.getroot() - props = {} - for prop in root.findall('property'): - props[prop.find('name').text] = prop.find('value').text - old_props = set(props.keys()) - yield props - new_props = set(props.keys()) - added = new_props - old_props - modified = new_props & old_props - removed = old_props - new_props - for prop in root.findall('property'): - name = prop.find('name').text - if name in modified and props[name] is not None: - prop.find('value').text = str(props[name]) - elif name in removed: - root.remove(prop) - for name in added: - prop = ET.SubElement(root, 'property') - ET.SubElement(prop, 'name').text = name - ET.SubElement(prop, 'value').text = str(props[name]) - for node in tree.iter(): - node.tail = None - node.text = (node.text or '').strip() or None - prettied = minidom.parseString(ET.tostring(root)).toprettyxml(indent=' ') - Path(filename).write_text(prettied) - - -@contextmanager -def environment_edit_in_place(filename='/etc/environment'): - """ - Edit the `/etc/environment` file in-place. - - There is no standard definition for the format of `/etc/environment`, - but the convention, which this helper supports, is simple key-value - pairs, separated by `=`, with optionally quoted values. 
- - Note that this helper will implicitly quote all values. - - Also note that the file is not locked during the edits. - """ - etc_env = Path(filename) - lines = [l.strip().split('=') for l in etc_env.lines()] - data = {k.strip(): v.strip(' \'"') for k, v in lines} - yield data - etc_env.write_lines('{}="{}"'.format(k, v) for k, v in data.items()) - - -def normalize_strbool(value): - intbool = strtobool(value) - return str(bool(intbool)).lower() - - -def jps(name): - """ - Get PIDs for named Java processes, for any user. - """ - pat = re.sub(r'^(.)', r'^[^ ]*java .*[\1]', name) - try: - output = check_output(['sudo', 'pgrep', '-f', pat]) - except CalledProcessError: - return [] - return filter(None, map(str.strip, output.split('\n'))) - - -class TimeoutError(Exception): - pass - - -def read_etc_env(): - """ - Read /etc/environment and return it as a dict. - """ - etc_env = Path('/etc/environment') - env = {} - if etc_env.exists(): - for line in etc_env.lines(): - var, value = line.split('=') - env[var.strip()] = value.strip().strip('"') - return env - - -def run_as(user, command, *args, **kwargs): - """ - Run a command as a particular user, using ``/etc/environment`` and optionally - capturing and returning the output. - - Raises subprocess.CalledProcessError if command fails. - - :param str user: Username to run command as - :param str command: Command to run - :param list args: Additional args to pass to command - :param dict env: Additional env variables (will be merged with ``/etc/environment``) - :param bool capture_output: Capture and return output (default: False) - """ - parts = [command] + list(args) - quoted = ' '.join("'%s'" % p for p in parts) - env = read_etc_env() - if 'env' in kwargs: - env.update(kwargs['env']) - run = check_output if kwargs.get('capture_output') else check_call - return run(['su', user, '-c', quoted], env=env) - - -def update_etc_hosts(hosts): - ''' - Update /etc/hosts on the unit - - :param str hosts: json string of host dictionaries - ''' - etc_hosts = Path('/etc/hosts') - hosts_contents = etc_hosts.lines() - - for key, data in ast.literal_eval(hosts).items(): - line = '%s %s' % (data['private-address'], data['hostname']) - IP_pat = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}' - if not re.match(IP_pat, data['private-address']): - line = '# %s # INVALID IP' % line - for l, hosts_line in enumerate(hosts_contents): - if re.match(r'(# )?%s\s' % data['private-address'], hosts_line): - # update existing host - hosts_contents[l] = line - break - else: - # add new host - hosts_contents.append(line) - - # write new /etc/hosts - etc_hosts.write_lines(hosts_contents, append=False) - - -def initialize_kv_host(): - # get the hostname attrs from our local unit and update the kv store - local_ip = hookenv.unit_private_ip() - local_host = hookenv.local_unit().replace('/', '-') - update_kv_host(local_ip, local_host) - - -def get_kv_hosts(): - unit_kv = unitdata.kv() - # all our hosts in the kv are prefixed with 'etc_host.'; they'll come - # out of the kv as a unicode object, but convert them to a json string - # for ease of use later -- this string of hosts is what we set on - # various relations so units can update their local /etc/hosts file. - kv_hosts = dumps(unit_kv.getrange('etc_host')) - return kv_hosts - - -def update_kv_host(ip, host): - unit_kv = unitdata.kv() - - # store attrs in the kv as 'etc_host.<ip>'; kv.update will insert - # a new record or update any existing key with current data. 
- unit_kv.update({ip: {'private-address': ip, - 'hostname': host}}, - prefix="etc_host.") - unit_kv.flush(True) - - -def get_ssh_key(user): - sshdir = Path('/home/%s/.ssh' % user) - if not sshdir.exists(): - host.mkdir(sshdir, owner=user, group='hadoop', perms=0o755) - keyfile = sshdir / 'id_rsa' - pubfile = sshdir / 'id_rsa.pub' - authfile = sshdir / 'authorized_keys' - if not pubfile.exists(): - (sshdir / 'config').write_lines([ - 'Host *', - ' StrictHostKeyChecking no' - ], append=True) - check_call(['ssh-keygen', '-t', 'rsa', '-P', '', '-f', keyfile]) - host.chownr(sshdir, user, 'hadoop') - # allow ssh'ing to localhost; useful for things like start_dfs.sh - if not authfile.exists(): - Path.copy(pubfile, authfile) - return pubfile.text() - - -def install_ssh_key(user, ssh_key): - sshdir = Path('/home/%s/.ssh' % user) - if not sshdir.exists(): - host.mkdir(sshdir, owner=user, group='hadoop', perms=0o755) - Path(sshdir / 'authorized_keys').write_text(ssh_key, append=True) - host.chownr(sshdir, user, 'hadoop') - - -def wait_for_hdfs(timeout): - start = time.time() - while time.time() - start < timeout: - try: - output = run_as('hdfs', 'hdfs', 'dfsadmin', '-report', capture_output=True) - if 'Datanodes available' in output: - return True - except CalledProcessError as e: - output = e.output # probably a "connection refused"; wait and try again - time.sleep(2) - raise TimeoutError('Timed-out waiting for HDFS:\n%s' % output) - - -class verify_resources(object): - """ - Predicate for specific named resources, with useful rendering in the logs. - - :param str *which: One or more resource names to fetch & verify. Defaults to - all non-optional resources. - """ - def __init__(self, *which): - self.which = list(which) - - def __str__(self): - return '<resources %s>' % ', '.join(map(repr, self.which)) - - def __call__(self): - import jujuresources - mirror_url = hookenv.config('resources_mirror') - return jujuresources.fetch(self.which, mirror_url=mirror_url) === removed directory 'tests/contrib/bigdata' === removed file 'tests/contrib/bigdata/__init__.py' --- tests/contrib/bigdata/__init__.py 2015-01-22 14:30:19 +0000 +++ tests/contrib/bigdata/__init__.py 1970-01-01 00:00:00 +0000 @@ -1,15 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. === removed file 'tests/contrib/bigdata/test_relations.py' --- tests/contrib/bigdata/test_relations.py 2015-04-15 12:12:19 +0000 +++ tests/contrib/bigdata/test_relations.py 1970-01-01 00:00:00 +0000 @@ -1,70 +0,0 @@ -#!/usr/bin/env python -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
-# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - - -import unittest -import mock - -try: - from path import Path -except ImportError: - Path = None - relations = None -else: - from charmhelpers.contrib.bigdata import relations - - [email protected](Path, 'charmhelpers.contrib.bigdata requires path.py') -class TestSpecMatchingRelation(unittest.TestCase): - def setUp(self): - self.data = None - self.cache = {} - self.relation = relations.SpecMatchingRelation( - spec='valid', - relation_name='test', - required_keys=['foo'], - datastore=mock.MagicMock(), - cache=self.cache) - self.relation.unfiltered_data = lambda: self.data - - def test_ready(self): - self.data = {'unit/0': {'spec': 'valid', 'foo': 'bar'}} - self.assertTrue(self.relation.is_ready()) - - def test_not_ready(self): - self.data = {} - self.assertFalse(self.relation.is_ready()) - self.cache.clear() - self.data = {'unit/0': {}} - self.assertFalse(self.relation.is_ready()) - self.cache.clear() - self.data = {'unit/0': {'no-spec': 'valid', 'foo': 'bar'}} - self.assertFalse(self.relation.is_ready()) - self.cache.clear() - self.data = {'unit/0': {'spec': 'valid', 'no-foo': 'bar'}} - self.assertFalse(self.relation.is_ready()) - self.cache.clear() - self.data = {'unit/0': {'spec': 'invalid', 'no-foo': 'bar'}} - self.assertFalse(self.relation.is_ready()) - - def test_invalid(self): - self.data = {'unit/0': {'spec': 'invalid', 'foo': 'bar'}} - self.assertRaises(ValueError, self.relation.is_ready) - - -if __name__ == '__main__': - unittest.main() === removed file 'tests/contrib/bigdata/test_utils.py' --- tests/contrib/bigdata/test_utils.py 2015-01-22 14:30:19 +0000 +++ tests/contrib/bigdata/test_utils.py 1970-01-01 00:00:00 +0000 @@ -1,125 +0,0 @@ -#!/usr/bin/env python -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. 
- - -import os -import tempfile -import unittest -import mock - -try: - from path import Path -except ImportError: - Path = None - utils = None -else: - from charmhelpers.contrib.bigdata import utils - - -class TestError(RuntimeError): - pass - - [email protected](Path, 'charmhelpers.contrib.bigdata requires path.py') -class TestUtils(unittest.TestCase): - def test_disable_firewall(self): - with mock.patch.object(utils, 'check_call') as check_call: - with utils.disable_firewall(): - check_call.assert_called_once_with(['ufw', 'disable']) - check_call.assert_called_with(['ufw', 'enable']) - - def test_disable_firewall_on_error(self): - with mock.patch.object(utils, 'check_call') as check_call: - try: - with utils.disable_firewall(): - check_call.assert_called_once_with(['ufw', 'disable']) - raise TestError() - except TestError: - check_call.assert_called_with(['ufw', 'enable']) - - def test_re_edit_in_place(self): - fd, filename = tempfile.mkstemp() - os.close(fd) - tmp_file = Path(filename) - try: - tmp_file.write_text('foo\nbar\nqux') - utils.re_edit_in_place(tmp_file, { - r'oo$': 'OO', - r'a': 'A', - r'^qux$': 'QUX', - }) - self.assertEqual(tmp_file.text(), 'fOO\nbAr\nQUX') - finally: - tmp_file.remove() - - def test_xmlpropmap_edit_in_place(self): - fd, filename = tempfile.mkstemp() - os.close(fd) - tmp_file = Path(filename) - try: - tmp_file.write_text( - '<?xml version="1.0"?>\n' - '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>\n' - '\n' - '<!-- Put site-specific property overrides in this file. -->\n' - '\n' - '<configuration>\n' - ' <property>\n' - ' <name>modify.me</name>\n' - ' <value>1</value>\n' - ' <description>Property to be modified</description>\n' - ' </property>\n' - ' <property>\n' - ' <name>delete.me</name>\n' - ' <value>None</value>\n' - ' <description>Property to be removed</description>\n' - ' </property>\n' - ' <property>\n' - ' <name>do.not.modify.me</name>\n' - ' <value>0</value>\n' - ' <description>Property to *not* be modified</description>\n' - ' </property>\n' - '</configuration>') - with utils.xmlpropmap_edit_in_place(tmp_file) as props: - del props['delete.me'] - props['modify.me'] = 'one' - props['add.me'] = 'NEW' - self.assertEqual( - tmp_file.text(), - '<?xml version="1.0" ?>\n' - '<configuration>\n' - ' <property>\n' - ' <name>modify.me</name>\n' - ' <value>one</value>\n' - ' <description>Property to be modified</description>\n' - ' </property>\n' - ' <property>\n' - ' <name>do.not.modify.me</name>\n' - ' <value>0</value>\n' - ' <description>Property to *not* be modified</description>\n' - ' </property>\n' - ' <property>\n' - ' <name>add.me</name>\n' - ' <value>NEW</value>\n' - ' </property>\n' - '</configuration>\n') - finally: - tmp_file.remove() - - -if __name__ == '__main__': - unittest.main() === modified file 'tests/core/test_host.py' --- tests/core/test_host.py 2015-04-29 12:52:18 +0000 +++ tests/core/test_host.py 2015-12-08 19:13:38 +0000 @@ -272,19 +272,34 @@ getpwnam.assert_called_with(username) @patch('subprocess.check_call') + @patch('grp.getgrnam') + @patch('pwd.getpwnam') @patch.object(host, 'log') - def test_adds_a_user_to_a_group(self, log, check_call): + def test_adds_a_user_to_a_group(self, log, getpwnam, getgrnam, check_call): username = 'foo' group = 'bar' + getpwnam.return_value.pw_gid = 'ogid' + getgrnam.return_value.gr_gid = 'gid' + getgrnam.return_value.gr_mem = ['user'] host.add_user_to_group(username, group) - check_call.assert_called_with([ 'gpasswd', '-a', username, group ]) + check_call.reset_mock() + 
getpwnam.return_value.pw_gid = 'gid' + host.add_user_to_group(username, group) + assert not check_call.called + + check_call.reset_mock() + getpwnam.return_value.pw_gid = 'ogid' + getgrnam.return_value.gr_mem = ['user', 'foo'] + host.add_user_to_group(username, group) + assert not check_call.called + @patch('grp.getgrnam') @patch('subprocess.check_call') @patch.object(host, 'log') === removed file 'tests/core/test_services.py' --- tests/core/test_services.py 2015-04-03 14:53:10 +0000 +++ tests/core/test_services.py 1970-01-01 00:00:00 +0000 @@ -1,786 +0,0 @@ -import mock -import unittest -from charmhelpers.core import hookenv -from charmhelpers.core import services - - -class TestServiceManager(unittest.TestCase): - def setUp(self): - self.pcharm_dir = mock.patch.object(hookenv, 'charm_dir') - self.mcharm_dir = self.pcharm_dir.start() - self.mcharm_dir.return_value = 'charm_dir' - - def tearDown(self): - self.pcharm_dir.stop() - - def test_register(self): - manager = services.ServiceManager([ - {'service': 'service1', - 'foo': 'bar'}, - {'service': 'service2', - 'qux': 'baz'}, - ]) - self.assertEqual(manager.services, { - 'service1': {'service': 'service1', - 'foo': 'bar'}, - 'service2': {'service': 'service2', - 'qux': 'baz'}, - }) - - def test_register_preserves_order(self): - service_list = [dict(service='a'), dict(service='b')] - - # Test that the services list order is preserved by checking - # both forwards and backwards - only one of these will be - # dictionary order, and if both work we know order is being - # preserved. - manager = services.ServiceManager(service_list) - self.assertEqual(list(manager.services.keys()), ['a', 'b']) - manager = services.ServiceManager(reversed(service_list)) - self.assertEqual(list(manager.services.keys()), ['b', 'a']) - - @mock.patch.object(services.ServiceManager, 'reconfigure_services') - @mock.patch.object(services.ServiceManager, 'stop_services') - @mock.patch.object(hookenv, 'hook_name') - @mock.patch.object(hookenv, 'config') - def test_manage_stop(self, config, hook_name, stop_services, reconfigure_services): - manager = services.ServiceManager() - hook_name.return_value = 'stop' - manager.manage() - stop_services.assert_called_once_with() - assert not reconfigure_services.called - - @mock.patch.object(services.ServiceManager, 'provide_data') - @mock.patch.object(services.ServiceManager, 'reconfigure_services') - @mock.patch.object(services.ServiceManager, 'stop_services') - @mock.patch.object(hookenv, 'hook_name') - @mock.patch.object(hookenv, 'config') - def test_manage_other(self, config, hook_name, stop_services, reconfigure_services, provide_data): - manager = services.ServiceManager() - hook_name.return_value = 'config-changed' - manager.manage() - assert not stop_services.called - reconfigure_services.assert_called_once_with() - provide_data.assert_called_once_with() - - @mock.patch.object(hookenv, 'config') - def test_manage_config_saved(self, config): - config = config.return_value - config.implicit_save = True - manager = services.ServiceManager() - manager.manage() - self.assertTrue(config.save.called) - - @mock.patch.object(hookenv, 'config') - def test_manage_config_not_saved(self, config): - config = config.return_value - config.implicit_save = False - manager = services.ServiceManager() - manager.manage() - self.assertFalse(config.save.called) - - @mock.patch.object(services.ServiceManager, 'save_ready') - @mock.patch.object(services.ServiceManager, 'fire_event') - @mock.patch.object(services.ServiceManager, 'is_ready') - def 
test_reconfigure_ready(self, is_ready, fire_event, save_ready): - manager = services.ServiceManager([ - {'service': 'service1'}, {'service': 'service2'}]) - is_ready.return_value = True - manager.reconfigure_services() - is_ready.assert_has_calls([ - mock.call('service1'), - mock.call('service2'), - ], any_order=True) - fire_event.assert_has_calls([ - mock.call('data_ready', 'service1'), - mock.call('start', 'service1', default=[ - services.service_restart, - services.manage_ports]), - ], any_order=False) - fire_event.assert_has_calls([ - mock.call('data_ready', 'service2'), - mock.call('start', 'service2', default=[ - services.service_restart, - services.manage_ports]), - ], any_order=False) - save_ready.assert_has_calls([ - mock.call('service1'), - mock.call('service2'), - ], any_order=True) - - @mock.patch.object(services.ServiceManager, 'save_ready') - @mock.patch.object(services.ServiceManager, 'fire_event') - @mock.patch.object(services.ServiceManager, 'is_ready') - def test_reconfigure_ready_list(self, is_ready, fire_event, save_ready): - manager = services.ServiceManager([ - {'service': 'service1'}, {'service': 'service2'}]) - is_ready.return_value = True - manager.reconfigure_services('service3', 'service4') - self.assertEqual(is_ready.call_args_list, [ - mock.call('service3'), - mock.call('service4'), - ]) - self.assertEqual(fire_event.call_args_list, [ - mock.call('data_ready', 'service3'), - mock.call('start', 'service3', default=[ - services.service_restart, - services.open_ports]), - mock.call('data_ready', 'service4'), - mock.call('start', 'service4', default=[ - services.service_restart, - services.open_ports]), - ]) - self.assertEqual(save_ready.call_args_list, [ - mock.call('service3'), - mock.call('service4'), - ]) - - @mock.patch.object(services.ServiceManager, 'save_lost') - @mock.patch.object(services.ServiceManager, 'fire_event') - @mock.patch.object(services.ServiceManager, 'was_ready') - @mock.patch.object(services.ServiceManager, 'is_ready') - def test_reconfigure_not_ready(self, is_ready, was_ready, fire_event, save_lost): - manager = services.ServiceManager([ - {'service': 'service1'}, {'service': 'service2'}]) - is_ready.return_value = False - was_ready.return_value = False - manager.reconfigure_services() - is_ready.assert_has_calls([ - mock.call('service1'), - mock.call('service2'), - ], any_order=True) - fire_event.assert_has_calls([ - mock.call('stop', 'service1', default=[ - services.close_ports, - services.service_stop]), - mock.call('stop', 'service2', default=[ - services.close_ports, - services.service_stop]), - ], any_order=True) - save_lost.assert_has_calls([ - mock.call('service1'), - mock.call('service2'), - ], any_order=True) - - @mock.patch.object(services.ServiceManager, 'save_lost') - @mock.patch.object(services.ServiceManager, 'fire_event') - @mock.patch.object(services.ServiceManager, 'was_ready') - @mock.patch.object(services.ServiceManager, 'is_ready') - def test_reconfigure_no_longer_ready(self, is_ready, was_ready, fire_event, save_lost): - manager = services.ServiceManager([ - {'service': 'service1'}, {'service': 'service2'}]) - is_ready.return_value = False - was_ready.return_value = True - manager.reconfigure_services() - is_ready.assert_has_calls([ - mock.call('service1'), - mock.call('service2'), - ], any_order=True) - fire_event.assert_has_calls([ - mock.call('data_lost', 'service1'), - mock.call('stop', 'service1', default=[ - services.close_ports, - services.service_stop]), - ], any_order=False) - fire_event.assert_has_calls([ - 
mock.call('data_lost', 'service2'), - mock.call('stop', 'service2', default=[ - services.close_ports, - services.service_stop]), - ], any_order=False) - save_lost.assert_has_calls([ - mock.call('service1'), - mock.call('service2'), - ], any_order=True) - - @mock.patch.object(services.ServiceManager, 'fire_event') - def test_stop_services(self, fire_event): - manager = services.ServiceManager([ - {'service': 'service1'}, {'service': 'service2'}]) - manager.stop_services() - fire_event.assert_has_calls([ - mock.call('stop', 'service1', default=[ - services.close_ports, - services.service_stop]), - mock.call('stop', 'service2', default=[ - services.close_ports, - services.service_stop]), - ], any_order=True) - - @mock.patch.object(services.ServiceManager, 'fire_event') - def test_stop_services_list(self, fire_event): - manager = services.ServiceManager([ - {'service': 'service1'}, {'service': 'service2'}]) - manager.stop_services('service3', 'service4') - self.assertEqual(fire_event.call_args_list, [ - mock.call('stop', 'service3', default=[ - services.close_ports, - services.service_stop]), - mock.call('stop', 'service4', default=[ - services.close_ports, - services.service_stop]), - ]) - - def test_get_service(self): - service = {'service': 'test', 'test': 'test_service'} - manager = services.ServiceManager([service]) - self.assertEqual(manager.get_service('test'), service) - - def test_get_service_not_registered(self): - service = {'service': 'test', 'test': 'test_service'} - manager = services.ServiceManager([service]) - self.assertRaises(KeyError, manager.get_service, 'foo') - - @mock.patch.object(services.ServiceManager, 'get_service') - def test_fire_event_default(self, get_service): - get_service.return_value = {} - cb = mock.Mock() - manager = services.ServiceManager() - manager.fire_event('event', 'service', cb) - cb.assert_called_once_with('service') - - @mock.patch.object(services.ServiceManager, 'get_service') - def test_fire_event_default_list(self, get_service): - get_service.return_value = {} - cb = mock.Mock() - manager = services.ServiceManager() - manager.fire_event('event', 'service', [cb]) - cb.assert_called_once_with('service') - - @mock.patch.object(services.ServiceManager, 'get_service') - def test_fire_event_simple_callback(self, get_service): - cb = mock.Mock() - dcb = mock.Mock() - get_service.return_value = {'event': cb} - manager = services.ServiceManager() - manager.fire_event('event', 'service', dcb) - assert not dcb.called - cb.assert_called_once_with('service') - - @mock.patch.object(services.ServiceManager, 'get_service') - def test_fire_event_simple_callback_list(self, get_service): - cb = mock.Mock() - dcb = mock.Mock() - get_service.return_value = {'event': [cb]} - manager = services.ServiceManager() - manager.fire_event('event', 'service', dcb) - assert not dcb.called - cb.assert_called_once_with('service') - - @mock.patch.object(services.ManagerCallback, '__call__') - @mock.patch.object(services.ServiceManager, 'get_service') - def test_fire_event_manager_callback(self, get_service, mcall): - cb = services.ManagerCallback() - dcb = mock.Mock() - get_service.return_value = {'event': cb} - manager = services.ServiceManager() - manager.fire_event('event', 'service', dcb) - assert not dcb.called - mcall.assert_called_once_with(manager, 'service', 'event') - - @mock.patch.object(services.ManagerCallback, '__call__') - @mock.patch.object(services.ServiceManager, 'get_service') - def test_fire_event_manager_callback_list(self, get_service, mcall): - cb = 
services.ManagerCallback() - dcb = mock.Mock() - get_service.return_value = {'event': [cb]} - manager = services.ServiceManager() - manager.fire_event('event', 'service', dcb) - assert not dcb.called - mcall.assert_called_once_with(manager, 'service', 'event') - - @mock.patch.object(services.ServiceManager, 'get_service') - def test_is_ready(self, get_service): - get_service.side_effect = [ - {}, - {'required_data': [True]}, - {'required_data': [False]}, - {'required_data': [True, False]}, - ] - manager = services.ServiceManager() - assert manager.is_ready('foo') - assert manager.is_ready('bar') - assert not manager.is_ready('foo') - assert not manager.is_ready('foo') - get_service.assert_has_calls([mock.call('foo'), mock.call('bar')]) - - def test_load_ready_file_short_circuit(self): - manager = services.ServiceManager() - manager._ready = 'foo' - manager._load_ready_file() - self.assertEqual(manager._ready, 'foo') - - @mock.patch('os.path.exists') - @mock.patch.object(services.base, 'open', create=True) - def test_load_ready_file_new(self, mopen, exists): - manager = services.ServiceManager() - exists.return_value = False - manager._load_ready_file() - self.assertEqual(manager._ready, set()) - assert not mopen.called - - @mock.patch('json.load') - @mock.patch('os.path.exists') - @mock.patch.object(services.base, 'open', create=True) - def test_load_ready_file(self, mopen, exists, jload): - manager = services.ServiceManager() - exists.return_value = True - jload.return_value = ['bar'] - manager._load_ready_file() - self.assertEqual(manager._ready, set(['bar'])) - exists.assert_called_once_with('charm_dir/READY-SERVICES.json') - mopen.assert_called_once_with('charm_dir/READY-SERVICES.json') - - @mock.patch('json.dump') - @mock.patch.object(services.base, 'open', create=True) - def test_save_ready_file(self, mopen, jdump): - manager = services.ServiceManager() - manager._save_ready_file() - assert not mopen.called - manager._ready = set(['foo']) - manager._save_ready_file() - mopen.assert_called_once_with('charm_dir/READY-SERVICES.json', 'w') - jdump.assert_called_once_with(['foo'], mopen.return_value.__enter__()) - - @mock.patch.object(services.base.ServiceManager, '_save_ready_file') - @mock.patch.object(services.base.ServiceManager, '_load_ready_file') - def test_save_ready(self, _lrf, _srf): - manager = services.ServiceManager() - manager._ready = set(['foo']) - manager.save_ready('bar') - _lrf.assert_called_once_with() - self.assertEqual(manager._ready, set(['foo', 'bar'])) - _srf.assert_called_once_with() - - @mock.patch.object(services.base.ServiceManager, '_save_ready_file') - @mock.patch.object(services.base.ServiceManager, '_load_ready_file') - def test_save_lost(self, _lrf, _srf): - manager = services.ServiceManager() - manager._ready = set(['foo', 'bar']) - manager.save_lost('bar') - _lrf.assert_called_once_with() - self.assertEqual(manager._ready, set(['foo'])) - _srf.assert_called_once_with() - manager.save_lost('bar') - self.assertEqual(manager._ready, set(['foo'])) - - @mock.patch.object(services.base.ServiceManager, '_save_ready_file') - @mock.patch.object(services.base.ServiceManager, '_load_ready_file') - def test_was_ready(self, _lrf, _srf): - manager = services.ServiceManager() - manager._ready = set() - manager.save_ready('foo') - manager.save_ready('bar') - assert manager.was_ready('foo') - assert manager.was_ready('bar') - manager.save_lost('bar') - assert manager.was_ready('foo') - assert not manager.was_ready('bar') - - @mock.patch.object(services.base.hookenv, 
'relation_set') - @mock.patch.object(services.base.hookenv, 'hook_name') - def test_provide_data_no_match(self, hook_name, relation_set): - provider = mock.Mock() - provider.name = 'provided' - manager = services.ServiceManager([ - {'service': 'service', 'provided_data': [provider]} - ]) - hook_name.return_value = 'not-provided-relation-joined' - manager.provide_data() - assert not provider.provide_data.called - - hook_name.return_value = 'provided-relation-broken' - manager.provide_data() - assert not provider.provide_data.called - - @mock.patch.object(services.base.hookenv, 'relation_set') - @mock.patch.object(services.base.hookenv, 'hook_name') - def test_provide_data_not_ready(self, hook_name, relation_set): - provider = mock.Mock() - provider.name = 'provided' - data = provider.provide_data.return_value = {'data': True} - provider._is_ready.return_value = False - manager = services.ServiceManager([ - {'service': 'service', 'provided_data': [provider]} - ]) - hook_name.return_value = 'provided-relation-joined' - manager.provide_data() - assert not relation_set.called - provider._is_ready.assert_called_once_with(data) - - @mock.patch.object(services.base.hookenv, 'relation_set') - @mock.patch.object(services.base.hookenv, 'hook_name') - def test_provide_data_ready(self, hook_name, relation_set): - provider = mock.Mock() - provider.name = 'provided' - data = provider.provide_data.return_value = {'data': True} - provider._is_ready.return_value = True - manager = services.ServiceManager([ - {'service': 'service', 'provided_data': [provider]} - ]) - hook_name.return_value = 'provided-relation-changed' - manager.provide_data() - relation_set.assert_called_once_with(None, data) - - -class TestRelationContext(unittest.TestCase): - def setUp(self): - self.phookenv = mock.patch.object(services.helpers, 'hookenv') - self.mhookenv = self.phookenv.start() - self.mhookenv.relation_ids.return_value = [] - self.context = services.RelationContext() - self.context.name = 'http' - self.context.interface = 'http' - self.context.required_keys = ['foo', 'bar'] - self.mhookenv.reset_mock() - - def tearDown(self): - self.phookenv.stop() - - def test_no_relations(self): - self.context.get_data() - self.assertFalse(self.context.is_ready()) - self.assertEqual(self.context, {}) - self.mhookenv.relation_ids.assert_called_once_with('http') - - def test_no_units(self): - self.mhookenv.relation_ids.return_value = ['nginx'] - self.mhookenv.related_units.return_value = [] - self.context.get_data() - self.assertFalse(self.context.is_ready()) - self.assertEqual(self.context, {'http': []}) - - def test_incomplete(self): - self.mhookenv.relation_ids.return_value = ['nginx', 'apache'] - self.mhookenv.related_units.side_effect = lambda i: [i + '/0'] - self.mhookenv.relation_get.side_effect = [{}, {'foo': '1'}] - self.context.get_data() - self.assertFalse(bool(self.context)) - self.assertEqual(self.mhookenv.relation_get.call_args_list, [ - mock.call(rid='apache', unit='apache/0'), - mock.call(rid='nginx', unit='nginx/0'), - ]) - - def test_complete(self): - self.mhookenv.relation_ids.return_value = ['nginx', 'apache', 'tomcat'] - self.mhookenv.related_units.side_effect = lambda i: [i + '/0'] - self.mhookenv.relation_get.side_effect = [{'foo': '1'}, {'foo': '2', 'bar': '3'}, {}] - self.context.get_data() - self.assertTrue(self.context.is_ready()) - self.assertEqual(self.context, {'http': [ - { - 'foo': '2', - 'bar': '3', - }, - ]}) - self.mhookenv.relation_ids.assert_called_with('http') - 
self.assertEqual(self.mhookenv.relation_get.call_args_list, [ - mock.call(rid='apache', unit='apache/0'), - mock.call(rid='nginx', unit='nginx/0'), - mock.call(rid='tomcat', unit='tomcat/0'), - ]) - - def test_provide(self): - self.assertEqual(self.context.provide_data(), {}) - - -class TestHttpRelation(unittest.TestCase): - def setUp(self): - self.phookenv = mock.patch.object(services.helpers, 'hookenv') - self.mhookenv = self.phookenv.start() - - self.context = services.helpers.HttpRelation() - - def tearDown(self): - self.phookenv.stop() - - def test_provide_data(self): - self.mhookenv.unit_get.return_value = "127.0.0.1" - self.assertEqual(self.context.provide_data(), { - 'host': "127.0.0.1", - 'port': 80, - }) - - def test_complete(self): - self.mhookenv.relation_ids.return_value = ['website'] - self.mhookenv.related_units.side_effect = lambda i: [i + '/0'] - self.mhookenv.relation_get.side_effect = [{'host': '127.0.0.2', - 'port': 8080}] - self.context.get_data() - self.assertTrue(self.context.is_ready()) - self.assertEqual(self.context, {'website': [ - { - 'host': '127.0.0.2', - 'port': 8080, - }, - ]}) - - self.mhookenv.relation_ids.assert_called_with('website') - self.assertEqual(self.mhookenv.relation_get.call_args_list, [ - mock.call(rid='website', unit='website/0'), - ]) - - -class TestMysqlRelation(unittest.TestCase): - - def setUp(self): - self.phookenv = mock.patch.object(services.helpers, 'hookenv') - self.mhookenv = self.phookenv.start() - - self.context = services.helpers.MysqlRelation() - - def tearDown(self): - self.phookenv.stop() - - def test_complete(self): - self.mhookenv.relation_ids.return_value = ['db'] - self.mhookenv.related_units.side_effect = lambda i: [i + '/0'] - self.mhookenv.relation_get.side_effect = [{'host': '127.0.0.2', - 'user': 'mysql', - 'password': 'mysql', - 'database': 'mysql', - }] - self.context.get_data() - self.assertTrue(self.context.is_ready()) - self.assertEqual(self.context, {'db': [ - { - 'host': '127.0.0.2', - 'user': 'mysql', - 'password': 'mysql', - 'database': 'mysql', - }, - ]}) - - self.mhookenv.relation_ids.assert_called_with('db') - self.assertEqual(self.mhookenv.relation_get.call_args_list, [ - mock.call(rid='db', unit='db/0'), - ]) - - -class TestRequiredConfig(unittest.TestCase): - def setUp(self): - self.options = { - 'options': { - 'option1': { - 'type': 'string', - 'description': 'First option', - }, - 'option2': { - 'type': 'int', - 'default': 0, - 'description': 'Second option', - }, - }, - } - self.config = { - 'option1': None, - 'option2': 0, - } - self._pyaml = mock.patch.object(services.helpers, 'yaml') - self.myaml = self._pyaml.start() - self.myaml.load.side_effect = lambda fp: self.options - self._pconfig = mock.patch.object(hookenv, 'config') - self.mconfig = self._pconfig.start() - self.mconfig.side_effect = lambda: self.config - self._pcharm_dir = mock.patch.object(hookenv, 'charm_dir') - self.mcharm_dir = self._pcharm_dir.start() - self.mcharm_dir.return_value = 'charm_dir' - - def tearDown(self): - self._pyaml.stop() - self._pconfig.stop() - self._pcharm_dir.stop() - - def test_none_changed(self): - with mock.patch.object(services.helpers, 'open', mock.mock_open(), create=True): - context = services.helpers.RequiredConfig('option1', 'option2') - self.assertFalse(bool(context)) - self.assertEqual(context['config']['option1'], None) - self.assertEqual(context['config']['option2'], 0) - - def test_partial(self): - self.config['option1'] = 'value' - with mock.patch.object(services.helpers, 'open', 
mock.mock_open(), create=True): - context = services.helpers.RequiredConfig('option1', 'option2') - self.assertFalse(bool(context)) - self.assertEqual(context['config']['option1'], 'value') - self.assertEqual(context['config']['option2'], 0) - - def test_ready(self): - self.config['option1'] = 'value' - self.config['option2'] = 1 - with mock.patch.object(services.helpers, 'open', mock.mock_open(), create=True): - context = services.helpers.RequiredConfig('option1', 'option2') - self.assertTrue(bool(context)) - self.assertEqual(context['config']['option1'], 'value') - self.assertEqual(context['config']['option2'], 1) - - def test_none_empty(self): - self.config['option1'] = '' - self.config['option2'] = 1 - with mock.patch.object(services.helpers, 'open', mock.mock_open(), create=True): - context = services.helpers.RequiredConfig('option1', 'option2') - self.assertFalse(bool(context)) - self.assertEqual(context['config']['option1'], '') - self.assertEqual(context['config']['option2'], 1) - - -class TestStoredContext(unittest.TestCase): - @mock.patch.object(services.helpers.StoredContext, 'read_context') - @mock.patch.object(services.helpers.StoredContext, 'store_context') - @mock.patch('os.path.exists') - def test_new(self, exists, store_context, read_context): - exists.return_value = False - context = services.helpers.StoredContext('foo.yaml', {'key': 'val'}) - assert not read_context.called - store_context.assert_called_once_with('foo.yaml', {'key': 'val'}) - self.assertEqual(context, {'key': 'val'}) - - @mock.patch.object(services.helpers.StoredContext, 'read_context') - @mock.patch.object(services.helpers.StoredContext, 'store_context') - @mock.patch('os.path.exists') - def test_existing(self, exists, store_context, read_context): - exists.return_value = True - read_context.return_value = {'key': 'other'} - context = services.helpers.StoredContext('foo.yaml', {'key': 'val'}) - read_context.assert_called_once_with('foo.yaml') - assert not store_context.called - self.assertEqual(context, {'key': 'other'}) - - @mock.patch.object(hookenv, 'charm_dir', lambda: 'charm_dir') - @mock.patch.object(services.helpers.StoredContext, 'read_context') - @mock.patch.object(services.helpers, 'yaml') - @mock.patch('os.fchmod') - @mock.patch('os.path.exists') - def test_store_context(self, exists, fchmod, yaml, read_context): - exists.return_value = False - mopen = mock.mock_open() - with mock.patch.object(services.helpers, 'open', mopen, create=True): - services.helpers.StoredContext('foo.yaml', {'key': 'val'}) - mopen.assert_called_once_with('charm_dir/foo.yaml', 'w') - fchmod.assert_called_once_with(mopen.return_value.fileno(), 0o600) - yaml.dump.assert_called_once_with({'key': 'val'}, mopen.return_value) - - @mock.patch.object(hookenv, 'charm_dir', lambda: 'charm_dir') - @mock.patch.object(services.helpers.StoredContext, 'read_context') - @mock.patch.object(services.helpers, 'yaml') - @mock.patch('os.fchmod') - @mock.patch('os.path.exists') - def test_store_context_abs(self, exists, fchmod, yaml, read_context): - exists.return_value = False - mopen = mock.mock_open() - with mock.patch.object(services.helpers, 'open', mopen, create=True): - services.helpers.StoredContext('/foo.yaml', {'key': 'val'}) - mopen.assert_called_once_with('/foo.yaml', 'w') - - @mock.patch.object(hookenv, 'charm_dir', lambda: 'charm_dir') - @mock.patch.object(services.helpers, 'yaml') - @mock.patch('os.path.exists') - def test_read_context(self, exists, yaml): - exists.return_value = True - yaml.load.return_value = {'key': 
'other'} - mopen = mock.mock_open() - with mock.patch.object(services.helpers, 'open', mopen, create=True): - context = services.helpers.StoredContext('foo.yaml', {'key': 'val'}) - mopen.assert_called_once_with('charm_dir/foo.yaml', 'r') - yaml.load.assert_called_once_with(mopen.return_value) - self.assertEqual(context, {'key': 'other'}) - - @mock.patch.object(hookenv, 'charm_dir', lambda: 'charm_dir') - @mock.patch.object(services.helpers, 'yaml') - @mock.patch('os.path.exists') - def test_read_context_abs(self, exists, yaml): - exists.return_value = True - yaml.load.return_value = {'key': 'other'} - mopen = mock.mock_open() - with mock.patch.object(services.helpers, 'open', mopen, create=True): - context = services.helpers.StoredContext('/foo.yaml', {'key': 'val'}) - mopen.assert_called_once_with('/foo.yaml', 'r') - yaml.load.assert_called_once_with(mopen.return_value) - self.assertEqual(context, {'key': 'other'}) - - @mock.patch.object(hookenv, 'charm_dir', lambda: 'charm_dir') - @mock.patch.object(services.helpers, 'yaml') - @mock.patch('os.path.exists') - def test_read_context_empty(self, exists, yaml): - exists.return_value = True - yaml.load.return_value = None - mopen = mock.mock_open() - with mock.patch.object(services.helpers, 'open', mopen, create=True): - self.assertRaises(OSError, services.helpers.StoredContext, '/foo.yaml', {}) - - -class TestTemplateCallback(unittest.TestCase): - @mock.patch.object(services.helpers, 'templating') - def test_template_defaults(self, mtemplating): - manager = mock.Mock(**{'get_service.return_value': { - 'required_data': [{'foo': 'bar'}]}}) - self.assertRaises(TypeError, services.template, source='foo.yml') - callback = services.template(source='foo.yml', target='bar.yml') - assert isinstance(callback, services.ManagerCallback) - assert not mtemplating.render.called - callback(manager, 'test', 'event') - mtemplating.render.assert_called_once_with( - 'foo.yml', 'bar.yml', {'foo': 'bar'}, - 'root', 'root', 0o444) - - @mock.patch.object(services.helpers, 'templating') - def test_template_explicit(self, mtemplating): - manager = mock.Mock(**{'get_service.return_value': { - 'required_data': [{'foo': 'bar'}]}}) - callback = services.template( - source='foo.yml', target='bar.yml', - owner='user', group='group', perms=0o555 - ) - assert isinstance(callback, services.ManagerCallback) - assert not mtemplating.render.called - callback(manager, 'test', 'event') - mtemplating.render.assert_called_once_with( - 'foo.yml', 'bar.yml', {'foo': 'bar'}, - 'user', 'group', 0o555) - - -class TestPortsCallback(unittest.TestCase): - def setUp(self): - self.phookenv = mock.patch.object(services.base, 'hookenv') - self.mhookenv = self.phookenv.start() - self.mhookenv.relation_ids.return_value = [] - self.mhookenv.charm_dir.return_value = 'charm_dir' - self.popen = mock.patch.object(services.base, 'open', create=True) - self.mopen = self.popen.start() - - def tearDown(self): - self.phookenv.stop() - self.popen.stop() - - def test_no_ports(self): - manager = mock.Mock(**{'get_service.return_value': {}}) - services.PortManagerCallback()(manager, 'service', 'event') - assert not self.mhookenv.open_port.called - assert not self.mhookenv.close_port.called - - def test_open_ports(self): - manager = mock.Mock(**{'get_service.return_value': {'ports': [1, 2]}}) - services.open_ports(manager, 'service', 'start') - self.mhookenv.open_port.has_calls([mock.call(1), mock.call(2)]) - assert not self.mhookenv.close_port.called - - def test_close_ports(self): - manager = 
mock.Mock(**{'get_service.return_value': {'ports': [1, 2]}})
-        services.close_ports(manager, 'service', 'stop')
-        assert not self.mhookenv.open_port.called
-        self.mhookenv.close_port.has_calls([mock.call(1), mock.call(2)])
-
-    def test_close_old_ports(self):
-        self.mopen.return_value.read.return_value = '10,20'
-        manager = mock.Mock(**{'get_service.return_value': {'ports': [1, 2]}})
-        services.close_ports(manager, 'service', 'stop')
-        assert not self.mhookenv.open_port.called
-        self.mhookenv.close_port.has_calls([
-            mock.call(10),
-            mock.call(20),
-            mock.call(1),
-            mock.call(2)])
-
-if __name__ == '__main__':
-    unittest.main()

=== modified file 'tests/fetch/test_giturl.py'
--- tests/fetch/test_giturl.py 2014-12-02 18:04:16 +0000
+++ tests/fetch/test_giturl.py 2015-12-08 19:13:38 +0000
@@ -50,16 +50,26 @@
         self.assertNotEqual(result, True, url)
 
     @unittest.skipIf(six.PY3, 'git does not support Python 3')
-    @patch('git.Repo.clone_from')
-    def test_branch(self, _clone_from):
+    @patch.object(giturl, 'Repo')
+    def test_branch(self, Repo):
         dest_path = "/destination/path"
         branch = "master"
         for url in self.valid_urls:
             self.fh.remote_branch = MagicMock()
             self.fh.load_plugins = MagicMock()
-            self.fh.clone(url, dest_path, branch)
-
-            _clone_from.assert_called_with(url, dest_path)
+            repo = MagicMock()
+            Repo.side_effect = [giturl.InvalidGitRepositoryError, repo]
+            Repo.clone_from.return_value = repo
+
+            self.fh.clone(url, dest_path, branch)
+            Repo.clone_from.assert_called_once_with(url, dest_path)
+            repo.git.checkout.assert_called_once_with(branch)
+
+            Repo.clone_from.reset_mock()
+            repo.git.checkout.reset_mock()
+            self.fh.clone(url, dest_path, branch)
+            assert not Repo.clone_from.called
+            repo.git.checkout.assert_called_once_with(branch)
 
         for url in self.invalid_urls:
             with patch.dict('os.environ', {'CHARM_DIR': 'foo'}):
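For reviewers skimming the test_giturl.py change above: the new assertions describe a clone-or-reuse flow, where the handler constructs Repo on the destination path, falls back to Repo.clone_from only when that raises InvalidGitRepositoryError, and checks out the requested branch in either case. The snippet below is only a minimal sketch of that pattern against GitPython's public API (Repo, Repo.clone_from, repo.git.checkout, and the exceptions in git.exc); the helper name clone_or_checkout is illustrative and is not part of charm-helpers.

# Sketch only, not the charm-helpers implementation.
from git import Repo
from git.exc import InvalidGitRepositoryError, NoSuchPathError


def clone_or_checkout(source, dest, branch):
    """Clone source into dest unless it is already a git repo, then check out branch."""
    try:
        # Reuse an existing checkout rather than recloning.
        repo = Repo(dest)
    except (InvalidGitRepositoryError, NoSuchPathError):
        repo = Repo.clone_from(source, dest)
    # Either way, make sure the requested branch is checked out,
    # which is what the updated test asserts on both calls.
    repo.git.checkout(branch)
    return repo

Patching Repo at the giturl module level, as the reworked test does, lets a single test exercise both branches of this logic: the first pass through the loop simulates a fresh clone, the second pass an already-cloned destination.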

