Dan Watkins has proposed merging ~daniel-thewatkins/cloud-init/+git/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel.
Requested reviews:
  Server Team CI bot (server-team-bot): continuous-integration
  cloud-init commiters (cloud-init-dev)
Related bugs:
  Bug #1801364 in cloud-init: "persisting OpenStack metadata fails"
  https://bugs.launchpad.net/cloud-init/+bug/1801364
  Bug #1815109 in cloud-init: "cloud-final.service: "cloud-init modules --mode final" exit with "KeyError: 'modules-init'" after upgrade to version 18.2"
  https://bugs.launchpad.net/cloud-init/+bug/1815109
  Bug #1823084 in cloud-init: "DataSourceAzure doesn't rebuild network-config after reboot"
  https://bugs.launchpad.net/cloud-init/+bug/1823084

For more details, see:
https://code.launchpad.net/~daniel-thewatkins/cloud-init/+git/cloud-init/+merge/365803
--
Your team cloud-init commiters is requested to review the proposed merge of ~daniel-thewatkins/cloud-init/+git/cloud-init:ubuntu/devel into cloud-init:ubuntu/devel.
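Of the three linked bugs, #1815109 is addressed by the first hunk in the diff: status_wrapper() only backfilled per-mode entries in status.json when the file was missing entirely, so a status file written by an older cloud-init could lack a key for a newer mode and crash with a KeyError. The new code backfills every missing mode unconditionally. A minimal sketch of that logic (the mode names here are illustrative; the real tuple lives in cloudinit/cmd/main.py):

    # Sketch of the status.json backfill from the status_wrapper hunk below.
    # MODES and NULLSTATUS mirror the 'modes'/'nullstatus' names in
    # cloudinit/cmd/main.py; the values here are illustrative.
    MODES = ('init', 'init-local', 'modules-config', 'modules-final')
    NULLSTATUS = {'errors': [], 'start': None, 'finished': None}

    def backfill(status):
        if status is None:
            status = {'v1': {'datasource': None}}
        for mode in MODES:
            # Ensure an entry exists for every mode, whether or not an
            # older cloud-init wrote this status file.
            if mode not in status['v1']:
                status['v1'][mode] = NULLSTATUS.copy()
        return status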
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 933c019..a5446da 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -632,13 +632,14 @@ def status_wrapper(name, args, data_d=None, link_d=None): 'start': None, 'finished': None, } + if status is None: status = {'v1': {}} - for m in modes: - status['v1'][m] = nullstatus.copy() status['v1']['datasource'] = None - elif mode not in status['v1']: - status['v1'][mode] = nullstatus.copy() + + for m in modes: + if m not in status['v1']: + status['v1'][m] = nullstatus.copy() v1 = status['v1'] v1['stage'] = mode diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py index 5e082bd..f488123 100644 --- a/cloudinit/config/cc_ubuntu_advantage.py +++ b/cloudinit/config/cc_ubuntu_advantage.py @@ -1,150 +1,143 @@ -# Copyright (C) 2018 Canonical Ltd. -# # This file is part of cloud-init. See LICENSE file for license information. -"""Ubuntu advantage: manage ubuntu-advantage offerings from Canonical.""" +"""ubuntu_advantage: Configure Ubuntu Advantage support services""" -import sys from textwrap import dedent -from cloudinit import log as logging +import six + from cloudinit.config.schema import ( get_schema_doc, validate_cloudconfig_schema) +from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE -from cloudinit.subp import prepend_base_command from cloudinit import util -distros = ['ubuntu'] -frequency = PER_INSTANCE +UA_URL = 'https://ubuntu.com/advantage' -LOG = logging.getLogger(__name__) +distros = ['ubuntu'] schema = { 'id': 'cc_ubuntu_advantage', 'name': 'Ubuntu Advantage', - 'title': 'Install, configure and manage ubuntu-advantage offerings', + 'title': 'Configure Ubuntu Advantage support services', 'description': dedent("""\ - This module provides configuration options to setup ubuntu-advantage - subscriptions. - - .. note:: - Both ``commands`` value can be either a dictionary or a list. If - the configuration provided is a dictionary, the keys are only used - to order the execution of the commands and the dictionary is - merged with any vendor-data ubuntu-advantage configuration - provided. If a ``commands`` is provided as a list, any vendor-data - ubuntu-advantage ``commands`` are ignored. - - Ubuntu-advantage ``commands`` is a dictionary or list of - ubuntu-advantage commands to run on the deployed machine. - These commands can be used to enable or disable subscriptions to - various ubuntu-advantage products. See 'man ubuntu-advantage' for more - information on supported subcommands. - - .. note:: - Each command item can be a string or list. If the item is a list, - 'ubuntu-advantage' can be omitted and it will automatically be - inserted as part of the command. + Attach machine to an existing Ubuntu Advantage support contract and + enable or disable support services such as Livepatch, ESM, + FIPS and FIPS Updates. When attaching a machine to Ubuntu Advantage, + one can also specify services to enable. When the 'enable' + list is present, any named service will be enabled and all absent + services will remain disabled. + + Note that when enabling FIPS or FIPS updates you will need to schedule + a reboot to ensure the machine is running the FIPS-compliant kernel. + See :ref:`Power State Change` for information on how to configure + cloud-init to perform this reboot. 
"""), 'distros': distros, 'examples': [dedent("""\ - # Enable Extended Security Maintenance using your service auth token + # Attach the machine to a Ubuntu Advantage support contract with a + # UA contract token obtained from %s. + ubuntu_advantage: + token: <ua_contract_token> + """ % UA_URL), dedent("""\ + # Attach the machine to an Ubuntu Advantage support contract enabling + # only fips and esm services. Services will only be enabled if + # the environment supports said service. Otherwise warnings will + # be logged for incompatible services specified. ubuntu-advantage: - commands: - 00: ubuntu-advantage enable-esm <token> + token: <ua_contract_token> + enable: + - fips + - esm """), dedent("""\ - # Enable livepatch by providing your livepatch token + # Attach the machine to an Ubuntu Advantage support contract and enable + # the FIPS service. Perform a reboot once cloud-init has + # completed. + power_state: + mode: reboot ubuntu-advantage: - commands: - 00: ubuntu-advantage enable-livepatch <livepatch-token> - - """), dedent("""\ - # Convenience: the ubuntu-advantage command can be omitted when - # specifying commands as a list and 'ubuntu-advantage' will - # automatically be prepended. - # The following commands are equivalent - ubuntu-advantage: - commands: - 00: ['enable-livepatch', 'my-token'] - 01: ['ubuntu-advantage', 'enable-livepatch', 'my-token'] - 02: ubuntu-advantage enable-livepatch my-token - 03: 'ubuntu-advantage enable-livepatch my-token' - """)], + token: <ua_contract_token> + enable: + - fips + """)], 'frequency': PER_INSTANCE, 'type': 'object', 'properties': { - 'ubuntu-advantage': { + 'ubuntu_advantage': { 'type': 'object', 'properties': { - 'commands': { - 'type': ['object', 'array'], # Array of strings or dict - 'items': { - 'oneOf': [ - {'type': 'array', 'items': {'type': 'string'}}, - {'type': 'string'}] - }, - 'additionalItems': False, # Reject non-string & non-list - 'minItems': 1, - 'minProperties': 1, + 'enable': { + 'type': 'array', + 'items': {'type': 'string'}, + }, + 'token': { + 'type': 'string', + 'description': ( + 'A contract token obtained from %s.' % UA_URL) } }, - 'additionalProperties': False, # Reject keys not in schema - 'required': ['commands'] + 'required': ['token'], + 'additionalProperties': False } } } -# TODO schema for 'assertions' and 'commands' are too permissive at the moment. -# Once python-jsonschema supports schema draft 6 add support for arbitrary -# object keys with 'patternProperties' constraint to validate string values. - __doc__ = get_schema_doc(schema) # Supplement python help() -UA_CMD = "ubuntu-advantage" - - -def run_commands(commands): - """Run the commands provided in ubuntu-advantage:commands config. +LOG = logging.getLogger(__name__) - Commands are run individually. Any errors are collected and reported - after attempting all commands. - @param commands: A list or dict containing commands to run. Keys of a - dict will be used to order the commands provided as dict values. 
- """ - if not commands: - return - LOG.debug('Running user-provided ubuntu-advantage commands') - if isinstance(commands, dict): - # Sort commands based on dictionary key - commands = [v for _, v in sorted(commands.items())] - elif not isinstance(commands, list): - raise TypeError( - 'commands parameter was not a list or dict: {commands}'.format( - commands=commands)) - - fixed_ua_commands = prepend_base_command('ubuntu-advantage', commands) - - cmd_failures = [] - for command in fixed_ua_commands: - shell = isinstance(command, str) - try: - util.subp(command, shell=shell, status_cb=sys.stderr.write) - except util.ProcessExecutionError as e: - cmd_failures.append(str(e)) - if cmd_failures: - msg = ( - 'Failures running ubuntu-advantage commands:\n' - '{cmd_failures}'.format( - cmd_failures=cmd_failures)) +def configure_ua(token=None, enable=None): + """Call ua commandline client to attach or enable services.""" + error = None + if not token: + error = ('ubuntu_advantage: token must be provided') + LOG.error(error) + raise RuntimeError(error) + + if enable is None: + enable = [] + elif isinstance(enable, six.string_types): + LOG.warning('ubuntu_advantage: enable should be a list, not' + ' a string; treating as a single enable') + enable = [enable] + elif not isinstance(enable, list): + LOG.warning('ubuntu_advantage: enable should be a list, not' + ' a %s; skipping enabling services', + type(enable).__name__) + enable = [] + + attach_cmd = ['ua', 'attach', token] + LOG.debug('Attaching to Ubuntu Advantage. %s', ' '.join(attach_cmd)) + try: + util.subp(attach_cmd) + except util.ProcessExecutionError as e: + msg = 'Failure attaching Ubuntu Advantage:\n{error}'.format( + error=str(e)) util.logexc(LOG, msg) raise RuntimeError(msg) + enable_errors = [] + for service in enable: + try: + cmd = ['ua', 'enable', service] + util.subp(cmd, capture=True) + except util.ProcessExecutionError as e: + enable_errors.append((service, e)) + if enable_errors: + for service, error in enable_errors: + msg = 'Failure enabling "{service}":\n{error}'.format( + service=service, error=str(error)) + util.logexc(LOG, msg) + raise RuntimeError( + 'Failure enabling Ubuntu Advantage service(s): {}'.format( + ', '.join('"{}"'.format(service) + for service, _ in enable_errors))) def maybe_install_ua_tools(cloud): """Install ubuntu-advantage-tools if not present.""" - if util.which('ubuntu-advantage'): + if util.which('ua'): return try: cloud.distro.update_package_sources() @@ -159,14 +152,28 @@ def maybe_install_ua_tools(cloud): def handle(name, cfg, cloud, log, args): - cfgin = cfg.get('ubuntu-advantage') - if cfgin is None: - LOG.debug(("Skipping module named %s," - " no 'ubuntu-advantage' key in configuration"), name) + ua_section = None + if 'ubuntu-advantage' in cfg: + LOG.warning('Deprecated configuration key "ubuntu-advantage" provided.' + ' Expected underscore delimited "ubuntu_advantage"; will' + ' attempt to continue.') + ua_section = cfg['ubuntu-advantage'] + if 'ubuntu_advantage' in cfg: + ua_section = cfg['ubuntu_advantage'] + if ua_section is None: + LOG.debug("Skipping module named %s," + " no 'ubuntu_advantage' configuration found", name) return - validate_cloudconfig_schema(cfg, schema) + if 'commands' in ua_section: + msg = ( + 'Deprecated configuration "ubuntu-advantage: commands" provided.' 
+ ' Expected "token"') + LOG.error(msg) + raise RuntimeError(msg) + maybe_install_ua_tools(cloud) - run_commands(cfgin.get('commands', [])) + configure_ua(token=ua_section.get('token'), + enable=ua_section.get('enable')) # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py new file mode 100644 index 0000000..91feb60 --- /dev/null +++ b/cloudinit/config/cc_ubuntu_drivers.py @@ -0,0 +1,112 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Ubuntu Drivers: Interact with third party drivers in Ubuntu.""" + +from textwrap import dedent + +from cloudinit.config.schema import ( + get_schema_doc, validate_cloudconfig_schema) +from cloudinit import log as logging +from cloudinit.settings import PER_INSTANCE +from cloudinit import type_utils +from cloudinit import util + +LOG = logging.getLogger(__name__) + +frequency = PER_INSTANCE +distros = ['ubuntu'] +schema = { + 'id': 'cc_ubuntu_drivers', + 'name': 'Ubuntu Drivers', + 'title': 'Interact with third party drivers in Ubuntu.', + 'description': dedent("""\ + This module interacts with the 'ubuntu-drivers' command to install + third party driver packages."""), + 'distros': distros, + 'examples': [dedent("""\ + drivers: + nvidia: + license-accepted: true + """)], + 'frequency': frequency, + 'type': 'object', + 'properties': { + 'drivers': { + 'type': 'object', + 'additionalProperties': False, + 'properties': { + 'nvidia': { + 'type': 'object', + 'additionalProperties': False, + 'required': ['license-accepted'], + 'properties': { + 'license-accepted': { + 'type': 'boolean', + 'description': ("Do you accept the NVIDIA driver" + " license?"), + }, + 'version': { + 'type': 'string', + 'description': ( + 'The version of the driver to install (e.g.' + ' "390", "410"). Defaults to the latest' + ' version.'), + }, + }, + }, + }, + }, + }, +} +OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = ( + "ubuntu-drivers: error: argument <command>: invalid choice: 'install'") + +__doc__ = get_schema_doc(schema) # Supplement python help() + + +def install_drivers(cfg, pkg_install_func): + if not isinstance(cfg, dict): + raise TypeError( + "'drivers' config expected dict, found '%s': %s" % + (type_utils.obj_name(cfg), cfg)) + + cfgpath = 'nvidia/license-accepted' + # Call translate_bool to ensure that we treat string values like "yes" as + # acceptance and _don't_ treat string values like "nah" as acceptance + # because they're True-ish + nv_acc = util.translate_bool(util.get_cfg_by_path(cfg, cfgpath)) + if not nv_acc: + LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc) + return + + if not util.which('ubuntu-drivers'): + LOG.debug("'ubuntu-drivers' command not available. " + "Installing ubuntu-drivers-common") + pkg_install_func(['ubuntu-drivers-common']) + + driver_arg = 'nvidia' + version_cfg = util.get_cfg_by_path(cfg, 'nvidia/version') + if version_cfg: + driver_arg += ':{}'.format(version_cfg) + + LOG.debug("Installing NVIDIA drivers (%s=%s, version=%s)", + cfgpath, nv_acc, version_cfg if version_cfg else 'latest') + + try: + util.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg]) + except util.ProcessExecutionError as exc: + if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr: + LOG.warning('the available version of ubuntu-drivers is' + ' too old to perform requested driver installation') + elif 'No drivers found for installation.' 
in exc.stdout: + LOG.warning('ubuntu-drivers found no drivers for installation') + raise + + +def handle(name, cfg, cloud, log, _args): + if "drivers" not in cfg: + log.debug("Skipping module named %s, no 'drivers' key in config", name) + return + + validate_cloudconfig_schema(cfg, schema) + install_drivers(cfg['drivers'], cloud.distro.install_packages) diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py index b7cf9be..8c4161e 100644 --- a/cloudinit/config/tests/test_ubuntu_advantage.py +++ b/cloudinit/config/tests/test_ubuntu_advantage.py @@ -1,10 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. -import re -from six import StringIO - from cloudinit.config.cc_ubuntu_advantage import ( - handle, maybe_install_ua_tools, run_commands, schema) + configure_ua, handle, maybe_install_ua_tools, schema) from cloudinit.config.schema import validate_cloudconfig_schema from cloudinit import util from cloudinit.tests.helpers import ( @@ -20,90 +17,120 @@ class FakeCloud(object): self.distro = distro -class TestRunCommands(CiTestCase): +class TestConfigureUA(CiTestCase): with_logs = True allowed_subp = [CiTestCase.SUBP_SHELL_TRUE] def setUp(self): - super(TestRunCommands, self).setUp() + super(TestConfigureUA, self).setUp() self.tmp = self.tmp_dir() @mock.patch('%s.util.subp' % MPATH) - def test_run_commands_on_empty_list(self, m_subp): - """When provided with an empty list, run_commands does nothing.""" - run_commands([]) - self.assertEqual('', self.logs.getvalue()) - m_subp.assert_not_called() - - def test_run_commands_on_non_list_or_dict(self): - """When provided an invalid type, run_commands raises an error.""" - with self.assertRaises(TypeError) as context_manager: - run_commands(commands="I'm Not Valid") + def test_configure_ua_attach_error(self, m_subp): + """Errors from ua attach command are raised.""" + m_subp.side_effect = util.ProcessExecutionError( + 'Invalid token SomeToken') + with self.assertRaises(RuntimeError) as context_manager: + configure_ua(token='SomeToken') self.assertEqual( - "commands parameter was not a list or dict: I'm Not Valid", + 'Failure attaching Ubuntu Advantage:\nUnexpected error while' + ' running command.\nCommand: -\nExit code: -\nReason: -\n' + 'Stdout: Invalid token SomeToken\nStderr: -', str(context_manager.exception)) - def test_run_command_logs_commands_and_exit_codes_to_stderr(self): - """All exit codes are logged to stderr.""" - outfile = self.tmp_path('output.log', dir=self.tmp) - - cmd1 = 'echo "HI" >> %s' % outfile - cmd2 = 'bogus command' - cmd3 = 'echo "MOM" >> %s' % outfile - commands = [cmd1, cmd2, cmd3] - - mock_path = '%s.sys.stderr' % MPATH - with mock.patch(mock_path, new_callable=StringIO) as m_stderr: - with self.assertRaises(RuntimeError) as context_manager: - run_commands(commands=commands) - - self.assertIsNotNone( - re.search(r'bogus: (command )?not found', - str(context_manager.exception)), - msg='Expected bogus command not found') - expected_stderr_log = '\n'.join([ - 'Begin run command: {cmd}'.format(cmd=cmd1), - 'End run command: exit(0)', - 'Begin run command: {cmd}'.format(cmd=cmd2), - 'ERROR: End run command: exit(127)', - 'Begin run command: {cmd}'.format(cmd=cmd3), - 'End run command: exit(0)\n']) - self.assertEqual(expected_stderr_log, m_stderr.getvalue()) - - def test_run_command_as_lists(self): - """When commands are specified as a list, run them in order.""" - outfile = self.tmp_path('output.log', dir=self.tmp) - - cmd1 = 'echo "HI" >> 
%s' % outfile - cmd2 = 'echo "MOM" >> %s' % outfile - commands = [cmd1, cmd2] - with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO): - run_commands(commands=commands) + @mock.patch('%s.util.subp' % MPATH) + def test_configure_ua_attach_with_token(self, m_subp): + """When token is provided, attach the machine to ua using the token.""" + configure_ua(token='SomeToken') + m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken']) + self.assertEqual( + 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', + self.logs.getvalue()) + + @mock.patch('%s.util.subp' % MPATH) + def test_configure_ua_attach_on_service_error(self, m_subp): + """all services should be enabled and then any failures raised""" + def fake_subp(cmd, capture=None): + fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']] + if cmd in fail_cmds and capture: + svc = cmd[-1] + raise util.ProcessExecutionError( + 'Invalid {} credentials'.format(svc.upper())) + + m_subp.side_effect = fake_subp + + with self.assertRaises(RuntimeError) as context_manager: + configure_ua(token='SomeToken', enable=['esm', 'cc', 'fips']) + self.assertEqual( + m_subp.call_args_list, + [mock.call(['ua', 'attach', 'SomeToken']), + mock.call(['ua', 'enable', 'esm'], capture=True), + mock.call(['ua', 'enable', 'cc'], capture=True), + mock.call(['ua', 'enable', 'fips'], capture=True)]) self.assertIn( - 'DEBUG: Running user-provided ubuntu-advantage commands', + 'WARNING: Failure enabling "esm":\nUnexpected error' + ' while running command.\nCommand: -\nExit code: -\nReason: -\n' + 'Stdout: Invalid ESM credentials\nStderr: -\n', self.logs.getvalue()) - self.assertEqual('HI\nMOM\n', util.load_file(outfile)) self.assertIn( - 'WARNING: Non-ubuntu-advantage commands in ubuntu-advantage' - ' config:', + 'WARNING: Failure enabling "cc":\nUnexpected error' + ' while running command.\nCommand: -\nExit code: -\nReason: -\n' + 'Stdout: Invalid CC credentials\nStderr: -\n', + self.logs.getvalue()) + self.assertEqual( + 'Failure enabling Ubuntu Advantage service(s): "esm", "cc"', + str(context_manager.exception)) + + @mock.patch('%s.util.subp' % MPATH) + def test_configure_ua_attach_with_empty_services(self, m_subp): + """When services is an empty list, do not auto-enable attach.""" + configure_ua(token='SomeToken', enable=[]) + m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken']) + self.assertEqual( + 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', self.logs.getvalue()) - def test_run_command_dict_sorted_as_command_script(self): - """When commands are a dict, sort them and run.""" - outfile = self.tmp_path('output.log', dir=self.tmp) - cmd1 = 'echo "HI" >> %s' % outfile - cmd2 = 'echo "MOM" >> %s' % outfile - commands = {'02': cmd1, '01': cmd2} - with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO): - run_commands(commands=commands) + @mock.patch('%s.util.subp' % MPATH) + def test_configure_ua_attach_with_specific_services(self, m_subp): + """When services a list, only enable specific services.""" + configure_ua(token='SomeToken', enable=['fips']) + self.assertEqual( + m_subp.call_args_list, + [mock.call(['ua', 'attach', 'SomeToken']), + mock.call(['ua', 'enable', 'fips'], capture=True)]) + self.assertEqual( + 'DEBUG: Attaching to Ubuntu Advantage. 
ua attach SomeToken\n', + self.logs.getvalue()) + + @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) + @mock.patch('%s.util.subp' % MPATH) + def test_configure_ua_attach_with_string_services(self, m_subp): + """When services a string, treat as singleton list and warn""" + configure_ua(token='SomeToken', enable='fips') + self.assertEqual( + m_subp.call_args_list, + [mock.call(['ua', 'attach', 'SomeToken']), + mock.call(['ua', 'enable', 'fips'], capture=True)]) + self.assertEqual( + 'WARNING: ubuntu_advantage: enable should be a list, not a' + ' string; treating as a single enable\n' + 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', + self.logs.getvalue()) - expected_messages = [ - 'DEBUG: Running user-provided ubuntu-advantage commands'] - for message in expected_messages: - self.assertIn(message, self.logs.getvalue()) - self.assertEqual('MOM\nHI\n', util.load_file(outfile)) + @mock.patch('%s.util.subp' % MPATH) + def test_configure_ua_attach_with_weird_services(self, m_subp): + """When services not string or list, warn but still attach""" + configure_ua(token='SomeToken', enable={'deffo': 'wont work'}) + self.assertEqual( + m_subp.call_args_list, + [mock.call(['ua', 'attach', 'SomeToken'])]) + self.assertEqual( + 'WARNING: ubuntu_advantage: enable should be a list, not a' + ' dict; skipping enabling services\n' + 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', + self.logs.getvalue()) @skipUnlessJsonSchema() @@ -112,90 +139,50 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin): with_logs = True schema = schema - def test_schema_warns_on_ubuntu_advantage_not_as_dict(self): - """If ubuntu-advantage configuration is not a dict, emit a warning.""" - validate_cloudconfig_schema({'ubuntu-advantage': 'wrong type'}, schema) + @mock.patch('%s.maybe_install_ua_tools' % MPATH) + @mock.patch('%s.configure_ua' % MPATH) + def test_schema_warns_on_ubuntu_advantage_not_dict(self, _cfg, _): + """If ubuntu_advantage configuration is not a dict, emit a warning.""" + validate_cloudconfig_schema({'ubuntu_advantage': 'wrong type'}, schema) self.assertEqual( - "WARNING: Invalid config:\nubuntu-advantage: 'wrong type' is not" + "WARNING: Invalid config:\nubuntu_advantage: 'wrong type' is not" " of type 'object'\n", self.logs.getvalue()) - @mock.patch('%s.run_commands' % MPATH) - def test_schema_disallows_unknown_keys(self, _): - """Unknown keys in ubuntu-advantage configuration emit warnings.""" + @mock.patch('%s.maybe_install_ua_tools' % MPATH) + @mock.patch('%s.configure_ua' % MPATH) + def test_schema_disallows_unknown_keys(self, _cfg, _): + """Unknown keys in ubuntu_advantage configuration emit warnings.""" validate_cloudconfig_schema( - {'ubuntu-advantage': {'commands': ['ls'], 'invalid-key': ''}}, + {'ubuntu_advantage': {'token': 'winner', 'invalid-key': ''}}, schema) self.assertIn( - 'WARNING: Invalid config:\nubuntu-advantage: Additional properties' + 'WARNING: Invalid config:\nubuntu_advantage: Additional properties' " are not allowed ('invalid-key' was unexpected)", self.logs.getvalue()) - def test_warn_schema_requires_commands(self): - """Warn when ubuntu-advantage configuration lacks commands.""" - validate_cloudconfig_schema( - {'ubuntu-advantage': {}}, schema) - self.assertEqual( - "WARNING: Invalid config:\nubuntu-advantage: 'commands' is a" - " required property\n", - self.logs.getvalue()) - - @mock.patch('%s.run_commands' % MPATH) - def test_warn_schema_commands_is_not_list_or_dict(self, _): - """Warn when ubuntu-advantage:commands config is not a 
list or dict.""" + @mock.patch('%s.maybe_install_ua_tools' % MPATH) + @mock.patch('%s.configure_ua' % MPATH) + def test_warn_schema_requires_token(self, _cfg, _): + """Warn if ubuntu_advantage configuration lacks token.""" validate_cloudconfig_schema( - {'ubuntu-advantage': {'commands': 'broken'}}, schema) + {'ubuntu_advantage': {'enable': ['esm']}}, schema) self.assertEqual( - "WARNING: Invalid config:\nubuntu-advantage.commands: 'broken' is" - " not of type 'object', 'array'\n", - self.logs.getvalue()) + "WARNING: Invalid config:\nubuntu_advantage:" + " 'token' is a required property\n", self.logs.getvalue()) - @mock.patch('%s.run_commands' % MPATH) - def test_warn_schema_when_commands_is_empty(self, _): - """Emit warnings when ubuntu-advantage:commands is empty.""" - validate_cloudconfig_schema( - {'ubuntu-advantage': {'commands': []}}, schema) + @mock.patch('%s.maybe_install_ua_tools' % MPATH) + @mock.patch('%s.configure_ua' % MPATH) + def test_warn_schema_services_is_not_list_or_dict(self, _cfg, _): + """Warn when ubuntu_advantage:enable config is not a list.""" validate_cloudconfig_schema( - {'ubuntu-advantage': {'commands': {}}}, schema) + {'ubuntu_advantage': {'enable': 'needslist'}}, schema) self.assertEqual( - "WARNING: Invalid config:\nubuntu-advantage.commands: [] is too" - " short\nWARNING: Invalid config:\nubuntu-advantage.commands: {}" - " does not have enough properties\n", + "WARNING: Invalid config:\nubuntu_advantage: 'token' is a" + " required property\nubuntu_advantage.enable: 'needslist'" + " is not of type 'array'\n", self.logs.getvalue()) - @mock.patch('%s.run_commands' % MPATH) - def test_schema_when_commands_are_list_or_dict(self, _): - """No warnings when ubuntu-advantage:commands are a list or dict.""" - validate_cloudconfig_schema( - {'ubuntu-advantage': {'commands': ['valid']}}, schema) - validate_cloudconfig_schema( - {'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema) - self.assertEqual('', self.logs.getvalue()) - - def test_duplicates_are_fine_array_array(self): - """Duplicated commands array/array entries are allowed.""" - self.assertSchemaValid( - {'commands': [["echo", "bye"], ["echo" "bye"]]}, - "command entries can be duplicate.") - - def test_duplicates_are_fine_array_string(self): - """Duplicated commands array/string entries are allowed.""" - self.assertSchemaValid( - {'commands': ["echo bye", "echo bye"]}, - "command entries can be duplicate.") - - def test_duplicates_are_fine_dict_array(self): - """Duplicated commands dict/array entries are allowed.""" - self.assertSchemaValid( - {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}}, - "command entries can be duplicate.") - - def test_duplicates_are_fine_dict_string(self): - """Duplicated commands dict/string entries are allowed.""" - self.assertSchemaValid( - {'commands': {'00': "echo bye", '01': "echo bye"}}, - "command entries can be duplicate.") - class TestHandle(CiTestCase): @@ -205,41 +192,89 @@ class TestHandle(CiTestCase): super(TestHandle, self).setUp() self.tmp = self.tmp_dir() - @mock.patch('%s.run_commands' % MPATH) @mock.patch('%s.validate_cloudconfig_schema' % MPATH) - def test_handle_no_config(self, m_schema, m_run): + def test_handle_no_config(self, m_schema): """When no ua-related configuration is provided, nothing happens.""" cfg = {} handle('ua-test', cfg=cfg, cloud=None, log=self.logger, args=None) self.assertIn( - "DEBUG: Skipping module named ua-test, no 'ubuntu-advantage' key" - " in config", + "DEBUG: Skipping module named ua-test, no 
'ubuntu_advantage'" + ' configuration found', self.logs.getvalue()) m_schema.assert_not_called() - m_run.assert_not_called() + @mock.patch('%s.configure_ua' % MPATH) @mock.patch('%s.maybe_install_ua_tools' % MPATH) - def test_handle_tries_to_install_ubuntu_advantage_tools(self, m_install): + def test_handle_tries_to_install_ubuntu_advantage_tools( + self, m_install, m_cfg): """If ubuntu_advantage is provided, try installing ua-tools package.""" - cfg = {'ubuntu-advantage': {}} + cfg = {'ubuntu_advantage': {'token': 'valid'}} mycloud = FakeCloud(None) handle('nomatter', cfg=cfg, cloud=mycloud, log=self.logger, args=None) m_install.assert_called_once_with(mycloud) + @mock.patch('%s.configure_ua' % MPATH) @mock.patch('%s.maybe_install_ua_tools' % MPATH) - def test_handle_runs_commands_provided(self, m_install): - """When commands are specified as a list, run them.""" - outfile = self.tmp_path('output.log', dir=self.tmp) + def test_handle_passes_credentials_and_services_to_configure_ua( + self, m_install, m_configure_ua): + """All ubuntu_advantage config keys are passed to configure_ua.""" + cfg = {'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}} + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + m_configure_ua.assert_called_once_with( + token='token', enable=['esm']) + + @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) + @mock.patch('%s.configure_ua' % MPATH) + def test_handle_warns_on_deprecated_ubuntu_advantage_key_w_config( + self, m_configure_ua): + """Warning when ubuntu-advantage key is present with new config""" + cfg = {'ubuntu-advantage': {'token': 'token', 'enable': ['esm']}} + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual( + 'WARNING: Deprecated configuration key "ubuntu-advantage"' + ' provided. Expected underscore delimited "ubuntu_advantage";' + ' will attempt to continue.', + self.logs.getvalue().splitlines()[0]) + m_configure_ua.assert_called_once_with( + token='token', enable=['esm']) + + def test_handle_error_on_deprecated_commands_key_dashed(self): + """Error when commands is present in ubuntu-advantage key.""" + cfg = {'ubuntu-advantage': {'commands': 'nogo'}} + with self.assertRaises(RuntimeError) as context_manager: + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual( + 'Deprecated configuration "ubuntu-advantage: commands" provided.' + ' Expected "token"', + str(context_manager.exception)) + + def test_handle_error_on_deprecated_commands_key_underscored(self): + """Error when commands is present in ubuntu_advantage key.""" + cfg = {'ubuntu_advantage': {'commands': 'nogo'}} + with self.assertRaises(RuntimeError) as context_manager: + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual( + 'Deprecated configuration "ubuntu-advantage: commands" provided.' 
+ ' Expected "token"', + str(context_manager.exception)) + @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) + @mock.patch('%s.configure_ua' % MPATH) + def test_handle_prefers_new_style_config( + self, m_configure_ua): + """ubuntu_advantage should be preferred over ubuntu-advantage""" cfg = { - 'ubuntu-advantage': {'commands': ['echo "HI" >> %s' % outfile, - 'echo "MOM" >> %s' % outfile]}} - mock_path = '%s.sys.stderr' % MPATH - with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]): - with mock.patch(mock_path, new_callable=StringIO): - handle('nomatter', cfg=cfg, cloud=None, log=self.logger, - args=None) - self.assertEqual('HI\nMOM\n', util.load_file(outfile)) + 'ubuntu-advantage': {'token': 'nope', 'enable': ['wrong']}, + 'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}, + } + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual( + 'WARNING: Deprecated configuration key "ubuntu-advantage"' + ' provided. Expected underscore delimited "ubuntu_advantage";' + ' will attempt to continue.', + self.logs.getvalue().splitlines()[0]) + m_configure_ua.assert_called_once_with( + token='token', enable=['esm']) class TestMaybeInstallUATools(CiTestCase): @@ -253,7 +288,7 @@ class TestMaybeInstallUATools(CiTestCase): @mock.patch('%s.util.which' % MPATH) def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which): """Do nothing if ubuntu-advantage-tools already exists.""" - m_which.return_value = '/usr/bin/ubuntu-advantage' # already installed + m_which.return_value = '/usr/bin/ua' # already installed distro = mock.MagicMock() distro.update_package_sources.side_effect = RuntimeError( 'Some apt error') diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/cloudinit/config/tests/test_ubuntu_drivers.py new file mode 100644 index 0000000..efba4ce --- /dev/null +++ b/cloudinit/config/tests/test_ubuntu_drivers.py @@ -0,0 +1,174 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import copy + +from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema, mock +from cloudinit.config.schema import ( + SchemaValidationError, validate_cloudconfig_schema) +from cloudinit.config import cc_ubuntu_drivers as drivers +from cloudinit.util import ProcessExecutionError + +MPATH = "cloudinit.config.cc_ubuntu_drivers." +OLD_UBUNTU_DRIVERS_ERROR_STDERR = ( + "ubuntu-drivers: error: argument <command>: invalid choice: 'install' " + "(choose from 'list', 'autoinstall', 'devices', 'debug')\n") + + +class TestUbuntuDrivers(CiTestCase): + cfg_accepted = {'drivers': {'nvidia': {'license-accepted': True}}} + install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'] + + with_logs = True + + @skipUnlessJsonSchema() + def test_schema_requires_boolean_for_license_accepted(self): + with self.assertRaisesRegex( + SchemaValidationError, ".*license-accepted.*TRUE.*boolean"): + validate_cloudconfig_schema( + {'drivers': {'nvidia': {'license-accepted': "TRUE"}}}, + schema=drivers.schema, strict=True) + + @mock.patch(MPATH + "util.subp", return_value=('', '')) + @mock.patch(MPATH + "util.which", return_value=False) + def _assert_happy_path_taken(self, config, m_which, m_subp): + """Positive path test through handle. 
Package should be installed.""" + myCloud = mock.MagicMock() + drivers.handle('ubuntu_drivers', config, myCloud, None, None) + self.assertEqual([mock.call(['ubuntu-drivers-common'])], + myCloud.distro.install_packages.call_args_list) + self.assertEqual([mock.call(self.install_gpgpu)], + m_subp.call_args_list) + + def test_handle_does_package_install(self): + self._assert_happy_path_taken(self.cfg_accepted) + + def test_trueish_strings_are_considered_approval(self): + for true_value in ['yes', 'true', 'on', '1']: + new_config = copy.deepcopy(self.cfg_accepted) + new_config['drivers']['nvidia']['license-accepted'] = true_value + self._assert_happy_path_taken(new_config) + + @mock.patch(MPATH + "util.subp", side_effect=ProcessExecutionError( + stdout='No drivers found for installation.\n', exit_code=1)) + @mock.patch(MPATH + "util.which", return_value=False) + def test_handle_raises_error_if_no_drivers_found(self, m_which, m_subp): + """If ubuntu-drivers doesn't install any drivers, raise an error.""" + myCloud = mock.MagicMock() + with self.assertRaises(Exception): + drivers.handle( + 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None) + self.assertEqual([mock.call(['ubuntu-drivers-common'])], + myCloud.distro.install_packages.call_args_list) + self.assertEqual([mock.call(self.install_gpgpu)], + m_subp.call_args_list) + self.assertIn('ubuntu-drivers found no drivers for installation', + self.logs.getvalue()) + + @mock.patch(MPATH + "util.subp", return_value=('', '')) + @mock.patch(MPATH + "util.which", return_value=False) + def _assert_inert_with_config(self, config, m_which, m_subp): + """Helper to reduce repetition when testing negative cases""" + myCloud = mock.MagicMock() + drivers.handle('ubuntu_drivers', config, myCloud, None, None) + self.assertEqual(0, myCloud.distro.install_packages.call_count) + self.assertEqual(0, m_subp.call_count) + + def test_handle_inert_if_license_not_accepted(self): + """Ensure we don't do anything if the license is rejected.""" + self._assert_inert_with_config( + {'drivers': {'nvidia': {'license-accepted': False}}}) + + def test_handle_inert_if_garbage_in_license_field(self): + """Ensure we don't do anything if unknown text is in license field.""" + self._assert_inert_with_config( + {'drivers': {'nvidia': {'license-accepted': 'garbage'}}}) + + def test_handle_inert_if_no_license_key(self): + """Ensure we don't do anything if no license key.""" + self._assert_inert_with_config({'drivers': {'nvidia': {}}}) + + def test_handle_inert_if_no_nvidia_key(self): + """Ensure we don't do anything if other license accepted.""" + self._assert_inert_with_config( + {'drivers': {'acme': {'license-accepted': True}}}) + + def test_handle_inert_if_string_given(self): + """Ensure we don't do anything if string refusal given.""" + for false_value in ['no', 'false', 'off', '0']: + self._assert_inert_with_config( + {'drivers': {'nvidia': {'license-accepted': false_value}}}) + + @mock.patch(MPATH + "install_drivers") + def test_handle_no_drivers_does_nothing(self, m_install_drivers): + """If no 'drivers' key in the config, nothing should be done.""" + myCloud = mock.MagicMock() + myLog = mock.MagicMock() + drivers.handle('ubuntu_drivers', {'foo': 'bzr'}, myCloud, myLog, None) + self.assertIn('Skipping module named', + myLog.debug.call_args_list[0][0][0]) + self.assertEqual(0, m_install_drivers.call_count) + + @mock.patch(MPATH + "util.subp", return_value=('', '')) + @mock.patch(MPATH + "util.which", return_value=True) + def test_install_drivers_no_install_if_present(self, 
m_which, m_subp): + """If 'ubuntu-drivers' is present, no package install should occur.""" + pkg_install = mock.MagicMock() + drivers.install_drivers(self.cfg_accepted['drivers'], + pkg_install_func=pkg_install) + self.assertEqual(0, pkg_install.call_count) + self.assertEqual([mock.call('ubuntu-drivers')], + m_which.call_args_list) + self.assertEqual([mock.call(self.install_gpgpu)], + m_subp.call_args_list) + + def test_install_drivers_rejects_invalid_config(self): + """install_drivers should raise TypeError if not given a config dict""" + pkg_install = mock.MagicMock() + with self.assertRaisesRegex(TypeError, ".*expected dict.*"): + drivers.install_drivers("mystring", pkg_install_func=pkg_install) + self.assertEqual(0, pkg_install.call_count) + + @mock.patch(MPATH + "util.subp", side_effect=ProcessExecutionError( + stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2)) + @mock.patch(MPATH + "util.which", return_value=False) + def test_install_drivers_handles_old_ubuntu_drivers_gracefully( + self, m_which, m_subp): + """Older ubuntu-drivers versions should emit message and raise error""" + myCloud = mock.MagicMock() + with self.assertRaises(Exception): + drivers.handle( + 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None) + self.assertEqual([mock.call(['ubuntu-drivers-common'])], + myCloud.distro.install_packages.call_args_list) + self.assertEqual([mock.call(self.install_gpgpu)], + m_subp.call_args_list) + self.assertIn('WARNING: the available version of ubuntu-drivers is' + ' too old to perform requested driver installation', + self.logs.getvalue()) + + +# Sub-class TestUbuntuDrivers to run the same test cases, but with a version +class TestUbuntuDriversWithVersion(TestUbuntuDrivers): + cfg_accepted = { + 'drivers': {'nvidia': {'license-accepted': True, 'version': '123'}}} + install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123'] + + @mock.patch(MPATH + "util.subp", return_value=('', '')) + @mock.patch(MPATH + "util.which", return_value=False) + def test_version_none_uses_latest(self, m_which, m_subp): + myCloud = mock.MagicMock() + version_none_cfg = { + 'drivers': {'nvidia': {'license-accepted': True, 'version': None}}} + drivers.handle( + 'ubuntu_drivers', version_none_cfg, myCloud, None, None) + self.assertEqual( + [mock.call(['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'])], + m_subp.call_args_list) + + def test_specifying_a_version_doesnt_override_license_acceptance(self): + self._assert_inert_with_config({ + 'drivers': {'nvidia': {'license-accepted': False, + 'version': '123'}} + }) + +# vi: ts=4 expandtab diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index 6423632..b129bb6 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -366,8 +366,6 @@ class Renderer(renderer.Renderer): down = indent + "pre-down route del" or_true = " || true" mapping = { - 'network': '-net', - 'netmask': 'netmask', 'gateway': 'gw', 'metric': 'metric', } @@ -379,13 +377,21 @@ class Renderer(renderer.Renderer): default_gw = ' -A inet6 default' route_line = '' - for k in ['network', 'netmask', 'gateway', 'metric']: - if default_gw and k in ['network', 'netmask']: + for k in ['network', 'gateway', 'metric']: + if default_gw and k == 'network': continue if k == 'gateway': route_line += '%s %s %s' % (default_gw, mapping[k], route[k]) elif k in route: - route_line += ' %s %s' % (mapping[k], route[k]) + if k == 'network': + if ':' in route[k]: + route_line += ' -A inet6' + else: + route_line += ' -net' + if 'prefix' in route: + route_line += ' %s/%s' % (route[k], 
route['prefix']) + else: + route_line += ' %s %s' % (mapping[k], route[k]) content.append(up + route_line + or_true) content.append(down + route_line + or_true) return content diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 539b76d..4d19f56 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -148,6 +148,7 @@ class NetworkState(object): self._network_state = copy.deepcopy(network_state) self._version = version self.use_ipv6 = network_state.get('use_ipv6', False) + self._has_default_route = None @property def config(self): @@ -157,14 +158,6 @@ class NetworkState(object): def version(self): return self._version - def iter_routes(self, filter_func=None): - for route in self._network_state.get('routes', []): - if filter_func is not None: - if filter_func(route): - yield route - else: - yield route - @property def dns_nameservers(self): try: @@ -179,6 +172,12 @@ class NetworkState(object): except KeyError: return [] + @property + def has_default_route(self): + if self._has_default_route is None: + self._has_default_route = self._maybe_has_default_route() + return self._has_default_route + def iter_interfaces(self, filter_func=None): ifaces = self._network_state.get('interfaces', {}) for iface in six.itervalues(ifaces): @@ -188,6 +187,32 @@ class NetworkState(object): if filter_func(iface): yield iface + def iter_routes(self, filter_func=None): + for route in self._network_state.get('routes', []): + if filter_func is not None: + if filter_func(route): + yield route + else: + yield route + + def _maybe_has_default_route(self): + for route in self.iter_routes(): + if self._is_default_route(route): + return True + for iface in self.iter_interfaces(): + for subnet in iface.get('subnets', []): + for route in subnet.get('routes', []): + if self._is_default_route(route): + return True + return False + + def _is_default_route(self, route): + default_nets = ('::', '0.0.0.0') + return ( + route.get('prefix') == 0 + and route.get('network') in default_nets + ) + @six.add_metaclass(CommandHandlerMeta) class NetworkStateInterpreter(object): diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 19b3e60..0998392 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -322,7 +322,7 @@ class Renderer(renderer.Renderer): iface_cfg[new_key] = old_value @classmethod - def _render_subnets(cls, iface_cfg, subnets): + def _render_subnets(cls, iface_cfg, subnets, has_default_route): # setting base values iface_cfg['BOOTPROTO'] = 'none' @@ -331,6 +331,7 @@ class Renderer(renderer.Renderer): mtu_key = 'MTU' subnet_type = subnet.get('type') if subnet_type == 'dhcp6': + # TODO need to set BOOTPROTO to dhcp6 on SUSE iface_cfg['IPV6INIT'] = True iface_cfg['DHCPV6C'] = True elif subnet_type in ['dhcp4', 'dhcp']: @@ -375,9 +376,9 @@ class Renderer(renderer.Renderer): ipv6_index = -1 for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): subnet_type = subnet.get('type') - if subnet_type == 'dhcp6': - continue - elif subnet_type in ['dhcp4', 'dhcp']: + if subnet_type in ['dhcp', 'dhcp4', 'dhcp6']: + if has_default_route and iface_cfg['BOOTPROTO'] != 'none': + iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False continue elif subnet_type == 'static': if subnet_is_ipv6(subnet): @@ -385,10 +386,13 @@ class Renderer(renderer.Renderer): ipv6_cidr = "%s/%s" % (subnet['address'], subnet['prefix']) if ipv6_index == 0: iface_cfg['IPV6ADDR'] = ipv6_cidr + iface_cfg['IPADDR6'] = ipv6_cidr elif ipv6_index == 1: 
iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr + iface_cfg['IPADDR6_0'] = ipv6_cidr else: iface_cfg['IPV6ADDR_SECONDARIES'] += " " + ipv6_cidr + iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr else: ipv4_index = ipv4_index + 1 suff = "" if ipv4_index == 0 else str(ipv4_index) @@ -443,6 +447,8 @@ class Renderer(renderer.Renderer): # TODO(harlowja): add validation that no other iface has # also provided the default route? iface_cfg['DEFROUTE'] = True + if iface_cfg['BOOTPROTO'] in ('dhcp', 'dhcp4', 'dhcp6'): + iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = True if 'gateway' in route: if is_ipv6 or is_ipv6_addr(route['gateway']): iface_cfg['IPV6_DEFAULTGW'] = route['gateway'] @@ -493,7 +499,9 @@ class Renderer(renderer.Renderer): iface_cfg = iface_contents[iface_name] route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route + ) cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @classmethod @@ -518,7 +526,9 @@ class Renderer(renderer.Renderer): iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route + ) cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) # iter_interfaces on network-state is not sorted to produce @@ -547,7 +557,9 @@ class Renderer(renderer.Renderer): iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route + ) cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @staticmethod @@ -608,7 +620,9 @@ class Renderer(renderer.Renderer): iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route + ) cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @classmethod @@ -620,7 +634,9 @@ class Renderer(renderer.Renderer): iface_cfg.kind = 'infiniband' iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route + ) cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @classmethod diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py old mode 100644 new mode 100755 index eccbee5..76b1661 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -21,10 +21,14 @@ from cloudinit import net from cloudinit.event import EventType from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources -from cloudinit.sources.helpers.azure import get_metadata_from_fabric from cloudinit.sources.helpers import netlink from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc from cloudinit import util +from cloudinit.reporting import events + +from cloudinit.sources.helpers.azure import (azure_ds_reporter, + azure_ds_telemetry_reporter, + get_metadata_from_fabric) LOG = logging.getLogger(__name__) @@ -54,6 +58,7 @@ REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" AGENT_SEED_DIR = '/var/lib/waagent' IMDS_URL = "http://169.254.169.254/metadata/" +PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" # List of static scripts and 
network config artifacts created by # stock ubuntu suported images. @@ -195,6 +200,8 @@ if util.is_FreeBSD(): RESOURCE_DISK_PATH = "/dev/" + res_disk else: LOG.debug("resource disk is None") + # TODO Find where platform entropy data is surfaced + PLATFORM_ENTROPY_SOURCE = None BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START_BUILTIN, @@ -241,6 +248,7 @@ def set_hostname(hostname, hostname_command='hostname'): util.subp([hostname_command, hostname]) +@azure_ds_telemetry_reporter @contextlib.contextmanager def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): """ @@ -287,6 +295,7 @@ class DataSourceAzure(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) + @azure_ds_telemetry_reporter def bounce_network_with_azure_hostname(self): # When using cloud-init to provision, we have to set the hostname from # the metadata and "bounce" the network to force DDNS to update via @@ -312,6 +321,7 @@ class DataSourceAzure(sources.DataSource): util.logexc(LOG, "handling set_hostname failed") return False + @azure_ds_telemetry_reporter def get_metadata_from_agent(self): temp_hostname = self.metadata.get('local-hostname') agent_cmd = self.ds_cfg['agent_command'] @@ -341,15 +351,18 @@ class DataSourceAzure(sources.DataSource): LOG.debug("ssh authentication: " "using fingerprint from fabirc") - # wait very long for public SSH keys to arrive - # https://bugs.launchpad.net/cloud-init/+bug/1717611 - missing = util.log_time(logfunc=LOG.debug, - msg="waiting for SSH public key files", - func=util.wait_for_files, - args=(fp_files, 900)) - - if len(missing): - LOG.warning("Did not find files, but going on: %s", missing) + with events.ReportEventStack( + name="waiting-for-ssh-public-key", + description="wait for agents to retrieve ssh keys", + parent=azure_ds_reporter): + # wait very long for public SSH keys to arrive + # https://bugs.launchpad.net/cloud-init/+bug/1717611 + missing = util.log_time(logfunc=LOG.debug, + msg="waiting for SSH public key files", + func=util.wait_for_files, + args=(fp_files, 900)) + if len(missing): + LOG.warning("Did not find files, but going on: %s", missing) metadata = {} metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) @@ -363,6 +376,7 @@ class DataSourceAzure(sources.DataSource): subplatform_type = 'seed-dir' return '%s (%s)' % (subplatform_type, self.seed) + @azure_ds_telemetry_reporter def crawl_metadata(self): """Walk all instance metadata sources returning a dict on success. @@ -464,6 +478,7 @@ class DataSourceAzure(sources.DataSource): super(DataSourceAzure, self).clear_cached_attrs(attr_defaults) self._metadata_imds = sources.UNSET + @azure_ds_telemetry_reporter def _get_data(self): """Crawl and process datasource metadata caching metadata as attrs. 
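A recurring pattern in the Azure hunks above and below is the new @azure_ds_telemetry_reporter decorator applied to datasource entry points. Its definition lives in cloudinit/sources/helpers/azure.py, which is not part of this diff, so the following is only a hedged reconstruction of the shape such a decorator typically takes, not the actual implementation:

    import functools

    def azure_ds_telemetry_reporter(func):
        """Illustrative stand-in: record entry/exit of a wrapped datasource
        method. The real decorator in cloudinit/sources/helpers/azure.py
        reports via the events machinery rather than printing."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            print('azure-ds telemetry: enter %s' % func.__name__)
            try:
                return func(*args, **kwargs)
            finally:
                print('azure-ds telemetry: exit %s' % func.__name__)
        return wrapper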
@@ -510,6 +525,7 @@ class DataSourceAzure(sources.DataSource): # quickly (local check only) if self.instance_id is still valid return sources.instance_id_matches_system_uuid(self.get_instance_id()) + @azure_ds_telemetry_reporter def setup(self, is_new_instance): if self._negotiated is False: LOG.debug("negotiating for %s (new_instance=%s)", @@ -577,6 +593,7 @@ class DataSourceAzure(sources.DataSource): if nl_sock: nl_sock.close() + @azure_ds_telemetry_reporter def _report_ready(self, lease): """Tells the fabric provisioning has completed """ try: @@ -614,9 +631,14 @@ class DataSourceAzure(sources.DataSource): def _reprovision(self): """Initiate the reprovisioning workflow.""" contents = self._poll_imds() - md, ud, cfg = read_azure_ovf(contents) - return (md, ud, cfg, {'ovf-env.xml': contents}) - + with events.ReportEventStack( + name="reprovisioning-read-azure-ovf", + description="read azure ovf during reprovisioning", + parent=azure_ds_reporter): + md, ud, cfg = read_azure_ovf(contents) + return (md, ud, cfg, {'ovf-env.xml': contents}) + + @azure_ds_telemetry_reporter def _negotiate(self): """Negotiate with fabric and return data from it. @@ -649,6 +671,7 @@ class DataSourceAzure(sources.DataSource): util.del_file(REPROVISION_MARKER_FILE) return fabric_data + @azure_ds_telemetry_reporter def activate(self, cfg, is_new_instance): address_ephemeral_resize(is_new_instance=is_new_instance, preserve_ntfs=self.ds_cfg.get( @@ -665,7 +688,7 @@ class DataSourceAzure(sources.DataSource): 2. Generate a fallback network config that does not include any of the blacklisted devices. """ - if not self._network_config: + if not self._network_config or self._network_config == sources.UNSET: if self.ds_cfg.get('apply_network_config'): nc_src = self._metadata_imds else: @@ -687,12 +710,14 @@ def _partitions_on_device(devpath, maxnum=16): return [] +@azure_ds_telemetry_reporter def _has_ntfs_filesystem(devpath): ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) LOG.debug('ntfs_devices found = %s', ntfs_devices) return os.path.realpath(devpath) in ntfs_devices +@azure_ds_telemetry_reporter def can_dev_be_reformatted(devpath, preserve_ntfs): """Determine if the ephemeral drive at devpath should be reformatted. @@ -741,43 +766,59 @@ def can_dev_be_reformatted(devpath, preserve_ntfs): (cand_part, cand_path, devpath)) return False, msg + @azure_ds_telemetry_reporter def count_files(mp): ignored = set(['dataloss_warning_readme.txt']) return len([f for f in os.listdir(mp) if f.lower() not in ignored]) bmsg = ('partition %s (%s) on device %s was ntfs formatted' % (cand_part, cand_path, devpath)) - try: - file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", - update_env_for_mount={'LANG': 'C'}) - except util.MountFailedError as e: - if "unknown filesystem type 'ntfs'" in str(e): - return True, (bmsg + ' but this system cannot mount NTFS,' - ' assuming there are no important files.' - ' Formatting allowed.') - return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) - - if file_count != 0: - LOG.warning("it looks like you're using NTFS on the ephemeral disk, " - 'to ensure that filesystem does not get wiped, set ' - '%s.%s in config', '.'.join(DS_CFG_PATH), - DS_CFG_KEY_PRESERVE_NTFS) - return False, bmsg + ' but had %d files on it.' 
% file_count + + with events.ReportEventStack( + name="mount-ntfs-and-count", + description="mount-ntfs-and-count", + parent=azure_ds_reporter) as evt: + try: + file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", + update_env_for_mount={'LANG': 'C'}) + except util.MountFailedError as e: + evt.description = "cannot mount ntfs" + if "unknown filesystem type 'ntfs'" in str(e): + return True, (bmsg + ' but this system cannot mount NTFS,' + ' assuming there are no important files.' + ' Formatting allowed.') + return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) + + if file_count != 0: + evt.description = "mounted and counted %d files" % file_count + LOG.warning("it looks like you're using NTFS on the ephemeral" + " disk, to ensure that filesystem does not get wiped," + " set %s.%s in config", '.'.join(DS_CFG_PATH), + DS_CFG_KEY_PRESERVE_NTFS) + return False, bmsg + ' but had %d files on it.' % file_count return True, bmsg + ' and had no important files. Safe for reformatting.' +@azure_ds_telemetry_reporter def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, is_new_instance=False, preserve_ntfs=False): # wait for ephemeral disk to come up naplen = .2 - missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen, - log_pre="Azure ephemeral disk: ") - - if missing: - LOG.warning("ephemeral device '%s' did not appear after %d seconds.", - devpath, maxwait) - return + with events.ReportEventStack( + name="wait-for-ephemeral-disk", + description="wait for ephemeral disk", + parent=azure_ds_reporter): + missing = util.wait_for_files([devpath], + maxwait=maxwait, + naplen=naplen, + log_pre="Azure ephemeral disk: ") + + if missing: + LOG.warning("ephemeral device '%s' did" + " not appear after %d seconds.", + devpath, maxwait) + return result = False msg = None @@ -805,6 +846,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, return +@azure_ds_telemetry_reporter def perform_hostname_bounce(hostname, cfg, prev_hostname): # set the hostname to 'hostname' if it is not already set to that. # then, if policy is not off, bounce the interface using command @@ -840,6 +882,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): return True +@azure_ds_telemetry_reporter def crtfile_to_pubkey(fname, data=None): pipeline = ('openssl x509 -noout -pubkey < "$0" |' 'ssh-keygen -i -m PKCS8 -f /dev/stdin') @@ -848,6 +891,7 @@ def crtfile_to_pubkey(fname, data=None): return out.rstrip() +@azure_ds_telemetry_reporter def pubkeys_from_crt_files(flist): pubkeys = [] errors = [] @@ -863,6 +907,7 @@ def pubkeys_from_crt_files(flist): return pubkeys +@azure_ds_telemetry_reporter def write_files(datadir, files, dirmode=None): def _redact_password(cnt, fname): @@ -890,6 +935,7 @@ def write_files(datadir, files, dirmode=None): util.write_file(filename=fname, content=content, mode=0o600) +@azure_ds_telemetry_reporter def invoke_agent(cmd): # this is a function itself to simplify patching it for test if cmd: @@ -909,6 +955,7 @@ def find_child(node, filter_func): return ret +@azure_ds_telemetry_reporter def load_azure_ovf_pubkeys(sshnode): # This parses a 'SSH' node formatted like below, and returns # an array of dicts. 
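The hunks above wrap slow steps (the NTFS mount-and-count check, waiting for the ephemeral disk) in events.ReportEventStack context managers so they are timed and reported. The keyword arguments and the evt.description mutation below are taken directly from this diff; the sleep is a stand-in for the wrapped work:

    import time

    from cloudinit.reporting import events
    from cloudinit.sources.helpers.azure import azure_ds_reporter

    with events.ReportEventStack(
            name="wait-for-ephemeral-disk",
            description="wait for ephemeral disk",
            parent=azure_ds_reporter) as evt:
        time.sleep(0.2)  # stand-in for the wrapped slow operation
        # The diff also updates the description before the stack exits:
        evt.description = "ephemeral disk appeared"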
@@ -961,6 +1008,7 @@ def load_azure_ovf_pubkeys(sshnode): return found +@azure_ds_telemetry_reporter def read_azure_ovf(contents): try: dom = minidom.parseString(contents) @@ -1061,6 +1109,7 @@ def read_azure_ovf(contents): return (md, ud, cfg) +@azure_ds_telemetry_reporter def _extract_preprovisioned_vm_setting(dom): """Read the preprovision flag from the ovf. It should not exist unless true.""" @@ -1089,6 +1138,7 @@ def encrypt_pass(password, salt_id="$6$"): return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) +@azure_ds_telemetry_reporter def _check_freebsd_cdrom(cdrom_dev): """Return boolean indicating path to cdrom device has content.""" try: @@ -1100,18 +1150,31 @@ def _check_freebsd_cdrom(cdrom_dev): return False -def _get_random_seed(): +@azure_ds_telemetry_reporter +def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): """Return content random seed file if available, otherwise, return None.""" # azure / hyper-v provides random data here - # TODO. find the seed on FreeBSD platform # now update ds_cfg to reflect contents pass in config - if util.is_FreeBSD(): + if source is None: return None - return util.load_file("/sys/firmware/acpi/tables/OEM0", - quiet=True, decode=False) + seed = util.load_file(source, quiet=True, decode=False) + + # The seed generally contains non-Unicode characters. load_file puts + # them into a str (in python 2) or bytes (in python 3). In python 2, + # bad octets in a str cause util.json_dumps() to throw an exception. In + # python 3, bytes is a non-serializable type, and the handler load_file + # uses applies b64 encoding *again* to handle it. The simplest solution + # is to just b64encode the data and then decode it to a serializable + # string. Same number of bits of entropy, just with 25% more zeroes. + # There's no need to undo this base64-encoding when the random seed is + # actually used in cc_seed_random.py. + seed = base64.b64encode(seed).decode() + + return seed +@azure_ds_telemetry_reporter def list_possible_azure_ds_devs(): devlist = [] if util.is_FreeBSD(): @@ -1126,6 +1189,7 @@ def list_possible_azure_ds_devs(): return devlist +@azure_ds_telemetry_reporter def load_azure_ds_dir(source_dir): ovf_file = os.path.join(source_dir, "ovf-env.xml") @@ -1148,47 +1212,54 @@ def parse_network_config(imds_metadata): @param: imds_metadata: Dict of content read from IMDS network service. @return: Dictionary containing network version 2 standard configuration. 
""" - if imds_metadata != sources.UNSET and imds_metadata: - netconfig = {'version': 2, 'ethernets': {}} - LOG.debug('Azure: generating network configuration from IMDS') - network_metadata = imds_metadata['network'] - for idx, intf in enumerate(network_metadata['interface']): - nicname = 'eth{idx}'.format(idx=idx) - dev_config = {} - for addr4 in intf['ipv4']['ipAddress']: - privateIpv4 = addr4['privateIpAddress'] - if privateIpv4: - if dev_config.get('dhcp4', False): - # Append static address config for nic > 1 - netPrefix = intf['ipv4']['subnet'][0].get( - 'prefix', '24') - if not dev_config.get('addresses'): - dev_config['addresses'] = [] - dev_config['addresses'].append( - '{ip}/{prefix}'.format( - ip=privateIpv4, prefix=netPrefix)) - else: - dev_config['dhcp4'] = True - for addr6 in intf['ipv6']['ipAddress']: - privateIpv6 = addr6['privateIpAddress'] - if privateIpv6: - dev_config['dhcp6'] = True - break - if dev_config: - mac = ':'.join(re.findall(r'..', intf['macAddress'])) - dev_config.update( - {'match': {'macaddress': mac.lower()}, - 'set-name': nicname}) - netconfig['ethernets'][nicname] = dev_config - else: - blacklist = ['mlx4_core'] - LOG.debug('Azure: generating fallback configuration') - # generate a network config, blacklist picking mlx4_core devs - netconfig = net.generate_fallback_config( - blacklist_drivers=blacklist, config_driver=True) - return netconfig + with events.ReportEventStack( + name="parse_network_config", + description="", + parent=azure_ds_reporter) as evt: + if imds_metadata != sources.UNSET and imds_metadata: + netconfig = {'version': 2, 'ethernets': {}} + LOG.debug('Azure: generating network configuration from IMDS') + network_metadata = imds_metadata['network'] + for idx, intf in enumerate(network_metadata['interface']): + nicname = 'eth{idx}'.format(idx=idx) + dev_config = {} + for addr4 in intf['ipv4']['ipAddress']: + privateIpv4 = addr4['privateIpAddress'] + if privateIpv4: + if dev_config.get('dhcp4', False): + # Append static address config for nic > 1 + netPrefix = intf['ipv4']['subnet'][0].get( + 'prefix', '24') + if not dev_config.get('addresses'): + dev_config['addresses'] = [] + dev_config['addresses'].append( + '{ip}/{prefix}'.format( + ip=privateIpv4, prefix=netPrefix)) + else: + dev_config['dhcp4'] = True + for addr6 in intf['ipv6']['ipAddress']: + privateIpv6 = addr6['privateIpAddress'] + if privateIpv6: + dev_config['dhcp6'] = True + break + if dev_config: + mac = ':'.join(re.findall(r'..', intf['macAddress'])) + dev_config.update( + {'match': {'macaddress': mac.lower()}, + 'set-name': nicname}) + netconfig['ethernets'][nicname] = dev_config + evt.description = "network config from imds" + else: + blacklist = ['mlx4_core'] + LOG.debug('Azure: generating fallback configuration') + # generate a network config, blacklist picking mlx4_core devs + netconfig = net.generate_fallback_config( + blacklist_drivers=blacklist, config_driver=True) + evt.description = "network config from fallback" + return netconfig +@azure_ds_telemetry_reporter def get_metadata_from_imds(fallback_nic, retries): """Query Azure's network metadata service, returning a dictionary. 
@@ -1213,6 +1284,7 @@ def get_metadata_from_imds(fallback_nic, retries):
     return util.log_time(**kwargs)
 
 
+@azure_ds_telemetry_reporter
 def _get_metadata_from_imds(retries):
 
     url = IMDS_URL + "instance?api-version=2017-12-01"
@@ -1232,6 +1304,7 @@ def _get_metadata_from_imds(retries):
         return {}
 
 
+@azure_ds_telemetry_reporter
 def maybe_remove_ubuntu_network_config_scripts(paths=None):
     """Remove Azure-specific ubuntu network config for non-primary nics.
 
@@ -1269,14 +1342,20 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
 
 
 def _is_platform_viable(seed_dir):
-    """Check platform environment to report if this datasource may run."""
-    asset_tag = util.read_dmi_data('chassis-asset-tag')
-    if asset_tag == AZURE_CHASSIS_ASSET_TAG:
-        return True
-    LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
-    if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
-        return True
-    return False
+    """Check platform environment to report if this datasource may run."""
+    with events.ReportEventStack(
+            name="check-platform-viability",
+            description="found azure asset tag",
+            parent=azure_ds_reporter) as evt:
+        asset_tag = util.read_dmi_data('chassis-asset-tag')
+        if asset_tag == AZURE_CHASSIS_ASSET_TAG:
+            return True
+        LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
+        evt.description = (
+            "Non-Azure DMI asset tag '%s' discovered." % asset_tag)
+        if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
+            return True
+        return False
 
 
 class BrokenAzureDataSource(Exception):
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 4f2f6cc..ac28f1d 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -334,8 +334,12 @@ class DataSourceEc2(sources.DataSource):
             if isinstance(net_md, dict):
                 result = convert_ec2_metadata_network_config(
                     net_md, macs_to_nics=macs_to_nics, fallback_nic=iface)
-                # RELEASE_BLOCKER: Xenial debian/postinst needs to add
-                # EventType.BOOT on upgrade path for classic.
+
+                # RELEASE_BLOCKER: xenial should drop the if statement below,
+                # because the issue being addressed doesn't exist pre-netplan.
+                # (This datasource doesn't implement check_instance_id(), so
+                # the datasource object is recreated every boot; this means we
+                # don't need to modify update_events on cloud-init upgrade.)
 
                 # Non-VPC (aka Classic) Ec2 instances need to rewrite the
                 # network config file every boot due to MAC address change.
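The decorator applied throughout the Azure hunks above, azure_ds_telemetry_reporter (defined in cloudinit/sources/helpers/azure.py later in this diff), wraps each call in a ReportEventStack context so entry and exit are emitted as reporting events. Below is a minimal self-contained sketch of the same pattern, using a print-based stand-in for cloud-init's event stack; the stand-in names are illustrative only, not cloud-init API:

    import functools
    from contextlib import contextmanager

    @contextmanager
    def event_stack(name, description):
        # Stand-in for cloudinit.reporting.events.ReportEventStack.
        print('start: %s (%s)' % (name, description))
        try:
            yield
        finally:
            print('finish: %s' % name)

    def telemetry_reporter(func):
        # functools.wraps keeps the wrapped function's name and docstring on
        # the wrapper; the decorator proposed in this branch omits it, which
        # is harmless for reporting but hides the real name from
        # introspection.
        @functools.wraps(func)
        def impl(*args, **kwargs):
            with event_stack(func.__name__, func.__name__):
                return func(*args, **kwargs)
        return impl

    @telemetry_reporter
    def crawl_metadata():
        return {'source': 'azure'}

    crawl_metadata()  # prints start/finish events around the call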
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 6860f0c..fcf5d58 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -106,7 +106,9 @@ class DataSourceNoCloud(sources.DataSource): fslist = util.find_devs_with("TYPE=vfat") fslist.extend(util.find_devs_with("TYPE=iso9660")) - label_list = util.find_devs_with("LABEL=%s" % label) + label_list = util.find_devs_with("LABEL=%s" % label.upper()) + label_list.extend(util.find_devs_with("LABEL=%s" % label.lower())) + devlist = list(set(fslist) & set(label_list)) devlist.sort(reverse=True) diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index b573b38..54bfc1f 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -171,10 +171,11 @@ def query_data_api(api_type, api_address, retries, timeout): class DataSourceScaleway(sources.DataSource): dsname = "Scaleway" - update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]} def __init__(self, sys_cfg, distro, paths): super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) + self.update_events = { + 'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}} self.ds_cfg = util.mergemanydict([ util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}), diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index e6966b3..1604932 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -164,9 +164,6 @@ class DataSource(object): # A datasource which supports writing network config on each system boot # would call update_events['network'].add(EventType.BOOT). - # Default: generate network config on new instance id (first boot). - update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])} - # N-tuple listing default values for any metadata-related class # attributes cached on an instance by a process_data runs. These attribute # values are reset via clear_cached_attrs during any update_metadata call. @@ -191,6 +188,9 @@ class DataSource(object): self.vendordata = None self.vendordata_raw = None + # Default: generate network config on new instance id (first boot). 
+ self.update_events = {'network': {EventType.BOOT_NEW_INSTANCE}} + self.ds_cfg = util.get_cfg_by_path( self.sys_cfg, ("datasource", self.dsname), {}) if not self.ds_cfg: diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py old mode 100644 new mode 100755 index 2829dd2..d3af05e --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -16,10 +16,27 @@ from xml.etree import ElementTree from cloudinit import url_helper from cloudinit import util +from cloudinit.reporting import events LOG = logging.getLogger(__name__) +azure_ds_reporter = events.ReportEventStack( + name="azure-ds", + description="initialize reporter for azure ds", + reporting_enabled=True) + + +def azure_ds_telemetry_reporter(func): + def impl(*args, **kwargs): + with events.ReportEventStack( + name=func.__name__, + description=func.__name__, + parent=azure_ds_reporter): + return func(*args, **kwargs) + return impl + + @contextmanager def cd(newdir): prevdir = os.getcwd() @@ -119,6 +136,7 @@ class OpenSSLManager(object): def clean_up(self): util.del_dir(self.tmpdir) + @azure_ds_telemetry_reporter def generate_certificate(self): LOG.debug('Generating certificate for communication with fabric...') if self.certificate is not None: @@ -139,17 +157,20 @@ class OpenSSLManager(object): LOG.debug('New certificate generated.') @staticmethod + @azure_ds_telemetry_reporter def _run_x509_action(action, cert): cmd = ['openssl', 'x509', '-noout', action] result, _ = util.subp(cmd, data=cert) return result + @azure_ds_telemetry_reporter def _get_ssh_key_from_cert(self, certificate): pub_key = self._run_x509_action('-pubkey', certificate) keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] ssh_key, _ = util.subp(keygen_cmd, data=pub_key) return ssh_key + @azure_ds_telemetry_reporter def _get_fingerprint_from_cert(self, certificate): """openssl x509 formats fingerprints as so: 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ @@ -163,6 +184,7 @@ class OpenSSLManager(object): octets = raw_fp[eq+1:-1].split(':') return ''.join(octets) + @azure_ds_telemetry_reporter def _decrypt_certs_from_xml(self, certificates_xml): """Decrypt the certificates XML document using the our private key; return the list of certs and private keys contained in the doc. 
@@ -185,6 +207,7 @@ class OpenSSLManager(object): shell=True, data=b'\n'.join(lines)) return out + @azure_ds_telemetry_reporter def parse_certificates(self, certificates_xml): """Given the Certificates XML document, return a dictionary of fingerprints and associated SSH keys derived from the certs.""" @@ -265,11 +288,13 @@ class WALinuxAgentShim(object): return socket.inet_ntoa(packed_bytes) @staticmethod + @azure_ds_telemetry_reporter def _networkd_get_value_from_leases(leases_d=None): return dhcp.networkd_get_option_from_leases( 'OPTION_245', leases_d=leases_d) @staticmethod + @azure_ds_telemetry_reporter def _get_value_from_leases_file(fallback_lease_file): leases = [] content = util.load_file(fallback_lease_file) @@ -287,6 +312,7 @@ class WALinuxAgentShim(object): return leases[-1] @staticmethod + @azure_ds_telemetry_reporter def _load_dhclient_json(): dhcp_options = {} hooks_dir = WALinuxAgentShim._get_hooks_dir() @@ -305,6 +331,7 @@ class WALinuxAgentShim(object): return dhcp_options @staticmethod + @azure_ds_telemetry_reporter def _get_value_from_dhcpoptions(dhcp_options): if dhcp_options is None: return None @@ -318,6 +345,7 @@ class WALinuxAgentShim(object): return _value @staticmethod + @azure_ds_telemetry_reporter def find_endpoint(fallback_lease_file=None, dhcp245=None): value = None if dhcp245 is not None: @@ -352,6 +380,7 @@ class WALinuxAgentShim(object): LOG.debug('Azure endpoint found at %s', endpoint_ip_address) return endpoint_ip_address + @azure_ds_telemetry_reporter def register_with_azure_and_fetch_data(self, pubkey_info=None): if self.openssl_manager is None: self.openssl_manager = OpenSSLManager() @@ -404,6 +433,7 @@ class WALinuxAgentShim(object): return keys + @azure_ds_telemetry_reporter def _report_ready(self, goal_state, http_client): LOG.debug('Reporting ready to Azure fabric.') document = self.REPORT_READY_XML_TEMPLATE.format( @@ -419,6 +449,7 @@ class WALinuxAgentShim(object): LOG.info('Reported ready to Azure fabric.') +@azure_ds_telemetry_reporter def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, pubkey_info=None): shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 6378e98..cb1912b 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -575,6 +575,21 @@ class TestDataSource(CiTestCase): " events: New instance first boot", self.logs.getvalue()) + def test_data_sources_cant_mutate_update_events_for_others(self): + """update_events shouldn't be changed for other DSes (LP: #1819913)""" + + class ModifyingDS(DataSource): + + def __init__(self, sys_cfg, distro, paths): + # This mirrors what DataSourceAzure does which causes LP: + # #1819913 + DataSource.__init__(self, sys_cfg, distro, paths) + self.update_events['network'].add(EventType.BOOT) + + before_update_events = copy.deepcopy(self.datasource.update_events) + ModifyingDS(self.sys_cfg, self.distro, self.paths) + self.assertEqual(before_update_events, self.datasource.update_events) + class TestRedactSensitiveData(CiTestCase): diff --git a/cloudinit/util.py b/cloudinit/util.py index a192091..385f231 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -703,6 +703,21 @@ def get_cfg_option_list(yobj, key, default=None): # get a cfg entry by its path array # for f['a']['b']: get_cfg_by_path(mycfg,('a','b')) def get_cfg_by_path(yobj, keyp, default=None): + """Return the value of the item at path C{keyp} in C{yobj}. 
+
+    example:
+      get_cfg_by_path({'a': {'b': {'num': 4}}}, 'a/b/num') == 4
+      get_cfg_by_path({'a': {'b': {'num': 4}}}, 'c/d') == None
+
+    @param yobj: A dictionary.
+    @param keyp: A path inside yobj. It can be a '/'-delimited string or
+       an iterable.
+    @param default: The default to return if the path does not exist.
+    @return: The value of the item at keyp, or default if the path
+       is not found."""
+
+    if isinstance(keyp, six.string_types):
+        keyp = keyp.split("/")
     cur = yobj
     for tok in keyp:
         if tok not in cur:
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index 7513176..25db43e 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -112,6 +112,9 @@ cloud_final_modules:
  - landscape
  - lxd
 {% endif %}
+{% if variant in ["ubuntu", "unknown"] %}
+ - ubuntu-drivers
+{% endif %}
 {% if variant not in ["freebsd"] %}
  - puppet
  - chef
diff --git a/debian/changelog b/debian/changelog
index ac376ab..f869278 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,32 @@
+cloud-init (18.5-61-gb76714c3-0ubuntu1) disco; urgency=medium
+
+  * New upstream snapshot.
+    - Change DataSourceNoCloud to ignore file system label's case.
+      [Risto Oikarinen]
+    - cmd:main.py: Fix missing 'modules-init' key in modes dict
+      [Antonio Romito] (LP: #1815109)
+    - ubuntu_advantage: rewrite cloud-config module
+    - Azure: Treat _unset network configuration as if it were absent
+      [Jason Zions (MSFT)] (LP: #1823084)
+    - DatasourceAzure: add additional logging for azure datasource [Anh Vo]
+    - cloud_tests: fix apt_pipelining test-cases
+    - Azure: Ensure platform random_seed is always serializable as JSON.
+      [Jason Zions (MSFT)]
+    - net/sysconfig: write out SUSE-compatible IPv6 config [Robert Schweikert]
+    - tox: Update testenv for openSUSE Leap to 15.0 [Thomas Bechtold]
+    - net: Fix ipv6 static routes when using eni renderer
+      [Raphael Glon] (LP: #1818669)
+    - Add ubuntu_drivers config module
+    - doc: Refresh Azure walinuxagent docs
+    - tox: bump pylint version to latest (2.3.1)
+    - DataSource: move update_events from a class to an instance attribute
+      (LP: #1819913)
+    - net/sysconfig: Handle default route setup for dhcp configured NICs
+      [Robert Schweikert] (LP: #1812117)
+    - DataSourceEc2: update RELEASE_BLOCKER to be more accurate
+
+ -- Daniel Watkins <oddbl...@ubuntu.com>  Wed, 10 Apr 2019 11:49:03 -0400
+
 cloud-init (18.5-45-g3554ffe8-0ubuntu1) disco; urgency=medium
 
   * New upstream snapshot.
diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst
index 720a475..b41cddd 100644
--- a/doc/rtd/topics/datasources/azure.rst
+++ b/doc/rtd/topics/datasources/azure.rst
@@ -5,9 +5,30 @@ Azure
 
 This datasource finds metadata and user-data from the Azure cloud platform.
 
-Azure Platform
---------------
-The azure cloud-platform provides initial data to an instance via an attached
+walinuxagent
+------------
+walinuxagent has several functions within images. For cloud-init
+specifically, the relevant functionality it performs is to register the
+instance with the Azure cloud platform at boot so networking will be
+permitted. For more information about walinuxagent's other functionality,
+see `Azure's documentation
+<https://github.com/Azure/WALinuxAgent#introduction>`_.
+(Note, however, that instance customisation should be performed by either
+walinuxagent's provisioning or cloud-init, not both.)
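The get_cfg_by_path docstring added to cloudinit/util.py above is easiest to verify against a runnable restatement. The following is a sketch of the documented contract only, not the cloud-init implementation (which also accepts Python 2 strings via six.string_types):

    def get_cfg_by_path(yobj, keyp, default=None):
        # A '/'-delimited string and an iterable of keys are both accepted.
        if isinstance(keyp, str):
            keyp = keyp.split("/")
        cur = yobj
        for tok in keyp:
            if not isinstance(cur, dict) or tok not in cur:
                return default
            cur = cur[tok]
        return cur

    cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
    assert get_cfg_by_path(cfg, 'datasource/Azure/apply_network_config') is True
    assert get_cfg_by_path(cfg, ('datasource', 'NoCloud'), default={}) == {}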
+
+If you are configuring walinuxagent yourself, you will want to ensure that
+you have `Provisioning.UseCloudInit
+<https://github.com/Azure/WALinuxAgent#provisioningusecloudinit>`_ set to
+``y``.
+
+
+Builtin Agent
+-------------
+An alternative to using walinuxagent to register with the Azure cloud
+platform is to use the ``__builtin__`` agent command. This section contains
+more background on what that code path does, and how to enable it.
+
+The Azure cloud platform provides initial data to an instance via an attached
+CD formatted in UDF. That CD contains an 'ovf-env.xml' file that provides
+some information. Additional information is obtained via interaction with
+the "endpoint".
@@ -36,25 +57,17 @@ for the endpoint server (again option 245).
 You can define the path to the lease file with the 'dhclient_lease_file'
 configuration.
 
-walinuxagent
-------------
-In order to operate correctly, cloud-init needs walinuxagent to provide much
-of the interaction with azure.  In addition to "provisioning" code, walinux
-does the following on the agent is a long running daemon that handles the
-following things:
-- generate a x509 certificate and send that to the endpoint
-
-waagent.conf config
-^^^^^^^^^^^^^^^^^^^
-in order to use waagent.conf with cloud-init, the following settings are recommended.  Other values can be changed or set to the defaults.
-
- ::
-
-  # disabling provisioning turns off all 'Provisioning.*' function
-  Provisioning.Enabled=n
-  # this is currently not handled by cloud-init, so let walinuxagent do it.
-  ResourceDisk.Format=y
-  ResourceDisk.MountPoint=/mnt
+
+IMDS
+----
+Azure provides the `instance metadata service (IMDS)
+<https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service>`_
+which is a REST service on ``169.254.169.254`` providing additional
+configuration information to the instance. Cloud-init uses the IMDS for:
+
+- network configuration for the instance which is applied per boot
+- a preprovisioning gate that blocks instance configuration until the Azure
+  fabric is ready to provision
 
 
 Configuration
diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
index 08578e8..1c5cf96 100644
--- a/doc/rtd/topics/datasources/nocloud.rst
+++ b/doc/rtd/topics/datasources/nocloud.rst
@@ -9,7 +9,7 @@ network at all).
 
 You can provide meta-data and user-data to a local vm boot via files on a
 `vfat`_ or `iso9660`_ filesystem. The filesystem volume label must be
-``cidata``.
+``cidata`` or ``CIDATA``.
 
 Alternatively, you can provide meta-data via kernel command line or SMBIOS
 "serial number" option. The data must be passed in the form of a string:
diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst
index d9720f6..3dcdd3b 100644
--- a/doc/rtd/topics/modules.rst
+++ b/doc/rtd/topics/modules.rst
@@ -54,6 +54,7 @@ Modules
 .. automodule:: cloudinit.config.cc_ssh_import_id
 .. automodule:: cloudinit.config.cc_timezone
 .. automodule:: cloudinit.config.cc_ubuntu_advantage
+.. automodule:: cloudinit.config.cc_ubuntu_drivers
 .. automodule:: cloudinit.config.cc_update_etc_hosts
 .. automodule:: cloudinit.config.cc_update_hostname
 .. 
automodule:: cloudinit.config.cc_users_groups
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml
index bd9b5d0..22a31dc 100644
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml
+++ b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml
@@ -5,8 +5,7 @@ required_features:
   - apt
 cloud_config: |
   #cloud-config
-  apt:
-    apt_pipelining: false
+  apt_pipelining: false
 collect_scripts:
   90cloud-init-pipelining: |
     #!/bin/bash
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py
index 740dc7c..2b940a6 100644
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py
+++ b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py
@@ -8,8 +8,8 @@ class TestAptPipeliningOS(base.CloudTestCase):
     """Test apt-pipelining module."""
 
     def test_os_pipelining(self):
-        """Test pipelining set to os."""
-        out = self.get_data_file('90cloud-init-pipelining')
-        self.assertIn('Acquire::http::Pipeline-Depth "0";', out)
+        """Test that the 'os' setting does not write an apt config file."""
+        out = self.get_data_file('90cloud-init-pipelining_not_written')
+        self.assertEqual(0, int(out))
 
 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml
index cbed3ba..86d5220 100644
--- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml
+++ b/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml
@@ -1,15 +1,14 @@
 #
-# Set apt pipelining value to OS
+# Set apt pipelining value to OS, no conf written
 #
 required_features:
   - apt
 cloud_config: |
   #cloud-config
-  apt:
-    apt_pipelining: os
+  apt_pipelining: os
 collect_scripts:
-  90cloud-init-pipelining: |
+  90cloud-init-pipelining_not_written: |
     #!/bin/bash
-    cat /etc/apt/apt.conf.d/90cloud-init-pipelining
+    ls /etc/apt/apt.conf.d/90cloud-init-pipelining | wc -l
 
 # vi: ts=4 expandtab
diff --git a/tests/data/azure/non_unicode_random_string b/tests/data/azure/non_unicode_random_string
new file mode 100644
index 0000000..b9ecefb
--- /dev/null
+++ b/tests/data/azure/non_unicode_random_string
@@ -0,0 +1 @@
+OEM0d\x00\x00\x00\x01\x80VRTUALMICROSFT\x02\x17\x00\x06MSFT\x97\x00\x00\x00C\xb4{V\xf4X%\x061x\x90\x1c\xfen\x86\xbf~\xf5\x8c\x94&\x88\xed\x84\xf9B\xbd\xd3\xf1\xdb\xee:\xd9\x0fc\x0e\x83(\xbd\xe3'\xfc\x85,\xdf\xf4\x13\x99N\xc5\xf3Y\x1e\xe3\x0b\xa4H\x08J\xb9\xdcdb$
\ No newline at end of file
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 6b05b8f..53c56cd 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -7,11 +7,11 @@ from cloudinit.sources import (
     UNSET, DataSourceAzure as dsaz, InvalidMetaDataException)
 from cloudinit.util import (b64e, decode_binary, load_file, write_file,
                             find_freebsd_part, get_path_dev_freebsd,
-                            MountFailedError)
+                            MountFailedError, json_dumps, load_json)
 from cloudinit.version import version_string as vs
 from cloudinit.tests.helpers import (
     HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call,
-    ExitStack)
+    ExitStack, resourceLocation)
 
 import crypt
 import httpretty
@@ -1923,4 +1923,24 @@ class TestWBIsPlatformViable(CiTestCase):
             self.logs.getvalue())
 
 
+class TestRandomSeed(CiTestCase):
+    """Test proper handling of random_seed"""
+
+    def test_non_ascii_seed_is_serializable(self):
+        """Pass if a random string from the Azure infrastructure
which + contains at least one non-Unicode character can be converted to/from + JSON without alteration and without throwing an exception. + """ + path = resourceLocation("azure/non_unicode_random_string") + result = dsaz._get_random_seed(path) + + obj = {'seed': result} + try: + serialized = json_dumps(obj) + deserialized = load_json(serialized) + except UnicodeDecodeError: + self.fail("Non-serializable random seed returned") + + self.assertEqual(deserialized['seed'], result) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 3429272..b785362 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -32,6 +32,36 @@ class TestNoCloudDataSource(CiTestCase): self.mocks.enter_context( mock.patch.object(util, 'read_dmi_data', return_value=None)) + def _test_fs_config_is_read(self, fs_label, fs_label_to_search): + vfat_device = 'device-1' + + def m_mount_cb(device, callback, mtype): + if (device == vfat_device): + return {'meta-data': yaml.dump({'instance-id': 'IID'})} + else: + return {} + + def m_find_devs_with(query='', path=''): + if 'TYPE=vfat' == query: + return [vfat_device] + elif 'LABEL={}'.format(fs_label) == query: + return [vfat_device] + else: + return [] + + self.mocks.enter_context( + mock.patch.object(util, 'find_devs_with', + side_effect=m_find_devs_with)) + self.mocks.enter_context( + mock.patch.object(util, 'mount_cb', + side_effect=m_mount_cb)) + sys_cfg = {'datasource': {'NoCloud': {'fs_label': fs_label_to_search}}} + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + + self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') + self.assertTrue(ret) + def test_nocloud_seed_dir_on_lxd(self, m_is_lxd): md = {'instance-id': 'IID', 'dsmode': 'local'} ud = b"USER_DATA_HERE" @@ -90,6 +120,18 @@ class TestNoCloudDataSource(CiTestCase): ret = dsrc.get_data() self.assertFalse(ret) + def test_fs_config_lowercase_label(self, m_is_lxd): + self._test_fs_config_is_read('cidata', 'cidata') + + def test_fs_config_uppercase_label(self, m_is_lxd): + self._test_fs_config_is_read('CIDATA', 'cidata') + + def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd): + self._test_fs_config_is_read('cidata', 'CIDATA') + + def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd): + self._test_fs_config_is_read('CIDATA', 'CIDATA') + def test_no_datasource_expected(self, m_is_lxd): # no source should be found if no cmdline, config, and fs_label=None sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index f96bf0a..3bfd752 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -7,6 +7,7 @@ import requests from cloudinit import helpers from cloudinit import settings +from cloudinit.event import EventType from cloudinit.sources import DataSourceScaleway from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase @@ -403,3 +404,9 @@ class TestDataSourceScaleway(HttprettyTestCase): netcfg = self.datasource.network_config self.assertEqual(netcfg, '0xdeadbeef') + + def test_update_events_is_correct(self): + """ensure update_events contains correct data""" + self.assertEqual( + {'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}, + self.datasource.update_events) diff --git 
a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index e453040..c3c0c8c 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -496,6 +496,7 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): BOOTPROTO=none DEFROUTE=yes DEVICE=eth0 + IPADDR6=2607:f0d0:1002:0011::2/64 IPV6ADDR=2607:f0d0:1002:0011::2/64 IPV6INIT=yes IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 @@ -588,6 +589,7 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): BOOTPROTO=none DEFROUTE=yes DEVICE=eth0 + IPADDR6=2607:f0d0:1002:0011::2/64 IPV6ADDR=2607:f0d0:1002:0011::2/64 IPV6INIT=yes IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index d00c1b4..8c18aa1 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -520,6 +520,10 @@ class TestDsIdentify(DsIdentifyBase): """NoCloud is found with iso9660 filesystem on non-cdrom disk.""" self._test_ds_found('NoCloud') + def test_nocloud_upper(self): + """NoCloud is found with uppercase filesystem label.""" + self._test_ds_found('NoCloudUpper') + def test_nocloud_seed(self): """Nocloud seed directory.""" self._test_ds_found('NoCloud-seed') @@ -713,6 +717,19 @@ VALID_CFG = { 'dev/vdb': 'pretend iso content for cidata\n', } }, + 'NoCloudUpper': { + 'ds': 'NoCloud', + 'mocks': [ + MOCK_VIRT_IS_KVM, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + BLKID_UEFI_UBUNTU + + [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'CIDATA'}])}, + ], + 'files': { + 'dev/vdb': 'pretend iso content for cidata\n', + } + }, 'NoCloud-seed': { 'ds': 'NoCloud', 'files': { diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py index 1bad07f..e69a47a 100644 --- a/tests/unittests/test_handler/test_schema.py +++ b/tests/unittests/test_handler/test_schema.py @@ -28,6 +28,7 @@ class GetSchemaTest(CiTestCase): 'cc_runcmd', 'cc_snap', 'cc_ubuntu_advantage', + 'cc_ubuntu_drivers', 'cc_zypper_add_repo' ], [subschema['id'] for subschema in schema['allOf']]) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index e3b9e02..fd03deb 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -691,6 +691,9 @@ DEVICE=eth0 GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 IPADDR=172.19.1.34 +IPADDR6=2001:DB8::10/64 +IPADDR6_0=2001:DB9::10/64 +IPADDR6_2=2001:DB10::10/64 IPV6ADDR=2001:DB8::10/64 IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" IPV6INIT=yes @@ -729,6 +732,9 @@ DEVICE=eth0 GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 IPADDR=172.19.1.34 +IPADDR6=2001:DB8::10/64 +IPADDR6_0=2001:DB9::10/64 +IPADDR6_2=2001:DB10::10/64 IPV6ADDR=2001:DB8::10/64 IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" IPV6INIT=yes @@ -860,6 +866,7 @@ NETWORK_CONFIGS = { BOOTPROTO=dhcp DEFROUTE=yes DEVICE=eth99 + DHCLIENT_SET_DEFAULT_ROUTE=yes DNS1=8.8.8.8 DNS2=8.8.4.4 DOMAIN="barley.maas sach.maas" @@ -979,6 +986,7 @@ NETWORK_CONFIGS = { BOOTPROTO=none DEVICE=iface0 IPADDR=192.168.14.2 + IPADDR6=2001:1::1/64 IPV6ADDR=2001:1::1/64 IPV6INIT=yes NETMASK=255.255.255.0 @@ -1113,8 +1121,8 @@ iface eth0.101 inet static iface eth0.101 inet static address 192.168.2.10/24 -post-up route add -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true -pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true +post-up route add -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +pre-down route del -net 10.0.0.0/8 gw 
11.0.0.1 metric 3 || true """), 'expected_netplan': textwrap.dedent(""" network: @@ -1234,6 +1242,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true 'ifcfg-bond0.200': textwrap.dedent("""\ BOOTPROTO=dhcp DEVICE=bond0.200 + DHCLIENT_SET_DEFAULT_ROUTE=no NM_CONTROLLED=no ONBOOT=yes PHYSDEV=bond0 @@ -1247,6 +1256,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true DEFROUTE=yes DEVICE=br0 IPADDR=192.168.14.2 + IPADDR6=2001:1::1/64 IPV6ADDR=2001:1::1/64 IPV6INIT=yes IPV6_DEFAULTGW=2001:4800:78ff:1b::1 @@ -1333,6 +1343,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true 'ifcfg-eth5': textwrap.dedent("""\ BOOTPROTO=dhcp DEVICE=eth5 + DHCLIENT_SET_DEFAULT_ROUTE=no HWADDR=98:bb:9f:2c:e8:8a NM_CONTROLLED=no ONBOOT=no @@ -1505,17 +1516,18 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true - gateway: 192.168.0.3 netmask: 255.255.255.0 network: 10.1.3.0 - - gateway: 2001:67c:1562:1 - network: 2001:67c:1 - netmask: ffff:ffff:0 - - gateway: 3001:67c:1562:1 - network: 3001:67c:1 - netmask: ffff:ffff:0 - metric: 10000 - type: static address: 192.168.1.2/24 - type: static address: 2001:1::1/92 + routes: + - gateway: 2001:67c:1562:1 + network: 2001:67c:1 + netmask: ffff:ffff:0 + - gateway: 3001:67c:1562:1 + network: 3001:67c:1 + netmask: ffff:ffff:0 + metric: 10000 """), 'expected_netplan': textwrap.dedent(""" network: @@ -1554,6 +1566,51 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true to: 3001:67c:1/32 via: 3001:67c:1562:1 """), + 'expected_eni': textwrap.dedent("""\ +auto lo +iface lo inet loopback + +auto bond0s0 +iface bond0s0 inet manual + bond-master bond0 + bond-mode active-backup + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +auto bond0s1 +iface bond0s1 inet manual + bond-master bond0 + bond-mode active-backup + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +auto bond0 +iface bond0 inet static + address 192.168.0.2/24 + gateway 192.168.0.1 + bond-mode active-backup + bond-slaves none + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + hwaddress aa:bb:cc:dd:e8:ff + mtu 9000 + post-up route add -net 10.1.3.0/24 gw 192.168.0.3 || true + pre-down route del -net 10.1.3.0/24 gw 192.168.0.3 || true + +# control-alias bond0 +iface bond0 inet static + address 192.168.1.2/24 + +# control-alias bond0 +iface bond0 inet6 static + address 2001:1::1/92 + post-up route add -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true + pre-down route del -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true + post-up route add -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \ +|| true + pre-down route del -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \ +|| true + """), 'yaml-v2': textwrap.dedent(""" version: 2 ethernets: @@ -1641,6 +1698,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true MACADDR=aa:bb:cc:dd:e8:ff IPADDR=192.168.0.2 IPADDR1=192.168.1.2 + IPADDR6=2001:1::1/92 IPV6ADDR=2001:1::1/92 IPV6INIT=yes MTU=9000 @@ -1696,6 +1754,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true MACADDR=aa:bb:cc:dd:e8:ff IPADDR=192.168.0.2 IPADDR1=192.168.1.2 + IPADDR6=2001:1::1/92 IPV6ADDR=2001:1::1/92 IPV6INIT=yes MTU=9000 @@ -1786,6 +1845,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true GATEWAY=192.168.1.1 IPADDR=192.168.2.2 IPADDR1=192.168.1.2 + IPADDR6=2001:1::bbbb/96 IPV6ADDR=2001:1::bbbb/96 IPV6INIT=yes IPV6_DEFAULTGW=2001:1::1 @@ -1847,6 
+1907,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true BRIDGE=br0 DEVICE=eth0 HWADDR=52:54:00:12:34:00 + IPADDR6=2001:1::100/96 IPV6ADDR=2001:1::100/96 IPV6INIT=yes NM_CONTROLLED=no @@ -1860,6 +1921,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true BRIDGE=br0 DEVICE=eth1 HWADDR=52:54:00:12:34:01 + IPADDR6=2001:1::101/96 IPV6ADDR=2001:1::101/96 IPV6INIT=yes NM_CONTROLLED=no @@ -1988,6 +2050,23 @@ CONFIG_V1_SIMPLE_SUBNET = { 'type': 'static'}], 'type': 'physical'}]} +CONFIG_V1_MULTI_IFACE = { + 'version': 1, + 'config': [{'type': 'physical', + 'mtu': 1500, + 'subnets': [{'type': 'static', + 'netmask': '255.255.240.0', + 'routes': [{'netmask': '0.0.0.0', + 'network': '0.0.0.0', + 'gateway': '51.68.80.1'}], + 'address': '51.68.89.122', + 'ipv4': True}], + 'mac_address': 'fa:16:3e:25:b4:59', + 'name': 'eth0'}, + {'type': 'physical', + 'mtu': 9000, + 'subnets': [{'type': 'dhcp4'}], + 'mac_address': 'fa:16:3e:b1:ca:29', 'name': 'eth1'}]} DEFAULT_DEV_ATTRS = { 'eth1000': { @@ -2460,6 +2539,49 @@ USERCTL=no respath = '/etc/resolv.conf' self.assertNotIn(respath, found.keys()) + def test_network_config_v1_multi_iface_samples(self): + ns = network_state.parse_net_config_data(CONFIG_V1_MULTI_IFACE) + render_dir = self.tmp_path("render") + os.makedirs(render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) + found = dir2dict(render_dir) + nspath = '/etc/sysconfig/network-scripts/' + self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) + expected_i1 = """\ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=none +DEFROUTE=yes +DEVICE=eth0 +GATEWAY=51.68.80.1 +HWADDR=fa:16:3e:25:b4:59 +IPADDR=51.68.89.122 +MTU=1500 +NETMASK=255.255.240.0 +NM_CONTROLLED=no +ONBOOT=yes +STARTMODE=auto +TYPE=Ethernet +USERCTL=no +""" + self.assertEqual(expected_i1, found[nspath + 'ifcfg-eth0']) + expected_i2 = """\ +# Created by cloud-init on instance boot automatically, do not edit. 
+# +BOOTPROTO=dhcp +DEVICE=eth1 +DHCLIENT_SET_DEFAULT_ROUTE=no +HWADDR=fa:16:3e:b1:ca:29 +MTU=9000 +NM_CONTROLLED=no +ONBOOT=yes +STARTMODE=auto +TYPE=Ethernet +USERCTL=no +""" + self.assertEqual(expected_i2, found[nspath + 'ifcfg-eth1']) + def test_config_with_explicit_loopback(self): ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) render_dir = self.tmp_path("render") @@ -2634,6 +2756,7 @@ USERCTL=no GATEWAY=192.168.42.1 HWADDR=52:54:00:ab:cd:ef IPADDR=192.168.42.100 + IPADDR6=2001:db8::100/32 IPV6ADDR=2001:db8::100/32 IPV6INIT=yes IPV6_DEFAULTGW=2001:db8::1 @@ -3570,17 +3693,17 @@ class TestEniRoundTrip(CiTestCase): 'iface eth0 inet static', ' address 172.23.31.42/26', ' gateway 172.23.31.2', - ('post-up route add -net 10.0.0.0 netmask 255.240.0.0 gw ' + ('post-up route add -net 10.0.0.0/12 gw ' '172.23.31.1 metric 0 || true'), - ('pre-down route del -net 10.0.0.0 netmask 255.240.0.0 gw ' + ('pre-down route del -net 10.0.0.0/12 gw ' '172.23.31.1 metric 0 || true'), - ('post-up route add -net 192.168.2.0 netmask 255.255.0.0 gw ' + ('post-up route add -net 192.168.2.0/16 gw ' '172.23.31.1 metric 0 || true'), - ('pre-down route del -net 192.168.2.0 netmask 255.255.0.0 gw ' + ('pre-down route del -net 192.168.2.0/16 gw ' '172.23.31.1 metric 0 || true'), - ('post-up route add -net 10.0.200.0 netmask 255.255.0.0 gw ' + ('post-up route add -net 10.0.200.0/16 gw ' '172.23.31.1 metric 1 || true'), - ('pre-down route del -net 10.0.200.0 netmask 255.255.0.0 gw ' + ('pre-down route del -net 10.0.200.0/16 gw ' '172.23.31.1 metric 1 || true'), ] found = files['/etc/network/interfaces'].splitlines() @@ -3588,6 +3711,77 @@ class TestEniRoundTrip(CiTestCase): self.assertEqual( expected, [line for line in found if line]) + def test_ipv6_static_routes(self): + # as reported in bug 1818669 + conf = [ + {'name': 'eno3', 'type': 'physical', + 'subnets': [{ + 'address': 'fd00::12/64', + 'dns_nameservers': ['fd00:2::15'], + 'gateway': 'fd00::1', + 'ipv6': True, + 'type': 'static', + 'routes': [{'netmask': '32', + 'network': 'fd00:12::', + 'gateway': 'fd00::2'}, + {'network': 'fd00:14::', + 'gateway': 'fd00::3'}, + {'destination': 'fe00:14::/48', + 'gateway': 'fe00::4', + 'metric': 500}, + {'gateway': '192.168.23.1', + 'metric': 999, + 'netmask': 24, + 'network': '192.168.23.0'}, + {'destination': '10.23.23.0/24', + 'gateway': '10.23.23.2', + 'metric': 300}]}]}, + ] + + files = self._render_and_read( + network_config={'config': conf, 'version': 1}) + expected = [ + 'auto lo', + 'iface lo inet loopback', + 'auto eno3', + 'iface eno3 inet6 static', + ' address fd00::12/64', + ' dns-nameservers fd00:2::15', + ' gateway fd00::1', + (' post-up route add -A inet6 fd00:12::/32 gw ' + 'fd00::2 || true'), + (' pre-down route del -A inet6 fd00:12::/32 gw ' + 'fd00::2 || true'), + (' post-up route add -A inet6 fd00:14::/64 gw ' + 'fd00::3 || true'), + (' pre-down route del -A inet6 fd00:14::/64 gw ' + 'fd00::3 || true'), + (' post-up route add -A inet6 fe00:14::/48 gw ' + 'fe00::4 metric 500 || true'), + (' pre-down route del -A inet6 fe00:14::/48 gw ' + 'fe00::4 metric 500 || true'), + (' post-up route add -net 192.168.23.0/24 gw ' + '192.168.23.1 metric 999 || true'), + (' pre-down route del -net 192.168.23.0/24 gw ' + '192.168.23.1 metric 999 || true'), + (' post-up route add -net 10.23.23.0/24 gw ' + '10.23.23.2 metric 300 || true'), + (' pre-down route del -net 10.23.23.0/24 gw ' + '10.23.23.2 metric 300 || true'), + + ] + found = files['/etc/network/interfaces'].splitlines() + + 
self.assertEqual( + expected, [line for line in found if line]) + + def testsimple_render_bond(self): + entry = NETWORK_CONFIGS['bond'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + class TestNetRenderers(CiTestCase): @mock.patch("cloudinit.net.renderers.sysconfig.available") diff --git a/tools/ds-identify b/tools/ds-identify index b78b273..6518901 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -620,7 +620,7 @@ dscheck_MAAS() { } dscheck_NoCloud() { - local fslabel="cidata" d="" + local fslabel="cidata CIDATA" d="" case " ${DI_KERNEL_CMDLINE} " in *\ ds=nocloud*) return ${DS_FOUND};; esac @@ -632,9 +632,10 @@ dscheck_NoCloud() { check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND} done - if has_fs_with_label "${fslabel}"; then + if has_fs_with_label $fslabel; then return ${DS_FOUND} fi + return ${DS_NOT_FOUND} } @@ -762,7 +763,7 @@ is_cdrom_ovf() { # explicitly skip known labels of other types. rd_rdfe is azure. case "$label" in - config-2|CONFIG-2|rd_rdfe_stable*|cidata) return 1;; + config-2|CONFIG-2|rd_rdfe_stable*|cidata|CIDATA) return 1;; esac local idstr="http://schemas.dmtf.org/ovf/environment/1" diff --git a/tox.ini b/tox.ini index d371720..1f01eb7 100644 --- a/tox.ini +++ b/tox.ini @@ -21,7 +21,7 @@ setenv = basepython = python3 deps = # requirements - pylint==2.2.2 + pylint==2.3.1 # test-requirements because unit tests are now present in cloudinit tree -r{toxinidir}/test-requirements.txt commands = {envpython} -m pylint {posargs:cloudinit tests tools} @@ -96,19 +96,18 @@ deps = six==1.9.0 -r{toxinidir}/test-requirements.txt -[testenv:opensusel42] +[testenv:opensusel150] basepython = python2.7 commands = nosetests {posargs:tests/unittests cloudinit} deps = # requirements - argparse==1.3.0 - jinja2==2.8 - PyYAML==3.11 - oauthlib==0.7.2 + jinja2==2.10 + PyYAML==3.12 + oauthlib==2.0.6 configobj==5.0.6 - requests==2.11.1 - jsonpatch==1.11 - six==1.9.0 + requests==2.18.4 + jsonpatch==1.16 + six==1.11.0 -r{toxinidir}/test-requirements.txt [testenv:tip-pycodestyle]
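One closing illustration of the update_events change (LP: #1819913) exercised by the new DataSource and Scaleway tests above: the underlying bug class is the standard shared-mutable-class-attribute pitfall, which is why the dict moved from the class body into __init__. A self-contained sketch follows, with stand-in event names (plain strings here; EventType members in cloud-init):

    class DataSource(object):
        # Class attribute: one dict object shared by every subclass instance.
        update_events = {'network': {'boot-new-instance'}}

    class ModifyingDS(DataSource):
        def __init__(self):
            # Mutates the shared set, leaking 'boot' into other datasources.
            self.update_events['network'].add('boot')

    plain = DataSource()
    ModifyingDS()
    assert 'boot' in plain.update_events['network']  # the leak being fixed

    class FixedDataSource(object):
        def __init__(self):
            # Per-instance dict, as in this branch's
            # cloudinit/sources/__init__.py.
            self.update_events = {'network': {'boot-new-instance'}}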