Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked in at 2021-05-07 16:45:42
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.2988 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "crmsh" Fri May 7 16:45:42 2021 rev:208 rq:891152 version:4.3.0+20210507.bf02d791 Changes: -------- --- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes 2021-04-21 20:59:55.626251212 +0200 +++ /work/SRC/openSUSE:Factory/.crmsh.new.2988/crmsh.changes 2021-05-07 16:46:04.604225743 +0200 @@ -1,0 +2,23 @@ +Fri May 07 02:46:22 UTC 2021 - [email protected] + +- Update to version 4.3.0+20210507.bf02d791: + * Dev: bootstrap: raise exception and execute status_done on success + +------------------------------------------------------------------- +Fri May 07 02:14:21 UTC 2021 - [email protected] + +- Update to version 4.3.0+20210507.2bbd169d: + * Dev: unittest: adjust unittest for previous changes + * Dev: behave: add functional test for adding sbd on running cluster + * Dev: unittest: adjust unit test for adding sbd on existing cluster + * Fix: bootstrap: add sbd via bootstrap stage on an existing cluster (bsc#1181906) + * Fix: bootstrap: change StrictHostKeyChecking=no as a constants(bsc#1185437) + +------------------------------------------------------------------- +Thu May 06 02:04:16 UTC 2021 - [email protected] + +- Update to version 4.3.0+20210506.8cee9321: + * Dev: unittest: adjust unit test for the change of status_long + * Dev: bootstrap: change status_long with contextmanager + +------------------------------------------------------------------- Old: ---- crmsh-4.3.0+20210416.49f489c2.tar.bz2 New: ---- crmsh-4.3.0+20210507.bf02d791.tar.bz2 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Other differences: ------------------ ++++++ crmsh.spec ++++++ --- /var/tmp/diff_new_pack.ldnK9P/_old 2021-05-07 16:46:05.208223844 +0200 +++ /var/tmp/diff_new_pack.ldnK9P/_new 2021-05-07 16:46:05.208223844 +0200 @@ -36,7 +36,7 @@ Summary: High Availability cluster command-line interface License: GPL-2.0-or-later Group: %{pkg_group} -Version: 4.3.0+20210416.49f489c2 +Version: 4.3.0+20210507.bf02d791 Release: 0 URL: http://crmsh.github.io Source0: %{name}-%{version}.tar.bz2 ++++++ _servicedata ++++++ --- /var/tmp/diff_new_pack.ldnK9P/_old 2021-05-07 16:46:05.260223681 +0200 +++ /var/tmp/diff_new_pack.ldnK9P/_new 2021-05-07 16:46:05.260223681 +0200 @@ -9,6 +9,6 @@ </service> <service name="tar_scm"> <param name="url">https://github.com/ClusterLabs/crmsh.git</param> - <param name="changesrevision">49f489c2fad5b63d34c39c16dbe14e902551eef5</param> + <param name="changesrevision">bf02d7910c0978febb1b82a18be03f8134770dd1</param> </service> </servicedata> \ No newline at end of file ++++++ crmsh-4.3.0+20210416.49f489c2.tar.bz2 -> crmsh-4.3.0+20210507.bf02d791.tar.bz2 ++++++ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.0+20210416.49f489c2/crmsh/bootstrap.py new/crmsh-4.3.0+20210507.bf02d791/crmsh/bootstrap.py --- old/crmsh-4.3.0+20210416.49f489c2/crmsh/bootstrap.py 2021-04-16 17:29:55.000000000 +0200 +++ new/crmsh-4.3.0+20210507.bf02d791/crmsh/bootstrap.py 2021-05-07 04:34:39.000000000 +0200 @@ -23,6 +23,7 @@ from lxml import etree from pathlib import Path from enum import Enum +from contextlib import contextmanager from . import config from . import utils from . import xmlutil @@ -33,8 +34,7 @@ from . import term from . import lock from . import userdir -from . 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.0+20210416.49f489c2/crmsh/bootstrap.py new/crmsh-4.3.0+20210507.bf02d791/crmsh/bootstrap.py
--- old/crmsh-4.3.0+20210416.49f489c2/crmsh/bootstrap.py       2021-04-16 17:29:55.000000000 +0200
+++ new/crmsh-4.3.0+20210507.bf02d791/crmsh/bootstrap.py       2021-05-07 04:34:39.000000000 +0200
@@ -23,6 +23,7 @@
 from lxml import etree
 from pathlib import Path
 from enum import Enum
+from contextlib import contextmanager
 from . import config
 from . import utils
 from . import xmlutil
@@ -33,8 +34,7 @@
 from . import term
 from . import lock
 from . import userdir
-from . import constants
-
+from .constants import SSH_OPTION, QDEVICE_HELP_INFO

 LOG_FILE = "/var/log/crmsh/ha-cluster-bootstrap.log"
 CSYNC2_KEY = "/etc/csync2/key_hagroup"
@@ -114,6 +114,7 @@
         self.ui_context = None
         self.interfaces_inst = None
         self.with_other_user = True
+        self.cluster_is_running = None
         self.default_nic_list = []
         self.default_ip_list = []
         self.local_ip_list = []
@@ -143,20 +144,28 @@
                 cmds=self.qdevice_heuristics,
                 mode=self.qdevice_heuristics_mode)

+    def _validate_sbd_option(self):
+        """
+        Validate sbd options
+        """
+        if self.sbd_devices and self.diskless_sbd:
+            error("Can't use -s and -S options together")
+        if self.stage == "sbd":
+            if not self.sbd_devices and not self.diskless_sbd and self.yes_to_all:
+                error("Stage sbd should specify sbd device by -s or diskless sbd by -S option")
+            if utils.service_is_active("sbd.service"):
+                error("Cannot configure stage sbd: sbd.service already running!")
+            if self.cluster_is_running:
+                utils.check_all_nodes_reachable()
+
     def validate_option(self):
         """
         Validate options
         """
         if self.admin_ip:
-            try:
-                Validation.valid_admin_ip(self.admin_ip)
-            except ValueError as err:
-                error(err)
+            Validation.valid_admin_ip(self.admin_ip)
         if self.qdevice_inst:
-            try:
-                self.qdevice_inst.valid_attr()
-            except ValueError as err:
-                error(err)
+            self.qdevice_inst.valid_attr()
         if self.nic_list:
             if len(self.nic_list) > 2:
                 error("Maximum number of interface is 2")
@@ -166,6 +175,7 @@
             warn("--no-overwrite-sshkey option is deprecated since crmsh does not overwrite ssh keys by default anymore and will be removed in future versions")
         if self.type == "join" and self.watchdog:
             warn("-w option is deprecated and will be removed in future versions")
+        self._validate_sbd_option()

     def init_sbd_manager(self):
         self.sbd_manager = SBDManager(self.sbd_devices, self.diskless_sbd)
@@ -254,7 +264,7 @@
         """
         Given watchdog device name, get driver name on remote node
         """
-        cmd = "ssh -o StrictHostKeyChecking=no root@{} {}".format(self._peer_host, self.QUERY_CMD)
+        cmd = "ssh {} root@{} {}".format(SSH_OPTION, self._peer_host, self.QUERY_CMD)
         rc, out, err = utils.get_stdout_stderr(cmd)
         if rc == 0 and out:
             # output format might like:
@@ -374,6 +384,7 @@
         self.diskless_sbd = diskless_sbd
         self._sbd_devices = None
         self._watchdog_inst = None
+        self._stonith_watchdog_timeout = "10s"

     def _parse_sbd_device(self):
         """
@@ -393,7 +404,7 @@
         """
         cmd = "sbd -d {} dump".format(dev)
         if node:
-            cmd = "ssh -o StrictHostKeyChecking=no root@{} '{}'".format(node, cmd)
+            cmd = "ssh {} root@{} '{}'".format(SSH_OPTION, node, cmd)

         rc, out, err = utils.get_stdout_stderr(cmd)
         if rc != 0 and err:
@@ -509,6 +520,17 @@
         utils.sysconfig_set(SYSCONFIG_SBD, **sbd_config_dict)
         csync2_update(SYSCONFIG_SBD)

+    def _determine_stonith_watchdog_timeout(self):
+        """
+        Determine value of stonith-watchdog-timeout
+        """
+        conf = utils.parse_sysconfig(SYSCONFIG_SBD)
+        res = conf.get("SBD_WATCHDOG_TIMEOUT")
+        if res:
+            self._stonith_watchdog_timeout = -1
+        elif "390" in os.uname().machine:
+            self._stonith_watchdog_timeout = "30s"
+
     def _get_sbd_device_from_config(self):
         """
         Gets currently configured SBD device, i.e.
         what's in /etc/sysconfig/sbd
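A quick reader's sketch of the new stonith-watchdog-timeout logic above. The helper below is a hypothetical standalone rendering, with the sysconfig reading simplified to a plain argument; to my understanding, the negative value is what tells recent pacemaker to derive the timeout from SBD itself:

    import os

    def determine_stonith_watchdog_timeout(sbd_watchdog_timeout):
        # Mirrors SBDManager._determine_stonith_watchdog_timeout():
        # SBD_WATCHDOG_TIMEOUT set in /etc/sysconfig/sbd -> -1, which
        # lets pacemaker derive the timeout from SBD's own setting.
        if sbd_watchdog_timeout:
            return -1
        # s390 watchdogs are slower, so a longer fixed timeout is used.
        if "390" in os.uname().machine:
            return "30s"
        return "10s"  # the default the SBDManager starts with

    print(determine_stonith_watchdog_timeout("5"))   # -1
    print(determine_stonith_watchdog_timeout(None))  # "10s" on non-s390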
@@ -520,6 +542,48 @@
         else:
             return None

+    def _restart_cluster_and_configure_sbd_ra(self):
+        """
+        Try to configure sbd resource, restart cluster on needed
+        """
+        if not utils.has_resource_running():
+            status("Restarting cluster service")
+            utils.cluster_run_cmd("crm cluster restart")
+            wait_for_cluster()
+            self.configure_sbd_resource()
+        else:
+            warn("To start sbd.service, need to restart cluster service manually on each node")
+            if self.diskless_sbd:
+                warn("Then run \"crm configure property stonith-enabled=true stonith-watchdog-timeout={}\" on any node".format(self._stonith_watchdog_timeout))
+            else:
+                self.configure_sbd_resource()
+
+    def _enable_sbd_service(self):
+        """
+        Try to enable sbd service
+        """
+        if _context.cluster_is_running:
+            # in sbd stage, enable sbd.service on cluster wide
+            utils.cluster_run_cmd("systemctl enable sbd.service")
+            self._restart_cluster_and_configure_sbd_ra()
+        else:
+            # in init process
+            invoke("systemctl enable sbd.service")
+
+    def _warn_diskless_sbd(self, peer=None):
+        """
+        Give warning when configuring diskless sbd
+        """
+        # When in sbd stage or join process
+        if (self.diskless_sbd and _context.cluster_is_running) or peer:
+            vote_dict = utils.get_quorum_votes_dict(peer)
+            expected_vote = int(vote_dict['Expected'])
+            if (expected_vote < 2 and peer) or (expected_vote < 3 and not peer):
+                warn(self.DISKLESS_SBD_WARNING)
+        # When in init process
+        elif self.diskless_sbd:
+            warn(self.DISKLESS_SBD_WARNING)
+
     def sbd_init(self):
         """
         Function sbd_init includes these steps:
@@ -535,29 +599,30 @@
         if not self._sbd_devices and not self.diskless_sbd:
             invoke("systemctl disable sbd.service")
             return
-        if self.diskless_sbd:
-            warn(self.DISKLESS_SBD_WARNING)
-        status_long("Initializing {}SBD...".format("diskless " if self.diskless_sbd else ""))
-        self._initialize_sbd()
-        self._update_configuration()
-        invoke("systemctl enable sbd.service")
-        status_done()
+        self._warn_diskless_sbd()
+        with status_long("Initializing {}SBD...".format("diskless " if self.diskless_sbd else "")):
+            self._initialize_sbd()
+            self._update_configuration()
+            self._determine_stonith_watchdog_timeout()
+            self._enable_sbd_service()

     def configure_sbd_resource(self):
         """
         Configure stonith-sbd resource and stonith-enabled property
         """
-        if not utils.package_is_installed("sbd"):
+        if not utils.package_is_installed("sbd") or \
+                not utils.service_is_enabled("sbd.service") or \
+                utils.has_resource_configured("stonith:external/sbd"):
             return
-        if utils.service_is_enabled("sbd.service"):
-            if self._get_sbd_device_from_config():
-                if not invokerc("crm configure primitive stonith-sbd stonith:external/sbd pcmk_delay_max=30s"):
-                    error("Can't create stonith-sbd primitive")
-                if not invokerc("crm configure property stonith-enabled=true"):
-                    error("Can't enable STONITH for SBD")
-            else:
-                if not invokerc("crm configure property stonith-enabled=true stonith-watchdog-timeout=5s"):
-                    error("Can't enable STONITH for diskless SBD")
+
+        if self._get_sbd_device_from_config():
+            if not invokerc("crm configure primitive stonith-sbd stonith:external/sbd pcmk_delay_max=30s"):
+                error("Can't create stonith-sbd primitive")
+            if not invokerc("crm configure property stonith-enabled=true"):
+                error("Can't enable STONITH for SBD")
+        else:
+            if not invokerc("crm configure property stonith-enabled=true stonith-watchdog-timeout={}".format(self._stonith_watchdog_timeout)):
+                error("Can't enable STONITH for diskless SBD")

     def join_sbd(self, peer_host):
         """
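The _warn_diskless_sbd helper above keys its warning off the cluster's expected votes. A compact sketch of that threshold rule; needs_diskless_sbd_warning is a hypothetical name standing in for the checks around utils.get_quorum_votes_dict:

    def needs_diskless_sbd_warning(expected_votes, joining_peer):
        # Diskless SBD depends on real quorum, so small clusters get
        # the DISKLESS_SBD_WARNING. A joining peer is about to add one
        # more vote, hence the lower threshold in that case.
        if joining_peer:
            return expected_votes < 2
        return expected_votes < 3

    assert needs_diskless_sbd_warning(1, joining_peer=True)
    assert not needs_diskless_sbd_warning(2, joining_peer=True)
    assert needs_diskless_sbd_warning(2, joining_peer=False)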
@@ -576,9 +641,7 @@
         if dev_list:
             self._verify_sbd_device(dev_list, [peer_host])
         else:
-            vote_dict = utils.get_quorum_votes_dict(peer_host)
-            if int(vote_dict['Expected']) < 2:
-                warn(self.DISKLESS_SBD_WARNING)
+            self._warn_diskless_sbd(peer_host)

         status("Got {}SBD configuration".format("" if dev_list else "diskless "))
         invoke("systemctl enable sbd.service")
@@ -748,16 +811,15 @@


 def wait_for_resource(message, resource, needle="running on"):
-    status_long(message)
-    while True:
-        _rc, out, err = utils.get_stdout_stderr("crm_resource --locate --resource " + resource)
-        if needle in out:
-            break
-        if needle in err:
-            break
-        status_progress()
-        sleep(1)
-    status_done()
+    with status_long(message):
+        while True:
+            _rc, out, err = utils.get_stdout_stderr("crm_resource --locate --resource " + resource)
+            if needle in out:
+                break
+            if needle in err:
+                break
+            status_progress()
+            sleep(1)


 def wait_for_stop(message, resource):
@@ -765,14 +827,13 @@


 def wait_for_cluster():
-    status_long("Waiting for cluster")
-    while True:
-        _rc, out, _err = utils.get_stdout_stderr("crm_mon -1")
-        if is_online(out):
-            break
-        status_progress()
-        sleep(2)
-    status_done()
+    with status_long("Waiting for cluster"):
+        while True:
+            _rc, out, _err = utils.get_stdout_stderr("crm_mon -1")
+            if is_online(out):
+                break
+            status_progress()
+            sleep(2)


 def get_cluster_node_hostname():
@@ -781,7 +842,7 @@
     """
     peer_node = None
     if _context.cluster_node:
-        rc, out, err = utils.get_stdout_stderr("ssh {} crm_node --name".format(_context.cluster_node))
+        rc, out, err = utils.get_stdout_stderr("ssh {} {} crm_node --name".format(SSH_OPTION, _context.cluster_node))
         if rc != 0:
             error(err)
         peer_node = out
@@ -841,11 +902,18 @@
         print("  {}".format(msg))


+@contextmanager
 def status_long(msg):
     log("# {}...".format(msg))
     if not _context.quiet:
         sys.stdout.write("  {}...".format(msg))
         sys.stdout.flush()
+    try:
+        yield
+    except:
+        raise
+    else:
+        status_done()


 def status_progress():
@@ -875,10 +943,9 @@


 def probe_partitions():
-    status_long("Probing for new partitions")
-    partprobe()
-    sleep(5)
-    status_done()
+    with status_long("Probing for new partitions"):
+        partprobe()
+        sleep(5)


 def check_tty():
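The recurring refactor in this patch replaces paired status_long()/status_done() calls with a with-block, so "done" is only reported when the body succeeds. A minimal self-contained sketch of the pattern, with print standing in for crmsh's logging and terminal output:

    from contextlib import contextmanager

    @contextmanager
    def status_long(msg):
        print("  {}...".format(msg), end="", flush=True)
        try:
            yield
        except Exception:
            raise  # propagate; no "done" is printed on failure
        else:
            print(" done")

    with status_long("Probing for new partitions"):
        pass  # long-running work goes here

One nice property of this design: every call site that previously had to remember a trailing status_done() (and could skip it on an early error) now gets the success/failure handling for free from the context manager.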
@@ -1243,7 +1310,7 @@
     As the hint, likely, `PasswordAuthentication` is 'no' in /etc/ssh/sshd_config.
     Given in this case, users must setup passwordless ssh beforehand, or change it to 'yes' and manage passwords properly
     """
-    cmd = "cat {} | ssh -oStrictHostKeyChecking=no root@{} 'cat >> {}'".format(fromfile, remote_node, tofile)
+    cmd = "cat {} | ssh {} root@{} 'cat >> {}'".format(fromfile, SSH_OPTION, remote_node, tofile)
     rc, _, err = invoke(cmd)
     if not rc:
         error("Failed to append contents of {} to {}:\n\"{}\"\n{}".format(fromfile, remote_node, err, err_details_string))
@@ -1256,10 +1323,9 @@
         return
     invoke("rm", "-f", CSYNC2_KEY)
-    status_long("Generating csync2 shared key (this may take a while)")
-    if not invokerc("csync2", "-k", CSYNC2_KEY):
-        error("Can't create csync2 key {}".format(CSYNC2_KEY))
-    status_done()
+    with status_long("Generating csync2 shared key (this may take a while)"):
+        if not invokerc("csync2", "-k", CSYNC2_KEY):
+            error("Can't create csync2 key {}".format(CSYNC2_KEY))

     csync2_file_list = ""
     for f in FILES_TO_SYNC:
@@ -1274,9 +1340,8 @@
 """.format(utils.this_node(), csync2_file_list), CSYNC2_CFG)

     utils.start_service("csync2.socket", enable=True)
-    status_long("csync2 checking files")
-    invoke("csync2", "-cr", "/")
-    status_done()
+    with status_long("csync2 checking files"):
+        invoke("csync2", "-cr", "/")


 def csync2_update(path):
@@ -1682,27 +1747,25 @@
     if partitions:
         if not confirm("Really?"):
             return
-        status_long("Erasing existing partitions...")
-        for part in partitions:
-            if not invokerc("parted -s %s rm %s" % (dev, part)):
-                error("Failed to remove partition %s from %s" % (part, dev))
-        status_done()
-
-    status_long("Creating partitions...")
-    if not invokerc("parted", "-s", dev, "mklabel", "msdos"):
-        error("Failed to create partition table")
-
-    # This is a bit rough, and probably won't result in great performance,
-    # but it's fine for test/demo purposes to carve off 1MB for SBD. Note
-    # we have to specify the size of the first partition in this in bytes
-    # rather than MB, or parted's rounding gives us a ~30Kb partition
-    # (see rhbz#623268).
-    if not invokerc("parted -s %s mkpart primary 0 1048576B" % (dev)):
-        error("Failed to create first partition on %s" % (dev))
-    if not invokerc("parted -s %s mkpart primary 1M 100%%" % (dev)):
-        error("Failed to create second partition")
+        with status_long("Erasing existing partitions..."):
+            for part in partitions:
+                if not invokerc("parted -s %s rm %s" % (dev, part)):
+                    error("Failed to remove partition %s from %s" % (part, dev))
+
+    with status_long("Creating partitions..."):
+        if not invokerc("parted", "-s", dev, "mklabel", "msdos"):
+            error("Failed to create partition table")
+
+        # This is a bit rough, and probably won't result in great performance,
+        # but it's fine for test/demo purposes to carve off 1MB for SBD. Note
+        # we have to specify the size of the first partition in this in bytes
+        # rather than MB, or parted's rounding gives us a ~30Kb partition
+        # (see rhbz#623268).
+        if not invokerc("parted -s %s mkpart primary 0 1048576B" % (dev)):
+            error("Failed to create first partition on %s" % (dev))
+        if not invokerc("parted -s %s mkpart primary 1M 100%%" % (dev)):
+            error("Failed to create second partition")

-    status_done()
     # TODO: May not be strictly necessary, but...
     probe_partitions()
@@ -1791,15 +1854,14 @@
         wait_for_stop("Waiting for resource %s to stop" % (res), res)
         invoke("crm configure delete dlm clusterfs base-group base-clone c-clusterfs base-then-clusterfs clusterfs-with-base")

-    status_long("Creating OCFS2 filesystem")
-    # TODO: want "-T vmstore", but this'll only fly on >2GB partition
-    # Note: using undocumented '-x' switch to avoid prompting if overwriting
-    # existing partition.  For the commit that introduced this, see:
-    # http://oss.oracle.com/git/?p=ocfs2-tools.git;a=commit;h=8345a068479196172190f4fa287052800fa2b66f
-    # TODO: if make the cluster name configurable, we need to update it here too
-    if not invokerc("mkfs.ocfs2 --cluster-stack pcmk --cluster-name %s -N 8 -x %s" % (_context.cluster_name, dev)):
-        error("Failed to create OCFS2 filesystem on %s" % (dev))
-    status_done()
+    with status_long("Creating OCFS2 filesystem"):
+        # TODO: want "-T vmstore", but this'll only fly on >2GB partition
+        # Note: using undocumented '-x' switch to avoid prompting if overwriting
+        # existing partition.  For the commit that introduced this, see:
+        # http://oss.oracle.com/git/?p=ocfs2-tools.git;a=commit;h=8345a068479196172190f4fa287052800fa2b66f
+        # TODO: if make the cluster name configurable, we need to update it here too
+        if not invokerc("mkfs.ocfs2 --cluster-stack pcmk --cluster-name %s -N 8 -x %s" % (_context.cluster_name, dev)):
+            error("Failed to create OCFS2 filesystem on %s" % (dev))

     # TODO: refactor, maybe
     if not invokerc("mkdir -p %s" % (mntpoint)):
@@ -1870,7 +1932,7 @@
     """
     if _context.yes_to_all:
         return
-    status("\nConfigure Qdevice/Qnetd:\n" + constants.qdevice_help_info + "\n")
+    status("\nConfigure Qdevice/Qnetd:\n" + QDEVICE_HELP_INFO + "\n")
     if not confirm("Do you want to configure QDevice?"):
         return
     qnetd_addr = prompt_for_string("HOST or IP of the QNetd server to be used")
@@ -1928,9 +1990,8 @@
     config_qdevice()
     # Execute certificate process when tls flag is on
     if utils.is_qdevice_tls_on():
-        status_long("Qdevice certification process")
-        qdevice_inst.certificate_process_on_init()
-        status_done()
+        with status_long("Qdevice certification process"):
+            qdevice_inst.certificate_process_on_init()

     start_qdevice_service()
@@ -1970,11 +2031,10 @@
     qdevice_inst.write_qdevice_config()
     if not corosync.is_unicast():
         corosync.add_nodelist_from_cmaptool()
-    status_long("Update configuration")
-    update_expected_votes()
-    if _context.qdevice_reload_policy == QdevicePolicy.QDEVICE_RELOAD:
-        utils.cluster_run_cmd("crm corosync reload")
-    status_done()
+    with status_long("Update configuration"):
+        update_expected_votes()
+        if _context.qdevice_reload_policy == QdevicePolicy.QDEVICE_RELOAD:
+            utils.cluster_run_cmd("crm corosync reload")


 def init():
@@ -2001,7 +2061,7 @@
     # authorized_keys file (again, to help with the case where the
     # user has done manual initial setup without the assistance of
     # ha-cluster-init).
-    rc, _, err = invoke("ssh root@{} crm cluster init -i {} ssh_remote".format(seed_host, _context.default_nic_list[0]))
+    rc, _, err = invoke("ssh {} root@{} crm cluster init -i {} ssh_remote".format(SSH_OPTION, seed_host, _context.default_nic_list[0]))
     if not rc:
         error("Can't invoke crm cluster init -i {} ssh_remote on {}: {}".format(_context.default_nic_list[0], seed_host, err))
@@ -2045,11 +2105,11 @@
     home_dir = userdir.gethomedir(user)
     for key in ("id_rsa", "id_ecdsa", "id_ed25519", "id_dsa"):
         public_key_file = "{}/.ssh/{}.pub".format(home_dir, key)
-        cmd = "ssh -oStrictHostKeyChecking=no root@{} 'test -f {}'".format(node, public_key_file)
+        cmd = "ssh {} root@{} 'test -f {}'".format(SSH_OPTION, node, public_key_file)
         if not invokerc(cmd):
             continue
         _, temp_public_key_file = tmpfiles.create()
-        cmd = "scp -oStrictHostKeyChecking=no root@{}:{} {}".format(node, public_key_file, temp_public_key_file)
+        cmd = "scp {} root@{}:{} {}".format(SSH_OPTION, node, public_key_file, temp_public_key_file)
         rc, _, err = invoke(cmd)
         if not rc:
             error("Failed to run \"{}\": {}".format(cmd, err))
@@ -2063,49 +2123,47 @@
     """
     if not seed_host:
         error("No existing IP/hostname specified (use -c option)")
-    status_long("Configuring csync2")
+    with status_long("Configuring csync2"):

-    # Necessary if re-running join on a node that's been configured before.
-    rmfile("/var/lib/csync2/{}.db3".format(utils.this_node()), ignore_errors=True)
+        # Necessary if re-running join on a node that's been configured before.
+        rmfile("/var/lib/csync2/{}.db3".format(utils.this_node()), ignore_errors=True)

-    # Not automatically updating /etc/hosts - risky in the general case.
-    # etc_hosts_add_me
-    # local hosts_line=$(etc_hosts_get_me)
-    # [ -n "$hosts_line" ] || error "No valid entry for $(hostname) in /etc/hosts - csync2 can't work"
-
-    # If we *were* updating /etc/hosts, the next line would have "\"$hosts_line\"" as
-    # the last arg (but this requires re-enabling this functionality in ha-cluster-init)
-    cmd = "crm cluster init -i {} csync2_remote {}".format(_context.default_nic_list[0], utils.this_node())
-    rc, _, err = invoke("ssh -o StrictHostKeyChecking=no root@{} {}".format(seed_host, cmd))
-    if not rc:
-        error("Can't invoke \"{}\" on {}: {}".format(cmd, seed_host, err))
-
-    # This is necessary if syncing /etc/hosts (to ensure everyone's got the
-    # same list of hosts)
-    # local tmp_conf=/etc/hosts.$$
-    # invoke scp root@seed_host:/etc/hosts $tmp_conf \
-    #     || error "Can't retrieve /etc/hosts from seed_host"
-    # install_tmp $tmp_conf /etc/hosts
-    rc, _, err = invoke("scp root@%s:'/etc/csync2/{csync2.cfg,key_hagroup}' /etc/csync2" % (seed_host))
-    if not rc:
-        error("Can't retrieve csync2 config from {}: {}".format(seed_host, err))
+        # Not automatically updating /etc/hosts - risky in the general case.
+        # etc_hosts_add_me
+        # local hosts_line=$(etc_hosts_get_me)
+        # [ -n "$hosts_line" ] || error "No valid entry for $(hostname) in /etc/hosts - csync2 can't work"
+
+        # If we *were* updating /etc/hosts, the next line would have "\"$hosts_line\"" as
+        # the last arg (but this requires re-enabling this functionality in ha-cluster-init)
+        cmd = "crm cluster init -i {} csync2_remote {}".format(_context.default_nic_list[0], utils.this_node())
+        rc, _, err = invoke("ssh {} root@{} {}".format(SSH_OPTION, seed_host, cmd))
+        if not rc:
+            error("Can't invoke \"{}\" on {}: {}".format(cmd, seed_host, err))
-    utils.start_service("csync2.socket", enable=True)
+        # This is necessary if syncing /etc/hosts (to ensure everyone's got the
+        # same list of hosts)
+        # local tmp_conf=/etc/hosts.$$
+        # invoke scp root@seed_host:/etc/hosts $tmp_conf \
+        #     || error "Can't retrieve /etc/hosts from seed_host"
+        # install_tmp $tmp_conf /etc/hosts
+        rc, _, err = invoke("scp root@%s:'/etc/csync2/{csync2.cfg,key_hagroup}' /etc/csync2" % (seed_host))
+        if not rc:
+            error("Can't retrieve csync2 config from {}: {}".format(seed_host, err))
-    # Sync new config out. This goes to all hosts; csync2.cfg definitely
-    # needs to go to all hosts (else hosts other than the seed and the
-    # joining host won't have the joining host in their config yet).
-    # Strictly, the rest of the files need only go to the new host which
-    # could theoretically be effected using `csync2 -xv -P $(hostname)`,
-    # but this still leaves all the other files in dirty state (becuase
-    # they haven't gone to all nodes in the cluster, which means a
-    # subseqent join of another node can fail its sync of corosync.conf
-    # when it updates expected_votes. Grrr...
-    if not invokerc('ssh -o StrictHostKeyChecking=no root@{} "csync2 -rm /; csync2 -rxv || csync2 -rf / && csync2 -rxv"'.format(seed_host)):
-        print("")
-        warn("csync2 run failed - some files may not be sync'd")
+        utils.start_service("csync2.socket", enable=True)
-    status_done()
+        # Sync new config out. This goes to all hosts; csync2.cfg definitely
+        # needs to go to all hosts (else hosts other than the seed and the
+        # joining host won't have the joining host in their config yet).
+        # Strictly, the rest of the files need only go to the new host which
+        # could theoretically be effected using `csync2 -xv -P $(hostname)`,
+        # but this still leaves all the other files in dirty state (becuase
+        # they haven't gone to all nodes in the cluster, which means a
+        # subseqent join of another node can fail its sync of corosync.conf
+        # when it updates expected_votes. Grrr...
+        if not invokerc('ssh {} root@{} "csync2 -rm /; csync2 -rxv || csync2 -rf / && csync2 -rxv"'.format(SSH_OPTION, seed_host)):
+            print("")
+            warn("csync2 run failed - some files may not be sync'd")


 def join_ssh_merge(_cluster_node):
@@ -2236,7 +2294,7 @@
     Should fetch the node list from init node, then swap the key
     """
     # Fetch cluster nodes list
-    cmd = "ssh -o StrictHostKeyChecking=no root@{} crm_node -l".format(init_node)
+    cmd = "ssh {} root@{} crm_node -l".format(SSH_OPTION, init_node)
     rc, out, err = utils.get_stdout_stderr(cmd)
     if rc != 0:
         error("Can't fetch cluster nodes list from {}: {}".format(init_node, err))
@@ -2247,7 +2305,7 @@
             cluster_nodes_list.append(node)

     # Filter out init node from cluster_nodes_list
-    cmd = "ssh -o StrictHostKeyChecking=no root@{} hostname".format(init_node)
+    cmd = "ssh {} root@{} hostname".format(SSH_OPTION, init_node)
     rc, out, err = utils.get_stdout_stderr(cmd)
     if rc != 0:
         error("Can't fetch hostname of {}: {}".format(init_node, err))
@@ -2313,7 +2371,7 @@
     # that yet, so the following crawling horror takes a punt on the seed
     # node being up, then asks it for a list of mountpoints...
     if _context.cluster_node:
-        _rc, outp, _ = utils.get_stdout_stderr("ssh -o StrictHostKeyChecking=no root@{} 'cibadmin -Q --xpath \"//primitive\"'".format(seed_host))
+        _rc, outp, _ = utils.get_stdout_stderr("ssh {} root@{} 'cibadmin -Q --xpath \"//primitive\"'".format(SSH_OPTION, seed_host))
         if outp:
             xml = etree.fromstring(outp)
             mountpoints = xml.xpath(' and '.join(['//primitive[@class="ocf"',
@@ -2356,7 +2414,7 @@
     except corosync.IPAlreadyConfiguredError as e:
         warn(e)
     csync2_update(corosync.conf())
-    invoke("ssh -o StrictHostKeyChecking=no root@{} corosync-cfgtool -R".format(seed_host))
+    invoke("ssh {} root@{} corosync-cfgtool -R".format(SSH_OPTION, seed_host))

     _context.sbd_manager.join_sbd(seed_host)
@@ -2377,47 +2435,46 @@
     # attempt to join the cluster failed)
     init_cluster_local()

-    status_long("Reloading cluster configuration")
-
-    if ipv6_flag and not is_unicast:
-        # for ipv6 mcast
-        nodeid_dict = {}
-        _rc, outp, _ = utils.get_stdout_stderr("crm_node -l")
-        if _rc == 0:
-            for line in outp.split('\n'):
-                tmp = line.split()
-                nodeid_dict[tmp[1]] = tmp[0]
-
-    # apply nodelist in cluster
-    if is_unicast or is_qdevice_configured:
-        invoke("crm cluster run 'crm corosync reload'")
-
-    update_expected_votes()
-    # Trigger corosync config reload to ensure expected_votes is propagated
-    invoke("corosync-cfgtool -R")
-
-    # Ditch no-quorum-policy=ignore
-    _rc, outp = utils.get_stdout("crm configure show")
-    if re.search('no-quorum-policy=.*ignore', outp):
-        invoke("crm_attribute --attr-name no-quorum-policy --delete-attr")
-
-    # if unicast, we need to reload the corosync configuration
-    # on the other nodes
-    if is_unicast:
-        invoke("crm cluster run 'crm corosync reload'")
+    with status_long("Reloading cluster configuration"):
-    if ipv6_flag and not is_unicast:
-        # for ipv6 mcast
-        # after csync2_update, all config files are same
-        # but nodeid must be uniqe
-        for node in list(nodeid_dict.keys()):
-            if node == utils.this_node():
-                continue
-            update_nodeid(int(nodeid_dict[node]), node)
-        update_nodeid(local_nodeid)
+        if ipv6_flag and not is_unicast:
+            # for ipv6 mcast
+            nodeid_dict = {}
+            _rc, outp, _ = utils.get_stdout_stderr("crm_node -l")
+            if _rc == 0:
+                for line in outp.split('\n'):
+                    tmp = line.split()
+                    nodeid_dict[tmp[1]] = tmp[0]
+
+        # apply nodelist in cluster
+        if is_unicast or is_qdevice_configured:
+            invoke("crm cluster run 'crm corosync reload'")
+
+        update_expected_votes()
+        # Trigger corosync config reload to ensure expected_votes is propagated
+        invoke("corosync-cfgtool -R")
+
+        # Ditch no-quorum-policy=ignore
+        _rc, outp = utils.get_stdout("crm configure show")
+        if re.search('no-quorum-policy=.*ignore', outp):
+            invoke("crm_attribute --attr-name no-quorum-policy --delete-attr")
+
+        # if unicast, we need to reload the corosync configuration
+        # on the other nodes
+        if is_unicast:
+            invoke("crm cluster run 'crm corosync reload'")
+
+        if ipv6_flag and not is_unicast:
+            # for ipv6 mcast
+            # after csync2_update, all config files are same
+            # but nodeid must be uniqe
+            for node in list(nodeid_dict.keys()):
+                if node == utils.this_node():
+                    continue
+                update_nodeid(int(nodeid_dict[node]), node)
+            update_nodeid(local_nodeid)

-    sync_files_to_disk()
-    status_done()
+        sync_files_to_disk()

     if is_qdevice_configured:
         start_qdevice_on_join_node(seed_host)
@@ -2429,17 +2486,16 @@
     """
     Doing qdevice certificate process and start qdevice service on join node
     """
-    status_long("Starting corosync-qdevice.service")
-    if not corosync.is_unicast():
-        corosync.add_nodelist_from_cmaptool()
-        csync2_update(corosync.conf())
-        invoke("crm corosync reload")
-    if utils.is_qdevice_tls_on():
-        qnetd_addr = corosync.get_value("quorum.device.net.host")
-        qdevice_inst = corosync.QDevice(qnetd_addr, cluster_node=seed_host)
-        qdevice_inst.certificate_process_on_join()
-    utils.start_service("corosync-qdevice.service", enable=True)
-    status_done()
+    with status_long("Starting corosync-qdevice.service"):
+        if not corosync.is_unicast():
+            corosync.add_nodelist_from_cmaptool()
+            csync2_update(corosync.conf())
+            invoke("crm corosync reload")
+        if utils.is_qdevice_tls_on():
+            qnetd_addr = corosync.get_value("quorum.device.net.host")
+            qdevice_inst = corosync.QDevice(qnetd_addr, cluster_node=seed_host)
+            qdevice_inst.certificate_process_on_join()
+        utils.start_service("corosync-qdevice.service", enable=True)


 def set_cluster_node_ip():
@@ -2483,7 +2539,7 @@
     stop_services(SERVICES_STOP_LIST, remote_addr=node)

     # delete configuration files from the node to be removed
-    rc, _, err = invoke('ssh -o StrictHostKeyChecking=no root@{} "bash -c \\\"rm -f {}\\\""'.format(node, " ".join(_context.rm_list)))
+    rc, _, err = invoke('ssh {} root@{} "bash -c \\\"rm -f {}\\\""'.format(SSH_OPTION, node, " ".join(_context.rm_list)))
     if not rc:
         error("Deleting the configuration files failed: {}".format(err))
@@ -2571,7 +2627,7 @@
     elif stage == "":
         if corosync_active:
             error("Cluster is currently active - can't run")
-    elif stage not in ("ssh", "ssh_remote", "csync2", "csync2_remote"):
+    elif stage not in ("ssh", "ssh_remote", "csync2", "csync2_remote", "sbd"):
         if corosync_active:
             error("Cluster is currently active - can't run %s stage" % (stage))
@@ -2694,13 +2750,12 @@
     status("Stopping corosync-qdevice.service")
     invoke("crm cluster run 'systemctl stop corosync-qdevice'")

-    status_long("Removing QDevice configuration from cluster")
-    qnetd_host = corosync.get_value('quorum.device.net.host')
-    qdevice_inst = corosync.QDevice(qnetd_host)
-    qdevice_inst.remove_qdevice_config()
-    qdevice_inst.remove_qdevice_db()
-    update_expected_votes()
-    status_done()
+    with status_long("Removing QDevice configuration from cluster"):
+        qnetd_host = corosync.get_value('quorum.device.net.host')
+        qdevice_inst = corosync.QDevice(qnetd_host)
+        qdevice_inst.remove_qdevice_config()
+        qdevice_inst.remove_qdevice_db()
+        update_expected_votes()

     if _context.qdevice_reload_policy == QdevicePolicy.QDEVICE_RELOAD:
         invoke("crm cluster run 'crm corosync reload'")
     elif _context.qdevice_reload_policy == QdevicePolicy.QDEVICE_RESTART:
@@ -2767,7 +2822,7 @@
     if othernode is not None:
         # remove from other node
         cmd = "crm cluster remove{} -c {}".format(" -y" if yes_to_all else "", me)
-        rc = utils.ext_cmd_nosudo("ssh{} -o StrictHostKeyChecking=no {} '{}'".format("" if yes_to_all else " -t", othernode, cmd))
+        rc = utils.ext_cmd_nosudo("ssh{} {} {} '{}'".format("" if yes_to_all else " -t", SSH_OPTION, othernode, cmd))
         if rc != 0:
             error("Failed to remove this node from {}".format(othernode))
     else:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.0+20210416.49f489c2/crmsh/constants.py new/crmsh-4.3.0+20210507.bf02d791/crmsh/constants.py
--- old/crmsh-4.3.0+20210416.49f489c2/crmsh/constants.py       2021-04-16 17:29:55.000000000 +0200
+++ new/crmsh-4.3.0+20210507.bf02d791/crmsh/constants.py       2021-05-07 04:34:39.000000000 +0200
@@ -482,9 +482,12 @@
 }


-qdevice_help_info = """  QDevice participates in quorum decisions. With the assistance of
+QDEVICE_HELP_INFO = """  QDevice participates in quorum decisions. With the assistance of
   a third-party arbitrator Qnetd, it provides votes so that a cluster
   is able to sustain more node failures than standard quorum rules
   allow. It is recommended for clusters with an even number of nodes
   and highly recommended for 2 node clusters."""
+
+
+SSH_OPTION = "-o StrictHostKeyChecking=no"

 # vim:ts=4:sw=4:et:
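With SSH_OPTION now defined once in constants.py, callers interpolate the shared constant instead of repeating the raw option string at every call site. Roughly (ssh_cmd is a hypothetical helper, for illustration only):

    SSH_OPTION = "-o StrictHostKeyChecking=no"

    def ssh_cmd(host, remote_cmd):
        # crmsh-style ssh invocation: shared options, root@host
        return "ssh {} root@{} '{}'".format(SSH_OPTION, host, remote_cmd)

    print(ssh_cmd("node2", "crm_node --name"))
    # ssh -o StrictHostKeyChecking=no root@node2 'crm_node --name'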
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.0+20210416.49f489c2/crmsh/ui_cluster.py new/crmsh-4.3.0+20210507.bf02d791/crmsh/ui_cluster.py
--- old/crmsh-4.3.0+20210416.49f489c2/crmsh/ui_cluster.py      2021-04-16 17:29:55.000000000 +0200
+++ new/crmsh-4.3.0+20210507.bf02d791/crmsh/ui_cluster.py      2021-05-07 04:34:39.000000000 +0200
@@ -244,7 +244,7 @@
         network_group.add_argument("-I", "--ipv6", action="store_true", dest="ipv6",
                 help="Configure corosync use IPv6")

-        qdevice_group = parser.add_argument_group("QDevice configuration", re.sub(' ', '', constants.qdevice_help_info) + "\n\nOptions for configuring QDevice and QNetd.")
+        qdevice_group = parser.add_argument_group("QDevice configuration", re.sub(' ', '', constants.QDEVICE_HELP_INFO) + "\n\nOptions for configuring QDevice and QNetd.")
         qdevice_group.add_argument("--qnetd-hostname", dest="qnetd_addr", metavar="HOST",
                 help="HOST or IP of the QNetd server to be used")
         qdevice_group.add_argument("--qdevice-port", dest="qdevice_port", metavar="PORT", type=int, default=5403,
@@ -285,15 +285,13 @@
         elif re.search("--qdevice-.*", ' '.join(sys.argv)) or (stage == "qdevice" and options.yes_to_all):
             parser.error("Option --qnetd-hostname is required if want to configure qdevice")

-        if options.sbd_devices and options.diskless_sbd:
-            parser.error("Can't use -s and -S options together")
-
         # if options.geo and options.name == "hacluster":
         #     parser.error("For a geo cluster, each cluster must have a unique name (use --name to set)")
         boot_context = bootstrap.Context.set_context(options)
         boot_context.ui_context = context
         boot_context.stage = stage
         boot_context.args = args
+        boot_context.cluster_is_running = utils.service_is_active("pacemaker.service")
         boot_context.type = "init"

         bootstrap.bootstrap_init(boot_context)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.0+20210416.49f489c2/crmsh/utils.py new/crmsh-4.3.0+20210507.bf02d791/crmsh/utils.py
--- old/crmsh-4.3.0+20210416.49f489c2/crmsh/utils.py    2021-04-16 17:29:55.000000000 +0200
+++ new/crmsh-4.3.0+20210507.bf02d791/crmsh/utils.py    2021-05-07 04:34:39.000000000 +0200
@@ -24,6 +24,7 @@
 from . import term
 from . import parallax
 from .msg import common_warn, common_info, common_debug, common_err, err_buf
+from .constants import SSH_OPTION


 class TerminateSubCommand(Exception):
@@ -2652,7 +2653,7 @@
     Common function to get stdout from cmd or raise exception
     """
     if remote:
-        cmd = "ssh -o StrictHostKeyChecking=no root@{} \"{}\"".format(remote, cmd)
+        cmd = "ssh {} root@{} \"{}\"".format(SSH_OPTION, remote, cmd)
     rc, out, err = get_stdout_stderr(cmd)
     if rc != success_val:
         raise ValueError("Failed to run \"{}\": {}".format(cmd, err))
@@ -2675,6 +2676,14 @@
     return re.search("No active resources", out) is None


+def has_resource_configured(ra_type):
+    """
+    Check if the RA configured
+    """
+    out = get_stdout_or_raise_error("crm configure show")
+    return re.search(r' {} '.format(ra_type), out) is not None
+
+
 def check_all_nodes_reachable():
     """
     Check if all cluster nodes are reachable
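The new utils.has_resource_configured() matches the RA type with surrounding spaces so only whole tokens in the `crm configure show` output count. A rough standalone illustration of the same check, with the CIB output passed in as a string instead of shelling out:

    import re

    def has_resource_configured(ra_type, configure_show_output):
        # Spaces around ra_type avoid matching substrings of other names.
        return re.search(r' {} '.format(ra_type), configure_show_output) is not None

    cib = "primitive stonith-sbd stonith:external/sbd pcmk_delay_max=30s"
    print(has_resource_configured("stonith:external/sbd", cib))  # True

This is the guard that makes configure_sbd_resource() idempotent when the sbd stage is re-run on a cluster that already has the stonith-sbd primitive.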
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.0+20210416.49f489c2/test/features/bootstrap_sbd.feature new/crmsh-4.3.0+20210507.bf02d791/test/features/bootstrap_sbd.feature
--- old/crmsh-4.3.0+20210416.49f489c2/test/features/bootstrap_sbd.feature      2021-04-16 17:29:55.000000000 +0200
+++ new/crmsh-4.3.0+20210507.bf02d791/test/features/bootstrap_sbd.feature      2021-05-07 04:34:39.000000000 +0200
@@ -101,3 +101,38 @@
     Then    Cluster service is "started" on "hanode2"
     And     Service "sbd" is "started" on "hanode2"
     And     Resource "stonith:external/sbd" not configured
+
+  @clean
+  Scenario: Configure sbd on running cluster via stage(bsc#1181906)
+    Given   Cluster service is "stopped" on "hanode1"
+    Given   Cluster service is "stopped" on "hanode2"
+    When    Run "crm cluster init -y" on "hanode1"
+    Then    Cluster service is "started" on "hanode1"
+    When    Run "crm cluster join -c hanode1 -y" on "hanode2"
+    Then    Cluster service is "started" on "hanode2"
+    And     Online nodes are "hanode1 hanode2"
+    When    Run "crm cluster init sbd -s /dev/sda1 -y" on "hanode1"
+    Then    Service "sbd" is "started" on "hanode1"
+    And     Service "sbd" is "started" on "hanode2"
+    And     Resource "stonith-sbd" type "external/sbd" is "Started"
+
+  @clean
+  Scenario: Configure sbd on running cluster via stage with ra running(bsc#1181906)
+    Given   Cluster service is "stopped" on "hanode1"
+    Given   Cluster service is "stopped" on "hanode2"
+    When    Run "crm cluster init -y" on "hanode1"
+    Then    Cluster service is "started" on "hanode1"
+    When    Run "crm cluster join -c hanode1 -y" on "hanode2"
+    Then    Cluster service is "started" on "hanode2"
+    And     Online nodes are "hanode1 hanode2"
+    When    Run "crm configure primitive d Dummy op monitor interval=3s" on "hanode1"
+    When    Run "crm cluster init sbd -s /dev/sda1 -y" on "hanode1"
+    Then    Expected "WARNING: To start sbd.service, need to restart cluster service manually on each node" in stdout
+    Then    Service "sbd" is "stopped" on "hanode1"
+    And     Service "sbd" is "stopped" on "hanode2"
+    When    Run "crm cluster restart" on "hanode1"
+    Then    Service "sbd" is "started" on "hanode1"
+    When    Run "crm cluster restart" on "hanode2"
+    Then    Service "sbd" is "started" on "hanode2"
+    When    Run "sleep 20" on "hanode1"
+    Then    Resource "stonith-sbd" type "external/sbd" is "Started"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.0+20210416.49f489c2/test/unittests/test_bootstrap.py new/crmsh-4.3.0+20210507.bf02d791/test/unittests/test_bootstrap.py
--- old/crmsh-4.3.0+20210416.49f489c2/test/unittests/test_bootstrap.py 2021-04-16 17:29:55.000000000 +0200
+++ new/crmsh-4.3.0+20210507.bf02d791/test/unittests/test_bootstrap.py 2021-05-07 04:34:39.000000000 +0200
@@ -20,6 +20,130 @@

 from crmsh import bootstrap
 from crmsh import corosync
+from crmsh import constants
+
+
+class TestContext(unittest.TestCase):
+    """
+    Unitary tests for crmsh.bootstrap.Context
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        """
+        Global setUp.
+        """
+
+    def setUp(self):
+        """
+        Test setUp.
+        """
+        self.ctx_inst = bootstrap.Context()
+
+    def tearDown(self):
+        """
+        Test tearDown.
+        """
+
+    @classmethod
+    def tearDownClass(cls):
+        """
+        Global tearDown.
+        """
+
+    def test_set_context(self):
+        options = mock.Mock(yes_to_all=True, ipv6=False)
+        ctx = self.ctx_inst.set_context(options)
+        self.assertEqual(ctx.yes_to_all, True)
+        self.assertEqual(ctx.ipv6, False)
+
+    @mock.patch('crmsh.corosync.QDevice')
+    def test_initialize_qdevice_return(self, mock_qdevice):
+        self.ctx_inst.initialize_qdevice()
+        mock_qdevice.assert_not_called()
+
+    @mock.patch('crmsh.corosync.QDevice')
+    def test_initialize_qdevice(self, mock_qdevice):
+        options = mock.Mock(qnetd_addr="node3", qdevice_port=123)
+        ctx = self.ctx_inst.set_context(options)
+        ctx.initialize_qdevice()
+        mock_qdevice.assert_called_once_with('node3', port=123, algo=None, tie_breaker=None, tls=None, cmds=None, mode=None)
+
+    @mock.patch('crmsh.bootstrap.error')
+    def test_validate_sbd_option_error_together(self, mock_error):
+        mock_error.side_effect = SystemExit
+        options = mock.Mock(sbd_devices=["/dev/sda1"], diskless_sbd=True)
+        ctx = self.ctx_inst.set_context(options)
+        with self.assertRaises(SystemExit):
+            ctx._validate_sbd_option()
+        mock_error.assert_called_once_with("Can't use -s and -S options together")
+
+    @mock.patch('crmsh.bootstrap.error')
+    def test_validate_sbd_option_error_sbd_stage_no_option(self, mock_error):
+        mock_error.side_effect = SystemExit
+        options = mock.Mock(stage="sbd", yes_to_all=True)
+        ctx = self.ctx_inst.set_context(options)
+        with self.assertRaises(SystemExit):
+            ctx._validate_sbd_option()
+        mock_error.assert_called_once_with("Stage sbd should specify sbd device by -s or diskless sbd by -S option")
+
+    @mock.patch('crmsh.bootstrap.error')
+    @mock.patch('crmsh.utils.service_is_active')
+    def test_validate_sbd_option_error_sbd_stage_service(self, mock_active, mock_error):
+        mock_error.side_effect = SystemExit
+        options = mock.Mock(stage="sbd", diskless_sbd=True)
+        ctx = self.ctx_inst.set_context(options)
+        mock_active.return_value = True
+        with self.assertRaises(SystemExit):
+            ctx._validate_sbd_option()
+        mock_error.assert_called_once_with("Cannot configure stage sbd: sbd.service already running!")
+        mock_active.assert_called_once_with("sbd.service")
+
+    @mock.patch('crmsh.utils.check_all_nodes_reachable')
+    @mock.patch('crmsh.utils.service_is_active')
+    def test_validate_sbd_option_error_sbd_stage(self, mock_active, mock_check_all):
+        options = mock.Mock(stage="sbd", diskless_sbd=True, cluster_is_running=True)
+        ctx = self.ctx_inst.set_context(options)
+        mock_active.return_value = False
+        ctx._validate_sbd_option()
+        mock_active.assert_called_once_with("sbd.service")
+        mock_check_all.assert_called_once_with()
+
+    @mock.patch('crmsh.bootstrap.error')
+    def test_validate_option_error_nic_number(self, mock_error):
+        mock_error.side_effect = SystemExit
+        options = mock.Mock(nic_list=["eth1", "eth2", "eth3"])
+        ctx = self.ctx_inst.set_context(options)
+        with self.assertRaises(SystemExit):
+            ctx.validate_option()
+        mock_error.assert_called_once_with("Maximum number of interface is 2")
+
+    @mock.patch('crmsh.bootstrap.error')
+    def test_validate_option_error_nic_dup(self, mock_error):
+        mock_error.side_effect = SystemExit
+        options = mock.Mock(nic_list=["eth2", "eth2"])
+        ctx = self.ctx_inst.set_context(options)
+        with self.assertRaises(SystemExit):
+            ctx.validate_option()
+        mock_error.assert_called_once_with("Duplicated input")
+
+    @mock.patch('crmsh.bootstrap.warn')
+    @mock.patch('crmsh.bootstrap.Validation.valid_admin_ip')
+    def test_validate_option(self, mock_admin_ip, mock_warn):
+        options = mock.Mock(admin_ip="10.10.10.123", qdevice_inst=mock.Mock())
+        ctx = self.ctx_inst.set_context(options)
+        ctx._validate_sbd_option = mock.Mock()
+        ctx.validate_option()
+        mock_admin_ip.assert_called_once_with("10.10.10.123")
+        ctx.qdevice_inst.valid_attr.assert_called_once_with()
+        ctx._validate_sbd_option.assert_called_once_with()
+
+    @mock.patch('crmsh.bootstrap.SBDManager')
+    def test_init_sbd_manager(self, mock_sbd):
+        options = mock.Mock(sbd_devices=["/dev/sda1"], diskless_sbd=False)
+        ctx = self.ctx_inst.set_context(options)
+        ctx.init_sbd_manager()
+        mock_sbd.assert_called_once_with(["/dev/sda1"], False)


 class TestWatchdog(unittest.TestCase):
@@ -157,7 +281,7 @@
     def test_get_driver_through_device_remotely_error(self, mock_run, mock_error):
         mock_run.return_value = (1, None, "error")
         self.watchdog_join_inst._get_driver_through_device_remotely("test")
-        mock_run.assert_called_once_with("ssh -o StrictHostKeyChecking=no root@node1 sbd query-watchdog")
+        mock_run.assert_called_once_with("ssh {} root@node1 sbd query-watchdog".format(constants.SSH_OPTION))
         mock_error.assert_called_once_with("Failed to run sbd query-watchdog remotely: error")

     @mock.patch('crmsh.utils.get_stdout_stderr')
@@ -165,7 +289,7 @@
         mock_run.return_value = (0, "data", None)
         res = self.watchdog_join_inst._get_driver_through_device_remotely("/dev/watchdog")
         self.assertEqual(res, None)
-        mock_run.assert_called_once_with("ssh -o StrictHostKeyChecking=no root@node1 sbd query-watchdog")
+        mock_run.assert_called_once_with("ssh {} root@node1 sbd query-watchdog".format(constants.SSH_OPTION))

     @mock.patch('crmsh.utils.get_stdout_stderr')
     def test_get_driver_through_device_remotely(self, mock_run):
@@ -189,7 +313,7 @@
         mock_run.return_value = (0, output, None)
         res = self.watchdog_join_inst._get_driver_through_device_remotely("/dev/watchdog")
         self.assertEqual(res, "softdog")
-        mock_run.assert_called_once_with("ssh -o StrictHostKeyChecking=no root@node1 sbd query-watchdog")
+        mock_run.assert_called_once_with("ssh {} root@node1 sbd query-watchdog".format(constants.SSH_OPTION))

     def test_get_first_unused_device_none(self):
         res = self.watchdog_inst._get_first_unused_device()
@@ -563,6 +687,22 @@
         mock_parse_inst.get.assert_called_once_with("SBD_DEVICE")
         mock_split.assert_called_once_with(bootstrap.SBDManager.PARSE_RE, "/dev/sdb1;/dev/sdc1")

+    @mock.patch('crmsh.bootstrap.warn')
+    @mock.patch('crmsh.utils.get_quorum_votes_dict')
+    def test_warn_diskless_sbd_diskless(self, mock_vote, mock_warn):
+        bootstrap._context = mock.Mock(cluster_is_running=False)
+        self.sbd_inst_diskless._warn_diskless_sbd()
+        mock_vote.assert_not_called()
+        mock_warn.assert_called_once_with(bootstrap.SBDManager.DISKLESS_SBD_WARNING)
+
+    @mock.patch('crmsh.bootstrap.warn')
+    @mock.patch('crmsh.utils.get_quorum_votes_dict')
+    def test_warn_diskless_sbd_peer(self, mock_vote, mock_warn):
+        mock_vote.return_value = {'Expected': '1'}
+        self.sbd_inst_diskless._warn_diskless_sbd("node2")
+        mock_vote.assert_called_once_with("node2")
+        mock_warn.assert_called_once_with(bootstrap.SBDManager.DISKLESS_SBD_WARNING)
+
     @mock.patch('crmsh.utils.package_is_installed')
     def test_sbd_init_not_installed(self, mock_package):
         mock_package.return_value = False
@@ -570,14 +710,13 @@
         mock_package.assert_called_once_with("sbd")

     @mock.patch('crmsh.bootstrap.invoke')
-    @mock.patch('crmsh.bootstrap.status_done')
     @mock.patch('crmsh.bootstrap.SBDManager._update_configuration')
     @mock.patch('crmsh.bootstrap.SBDManager._initialize_sbd')
     @mock.patch('crmsh.bootstrap.status_long')
     @mock.patch('crmsh.bootstrap.SBDManager._get_sbd_device')
     @mock.patch('crmsh.bootstrap.Watchdog')
     @mock.patch('crmsh.utils.package_is_installed')
-    def test_sbd_init_return(self, mock_package, mock_watchdog, mock_get_device, mock_status, mock_initialize, mock_update, mock_status_done, mock_invoke):
+    def test_sbd_init_return(self, mock_package, mock_watchdog, mock_get_device, mock_status, mock_initialize, mock_update, mock_invoke):
         mock_package.return_value = True
         self.sbd_inst._sbd_devices = None
         self.sbd_inst.diskless_sbd = False
@@ -592,20 +731,20 @@
         mock_status.assert_not_called()
         mock_initialize.assert_not_called()
         mock_update.assert_not_called()
-        mock_status_done.assert_not_called()
         mock_watchdog.assert_called_once_with(_input=None)
         mock_watchdog_inst.init_watchdog.assert_called_once_with()
         mock_invoke.assert_called_once_with("systemctl disable sbd.service")
-
-    @mock.patch('crmsh.bootstrap.warn')
-    @mock.patch('crmsh.bootstrap.status_done')
+
+    @mock.patch('crmsh.bootstrap.SBDManager._determine_stonith_watchdog_timeout')
+    @mock.patch('crmsh.bootstrap.SBDManager._enable_sbd_service')
+    @mock.patch('crmsh.bootstrap.SBDManager._warn_diskless_sbd')
     @mock.patch('crmsh.bootstrap.SBDManager._update_configuration')
     @mock.patch('crmsh.bootstrap.SBDManager._initialize_sbd')
     @mock.patch('crmsh.bootstrap.status_long')
     @mock.patch('crmsh.bootstrap.SBDManager._get_sbd_device')
     @mock.patch('crmsh.bootstrap.Watchdog')
     @mock.patch('crmsh.utils.package_is_installed')
-    def test_sbd_init(self, mock_package, mock_watchdog, mock_get_device, mock_status, mock_initialize, mock_update, mock_status_done, mock_warn):
+    def test_sbd_init(self, mock_package, mock_watchdog, mock_get_device, mock_status, mock_initialize, mock_update, mock_warn, mock_enable_sbd, mock_determine):
         bootstrap._context = mock.Mock(watchdog=None)
         mock_package.return_value = True
         mock_watchdog_inst = mock.Mock()
@@ -618,10 +757,78 @@
         mock_status.assert_called_once_with("Initializing diskless SBD...")
         mock_initialize.assert_called_once_with()
         mock_update.assert_called_once_with()
-        mock_status_done.assert_called_once_with()
         mock_watchdog.assert_called_once_with(_input=None)
         mock_watchdog_inst.init_watchdog.assert_called_once_with()
-        mock_warn.assert_called_once_with(bootstrap.SBDManager.DISKLESS_SBD_WARNING)
+        mock_warn.assert_called_once_with()
+        mock_enable_sbd.assert_called_once_with()
+        mock_determine.assert_called_once_with()
+
+    @mock.patch('crmsh.bootstrap.SBDManager.configure_sbd_resource')
+    @mock.patch('crmsh.bootstrap.wait_for_cluster')
+    @mock.patch('crmsh.utils.cluster_run_cmd')
+    @mock.patch('crmsh.bootstrap.status')
+    @mock.patch('crmsh.utils.has_resource_running')
+    def test_restart_cluster_on_needed_no_ra_running(self, mock_ra_running, mock_status, mock_cluster_run, mock_wait, mock_config_sbd_ra):
+        mock_ra_running.return_value = False
+        self.sbd_inst._restart_cluster_and_configure_sbd_ra()
+        mock_status.assert_called_once_with("Restarting cluster service")
+        mock_cluster_run.assert_called_once_with("crm cluster restart")
+        mock_wait.assert_called_once_with()
+        mock_config_sbd_ra.assert_called_once_with()
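A note on reading the long @mock.patch stacks in these tests: decorators apply bottom-up, so the patch nearest the function supplies the first mock argument. A minimal demonstration of that ordering rule:

    from unittest import mock
    import os

    @mock.patch('os.getcwd')  # outermost decorator -> last mock argument
    @mock.patch('os.getpid')  # innermost decorator -> first mock argument
    def demo(mock_getpid, mock_getcwd):
        os.getpid()
        mock_getpid.assert_called_once_with()
        mock_getcwd.assert_not_called()

    demo()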
+
+    @mock.patch('crmsh.bootstrap.warn')
+    @mock.patch('crmsh.utils.has_resource_running')
+    def test_restart_cluster_on_needed_diskless(self, mock_ra_running, mock_warn):
+        mock_ra_running.return_value = True
+        self.sbd_inst_diskless._restart_cluster_and_configure_sbd_ra()
+        mock_warn.assert_has_calls([
+            mock.call("To start sbd.service, need to restart cluster service manually on each node"),
+            mock.call("Then run \"crm configure property stonith-enabled=true stonith-watchdog-timeout=10s\" on any node")
+            ])
+
+    @mock.patch('crmsh.bootstrap.SBDManager.configure_sbd_resource')
+    @mock.patch('crmsh.bootstrap.warn')
+    @mock.patch('crmsh.utils.has_resource_running')
+    def test_restart_cluster_on_needed(self, mock_ra_running, mock_warn, mock_config_sbd_ra):
+        mock_ra_running.return_value = True
+        self.sbd_inst._restart_cluster_and_configure_sbd_ra()
+        mock_warn.assert_has_calls([
+            mock.call("To start sbd.service, need to restart cluster service manually on each node"),
+            ])
+        mock_config_sbd_ra.assert_called_once_with()
+
+    @mock.patch('crmsh.bootstrap.invoke')
+    def test_enable_sbd_service_init(self, mock_invoke):
+        bootstrap._context = mock.Mock(cluster_is_running=False)
+        self.sbd_inst._enable_sbd_service()
+        mock_invoke.assert_called_once_with("systemctl enable sbd.service")
+
+    @mock.patch('crmsh.bootstrap.SBDManager._restart_cluster_and_configure_sbd_ra')
+    @mock.patch('crmsh.utils.cluster_run_cmd')
+    def test_enable_sbd_service_restart(self, mock_cluster_run, mock_restart):
+        bootstrap._context = mock.Mock(cluster_is_running=True)
+        self.sbd_inst._enable_sbd_service()
+        mock_cluster_run.assert_has_calls([
+            mock.call("systemctl enable sbd.service"),
+            ])
+        mock_restart.assert_called_once_with()
+
+    @mock.patch('crmsh.bootstrap.warn')
+    @mock.patch('crmsh.bootstrap.SBDManager.configure_sbd_resource')
+    @mock.patch('crmsh.utils.has_resource_running')
+    @mock.patch('crmsh.utils.cluster_run_cmd')
+    def test_enable_sbd_service(self, mock_cluster_run, mock_ra_running, mock_config_sbd_ra, mock_warn):
+        bootstrap._context = mock.Mock(cluster_is_running=True)
+        mock_ra_running.return_value = True
+
+        self.sbd_inst._enable_sbd_service()
+
+        mock_cluster_run.assert_has_calls([
+            mock.call("systemctl enable sbd.service"),
+            ])
+        mock_ra_running.assert_called_once_with()
+        mock_config_sbd_ra.assert_called_once_with()
+        mock_warn.assert_called_once_with("To start sbd.service, need to restart cluster service manually on each node")

     @mock.patch('crmsh.utils.package_is_installed')
     def test_configure_sbd_resource_not_installed(self, mock_package):
@@ -632,11 +839,13 @@
     @mock.patch('crmsh.bootstrap.error')
     @mock.patch('crmsh.bootstrap.invokerc')
     @mock.patch('crmsh.bootstrap.SBDManager._get_sbd_device_from_config')
+    @mock.patch('crmsh.utils.has_resource_configured')
     @mock.patch('crmsh.utils.service_is_enabled')
     @mock.patch('crmsh.utils.package_is_installed')
-    def test_configure_sbd_resource_error_primitive(self, mock_package, mock_enabled, mock_get_device, mock_invoke, mock_error):
+    def test_configure_sbd_resource_error_primitive(self, mock_package, mock_enabled, mock_ra_configured, mock_get_device, mock_invoke, mock_error):
         mock_package.return_value = True
         mock_enabled.return_value = True
+        mock_ra_configured.return_value = False
         mock_get_device.return_value = ["/dev/sdb1"]
         mock_invoke.return_value = False
         mock_error.side_effect = ValueError
@@ -646,6 +855,7 @@

         mock_package.assert_called_once_with("sbd")
         mock_enabled.assert_called_once_with("sbd.service")
+        mock_ra_configured.assert_called_once_with("stonith:external/sbd")
         mock_get_device.assert_called_once_with()
         mock_invoke.assert_called_once_with("crm configure primitive stonith-sbd stonith:external/sbd pcmk_delay_max=30s")
         mock_error.assert_called_once_with("Can't create stonith-sbd primitive")
@@ -653,11 +863,13 @@
     @mock.patch('crmsh.bootstrap.error')
     @mock.patch('crmsh.bootstrap.invokerc')
     @mock.patch('crmsh.bootstrap.SBDManager._get_sbd_device_from_config')
+    @mock.patch('crmsh.utils.has_resource_configured')
     @mock.patch('crmsh.utils.service_is_enabled')
     @mock.patch('crmsh.utils.package_is_installed')
-    def test_configure_sbd_resource_error_property(self, mock_package, mock_enabled, mock_get_device, mock_invoke, mock_error):
+    def test_configure_sbd_resource_error_property(self, mock_package, mock_enabled, mock_ra_configured, mock_get_device, mock_invoke, mock_error):
         mock_package.return_value = True
         mock_enabled.return_value = True
+        mock_ra_configured.return_value = False
         mock_get_device.return_value = ["/dev/sdb1"]
         mock_invoke.side_effect = [True, False]
         mock_error.side_effect = ValueError
@@ -667,6 +879,7 @@

         mock_package.assert_called_once_with("sbd")
         mock_enabled.assert_called_once_with("sbd.service")
+        mock_ra_configured.assert_called_once_with("stonith:external/sbd")
         mock_get_device.assert_called_once_with()
         mock_invoke.assert_has_calls([
             mock.call("crm configure primitive stonith-sbd stonith:external/sbd pcmk_delay_max=30s"),
@@ -677,11 +890,13 @@
     @mock.patch('crmsh.bootstrap.error')
     @mock.patch('crmsh.bootstrap.invokerc')
     @mock.patch('crmsh.bootstrap.SBDManager._get_sbd_device_from_config')
+    @mock.patch('crmsh.utils.has_resource_configured')
     @mock.patch('crmsh.utils.service_is_enabled')
     @mock.patch('crmsh.utils.package_is_installed')
-    def test_configure_sbd_resource_diskless(self, mock_package, mock_enabled, mock_get_device, mock_invoke, mock_error):
+    def test_configure_sbd_resource_diskless(self, mock_package, mock_enabled, mock_ra_configured, mock_get_device, mock_invoke, mock_error):
         mock_package.return_value = True
         mock_enabled.return_value = True
+        mock_ra_configured.return_value = False
         mock_get_device.return_value = None
         mock_invoke.return_value = False
         mock_error.side_effect = ValueError
@@ -692,8 +907,9 @@
         mock_package.assert_called_once_with("sbd")
         mock_enabled.assert_called_once_with("sbd.service")
         mock_get_device.assert_called_once_with()
-        mock_invoke.assert_called_once_with("crm configure property stonith-enabled=true stonith-watchdog-timeout=5s")
+        mock_invoke.assert_called_once_with("crm configure property stonith-enabled=true stonith-watchdog-timeout=10s")
         mock_error.assert_called_once_with("Can't enable STONITH for diskless SBD")
+        mock_ra_configured.assert_called_once_with("stonith:external/sbd")

     @mock.patch('crmsh.utils.package_is_installed')
     def test_join_sbd_config_not_installed(self, mock_package):
@@ -758,15 +974,14 @@
         mock_watchdog_inst.join_watchdog.assert_called_once_with()

     @mock.patch('crmsh.bootstrap.status')
-    @mock.patch('crmsh.bootstrap.warn')
-    @mock.patch('crmsh.utils.get_quorum_votes_dict')
+    @mock.patch('crmsh.bootstrap.SBDManager._warn_diskless_sbd')
     @mock.patch('crmsh.bootstrap.SBDManager._get_sbd_device_from_config')
     @mock.patch('crmsh.bootstrap.Watchdog')
     @mock.patch('crmsh.bootstrap.invoke')
     @mock.patch('crmsh.utils.service_is_enabled')
     @mock.patch('os.path.exists')
     @mock.patch('crmsh.utils.package_is_installed')
-    def test_join_sbd_diskless(self, mock_package, mock_exists, mock_enabled, mock_invoke, mock_watchdog, mock_get_device, mock_quorum_votes, mock_warn, mock_status):
+    def test_join_sbd_diskless(self, mock_package, mock_exists, mock_enabled, mock_invoke, mock_watchdog, mock_get_device, mock_warn, mock_status):
         mock_package.return_value = True
         mock_exists.return_value = True
         mock_enabled.return_value = True
@@ -774,7 +989,6 @@
         mock_watchdog_inst = mock.Mock()
         mock_watchdog.return_value = mock_watchdog_inst
         mock_watchdog_inst.join_watchdog = mock.Mock()
-        mock_quorum_votes.return_value = {'Expected': '1', 'Total': '1'}

         self.sbd_inst.join_sbd("node1")
@@ -782,8 +996,7 @@
         mock_exists.assert_called_once_with("/etc/sysconfig/sbd")
         mock_invoke.assert_called_once_with("systemctl enable sbd.service")
         mock_get_device.assert_called_once_with()
-        mock_quorum_votes.assert_called_once_with("node1")
-        mock_warn.assert_called_once_with(bootstrap.SBDManager.DISKLESS_SBD_WARNING)
+        mock_warn.assert_called_once_with("node1")
         mock_enabled.assert_called_once_with("sbd.service", "node1")
         mock_status.assert_called_once_with("Got diskless SBD configuration")
         mock_watchdog.assert_called_once_with(peer_host="node1")
@@ -855,6 +1068,31 @@
         self.assertEqual(res, "a2e9a92c-cc72-4ef9-ac55-ccc342f3546b")
         mock_run.assert_called_once_with("sbd -d /dev/sda1 dump")

+    @mock.patch('crmsh.utils.parse_sysconfig')
+    def test_determine_watchdog_timeout(self, mock_parse):
+        mock_parse_inst = mock.Mock()
+        mock_parse.return_value = mock_parse_inst
+        mock_parse_inst.get.return_value = "5"
+        self.sbd_inst._determine_stonith_watchdog_timeout()
+        assert self.sbd_inst._stonith_watchdog_timeout == -1
+        mock_parse.assert_called_once_with(bootstrap.SYSCONFIG_SBD)
+        mock_parse_inst.get.assert_called_once_with("SBD_WATCHDOG_TIMEOUT")
+
+    @mock.patch('os.uname')
+    @mock.patch('crmsh.utils.parse_sysconfig')
+    def test_determine_watchdog_timeout_s390(self, mock_parse, mock_uname):
+        mock_parse_inst = mock.Mock()
+        mock_parse.return_value = mock_parse_inst
+        mock_parse_inst.get.return_value = None
+        mock_uname_inst = mock.Mock()
+        mock_uname.return_value = mock_uname_inst
+        mock_uname_inst.machine = "s390"
+        self.sbd_inst._determine_stonith_watchdog_timeout()
+        assert self.sbd_inst._stonith_watchdog_timeout == "30s"
+        mock_parse.assert_called_once_with(bootstrap.SYSCONFIG_SBD)
+        mock_parse_inst.get.assert_called_once_with("SBD_WATCHDOG_TIMEOUT")
+        mock_uname.assert_called_once_with()


 class TestBootstrap(unittest.TestCase):
     """
@@ -1002,7 +1240,7 @@
         mock_invoke.return_value = (False, None, "error")
         error_string = 'Failed to append contents of fromfile to node1:\n"error"\n\n    crmsh has no way to help you to setup up passwordless ssh among nodes at this time. \n    As the hint, likely, `PasswordAuthentication` is \'no\' in /etc/ssh/sshd_config. \n    Given in this case, users must setup passwordless ssh beforehand, or change it to \'yes\' and manage passwords properly\n    '
\n Given in this case, users must setup passwordless ssh beforehand, or change it to \'yes\' and manage passwords properly\n ' bootstrap.append_to_remote_file("fromfile", "node1", "tofile") - cmd = "cat fromfile | ssh -oStrictHostKeyChecking=no root@node1 'cat >> tofile'" + cmd = "cat fromfile | ssh {} root@node1 'cat >> tofile'".format(constants.SSH_OPTION) mock_invoke.assert_called_once_with(cmd) mock_error.assert_called_once_with(error_string) @@ -1015,10 +1253,10 @@ self.assertEqual("No ssh key exist on node1", str(err.exception)) mock_invoke.assert_has_calls([ - mock.call("ssh -oStrictHostKeyChecking=no root@node1 'test -f /root/.ssh/id_rsa.pub'"), - mock.call("ssh -oStrictHostKeyChecking=no root@node1 'test -f /root/.ssh/id_ecdsa.pub'"), - mock.call("ssh -oStrictHostKeyChecking=no root@node1 'test -f /root/.ssh/id_ed25519.pub'"), - mock.call("ssh -oStrictHostKeyChecking=no root@node1 'test -f /root/.ssh/id_dsa.pub'") + mock.call("ssh {} root@node1 'test -f /root/.ssh/id_rsa.pub'".format(constants.SSH_OPTION)), + mock.call("ssh {} root@node1 'test -f /root/.ssh/id_ecdsa.pub'".format(constants.SSH_OPTION)), + mock.call("ssh {} root@node1 'test -f /root/.ssh/id_ed25519.pub'".format(constants.SSH_OPTION)), + mock.call("ssh {} root@node1 'test -f /root/.ssh/id_dsa.pub'".format(constants.SSH_OPTION)) ]) @mock.patch('crmsh.tmpfiles.create') @@ -1032,8 +1270,8 @@ res = bootstrap.fetch_public_key_from_remote_node("node1") self.assertEqual(res, "temp_file_name") - mock_invokerc.assert_called_once_with("ssh -oStrictHostKeyChecking=no root@node1 'test -f /root/.ssh/id_rsa.pub'") - mock_invoke.assert_called_once_with("scp -oStrictHostKeyChecking=no root@node1:/root/.ssh/id_rsa.pub temp_file_name") + mock_invokerc.assert_called_once_with("ssh {} root@node1 'test -f /root/.ssh/id_rsa.pub'".format(constants.SSH_OPTION)) + mock_invoke.assert_called_once_with("scp -o StrictHostKeyChecking=no root@node1:/root/.ssh/id_rsa.pub temp_file_name") mock_tmpfile.assert_called_once_with() @mock.patch('crmsh.bootstrap.error') @@ -1063,7 +1301,7 @@ mock.call("node1", "root"), mock.call("node1", "hacluster") ]) - mock_invoke.assert_called_once_with("ssh root@node1 crm cluster init -i eth1 ssh_remote") + mock_invoke.assert_called_once_with("ssh {} root@node1 crm cluster init -i eth1 ssh_remote".format(constants.SSH_OPTION)) mock_error.assert_called_once_with("Can't invoke crm cluster init -i eth1 ssh_remote on node1: error") def test_swap_public_ssh_key_return(self): @@ -1115,7 +1353,7 @@ with self.assertRaises(SystemExit): bootstrap.setup_passwordless_with_other_nodes("node1") - mock_run.assert_called_once_with("ssh -o StrictHostKeyChecking=no root@node1 crm_node -l") + mock_run.assert_called_once_with("ssh {} root@node1 crm_node -l".format(constants.SSH_OPTION)) mock_error.assert_called_once_with("Can't fetch cluster nodes list from node1: None") @mock.patch('crmsh.bootstrap.error') @@ -1133,8 +1371,8 @@ bootstrap.setup_passwordless_with_other_nodes("node1") mock_run.assert_has_calls([ - mock.call("ssh -o StrictHostKeyChecking=no root@node1 crm_node -l"), - mock.call("ssh -o StrictHostKeyChecking=no root@node1 hostname") + mock.call("ssh {} root@node1 crm_node -l".format(constants.SSH_OPTION)), + mock.call("ssh {} root@node1 hostname".format(constants.SSH_OPTION)) ]) mock_error.assert_called_once_with("Can't fetch hostname of node1: None") @@ -1151,8 +1389,8 @@ bootstrap.setup_passwordless_with_other_nodes("node1") mock_run.assert_has_calls([ - mock.call("ssh -o StrictHostKeyChecking=no root@node1 crm_node -l"), 
- mock.call("ssh -o StrictHostKeyChecking=no root@node1 hostname") + mock.call("ssh {} root@node1 crm_node -l".format(constants.SSH_OPTION)), + mock.call("ssh {} root@node1 hostname".format(constants.SSH_OPTION)) ]) mock_swap.assert_has_calls([ mock.call("node2", "root"), @@ -1199,7 +1437,7 @@ peer_node = bootstrap.get_cluster_node_hostname() assert peer_node == "Node1" - mock_stdout_stderr.assert_called_once_with("ssh node1 crm_node --name") + mock_stdout_stderr.assert_called_once_with("ssh {} node1 crm_node --name".format(constants.SSH_OPTION)) @mock.patch('crmsh.bootstrap.error') @mock.patch('crmsh.utils.get_stdout_stderr') @@ -1211,7 +1449,7 @@ with self.assertRaises(SystemExit): bootstrap.get_cluster_node_hostname() - mock_stdout_stderr.assert_called_once_with("ssh node2 crm_node --name") + mock_stdout_stderr.assert_called_once_with("ssh {} node2 crm_node --name".format(constants.SSH_OPTION)) mock_error.assert_called_once_with("error") @mock.patch('crmsh.utils.this_node') @@ -1390,7 +1628,6 @@ mock_qdevice_start.assert_called_once_with() @mock.patch('crmsh.bootstrap.start_qdevice_service') - @mock.patch('crmsh.bootstrap.status_done') @mock.patch('crmsh.corosync.QDevice.certificate_process_on_init') @mock.patch('crmsh.bootstrap.status_long') @mock.patch('crmsh.utils.is_qdevice_tls_on') @@ -1400,7 +1637,7 @@ @mock.patch('crmsh.utils.check_ssh_passwd_need') @mock.patch('crmsh.bootstrap.status') def test_init_qdevice(self, mock_status, mock_ssh, mock_qdevice_configured, mock_valid_qnetd, mock_config_qdevice, - mock_tls, mock_status_long, mock_certificate, mock_status_done, mock_start_qdevice): + mock_tls, mock_status_long, mock_certificate, mock_start_qdevice): bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip) mock_ssh.return_value = False mock_qdevice_configured.return_value = False @@ -1416,7 +1653,6 @@ mock_tls.assert_called_once_with() mock_status_long.assert_called_once_with("Qdevice certification process") mock_certificate.assert_called_once_with() - mock_status_done.assert_called_once_with() mock_start_qdevice.assert_called_once_with() @mock.patch('crmsh.bootstrap.prompt_for_string') @@ -1532,7 +1768,6 @@ mock_enable_qnetd.assert_called_once_with() mock_start_qnetd.assert_called_once_with() - @mock.patch('crmsh.bootstrap.status_done') @mock.patch('crmsh.utils.cluster_run_cmd') @mock.patch('crmsh.bootstrap.update_expected_votes') @mock.patch('crmsh.bootstrap.status_long') @@ -1541,7 +1776,7 @@ @mock.patch('crmsh.corosync.QDevice.write_qdevice_config') @mock.patch('crmsh.corosync.QDevice.remove_qdevice_db') def test_config_qdevice(self, mock_remove_qdevice_db, mock_write_qdevice_config, mock_is_unicast, - mock_add_nodelist, mock_status_long, mock_update_votes, mock_cluster_run, mock_status_done): + mock_add_nodelist, mock_status_long, mock_update_votes, mock_cluster_run): bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, qdevice_reload_policy=bootstrap.QdevicePolicy.QDEVICE_RELOAD) mock_is_unicast.return_value = False @@ -1554,7 +1789,6 @@ mock_status_long.assert_called_once_with("Update configuration") mock_update_votes.assert_called_once_with() mock_cluster_run.assert_called_once_with("crm corosync reload") - mock_status_done.assert_called_once_with() @mock.patch('crmsh.bootstrap.error') @mock.patch('crmsh.utils.is_qdevice_configured') @@ -1579,7 +1813,6 @@ mock_qdevice_configured.assert_called_once_with() mock_confirm.assert_called_once_with("Removing QDevice service and configuration from cluster: Are you sure?") - 
@mock.patch('crmsh.bootstrap.status_done') @mock.patch('crmsh.bootstrap.update_expected_votes') @mock.patch('crmsh.corosync.QDevice') @mock.patch('crmsh.corosync.get_value') @@ -1591,7 +1824,7 @@ @mock.patch('crmsh.bootstrap.confirm') @mock.patch('crmsh.utils.is_qdevice_configured') def test_remove_qdevice_reload(self, mock_qdevice_configured, mock_confirm, mock_reachable, mock_evaluate, - mock_status, mock_invoke, mock_status_long, mock_get_value, mock_qdevice, mock_update_votes, mock_status_done): + mock_status, mock_invoke, mock_status_long, mock_get_value, mock_qdevice, mock_update_votes): mock_qdevice_configured.return_value = True mock_confirm.return_value = True mock_evaluate.return_value = bootstrap.QdevicePolicy.QDEVICE_RELOAD @@ -1622,9 +1855,7 @@ mock_qdevice_inst.remove_qdevice_config.assert_called_once_with() mock_qdevice_inst.remove_qdevice_db.assert_called_once_with() mock_update_votes.assert_called_once_with() - mock_status_done.assert_called_once_with() - @mock.patch('crmsh.bootstrap.status_done') @mock.patch('crmsh.utils.start_service') @mock.patch('crmsh.corosync.QDevice') @mock.patch('crmsh.corosync.get_value') @@ -1637,7 +1868,7 @@ @mock.patch('crmsh.bootstrap.status_long') def test_start_qdevice_on_join_node(self, mock_status_long, mock_is_unicast, mock_add_nodelist, mock_conf, mock_csync2_update, mock_invoke, mock_qdevice_tls, - mock_get_value, mock_qdevice, mock_start_service, mock_status_done): + mock_get_value, mock_qdevice, mock_start_service): mock_is_unicast.return_value = False mock_qdevice_tls.return_value = True mock_conf.return_value = "corosync.conf" @@ -1659,7 +1890,6 @@ mock_qdevice.assert_called_once_with("10.10.10.123", cluster_node="node2") mock_qdevice_inst.certificate_process_on_join.assert_called_once_with() mock_start_service.assert_called_once_with("corosync-qdevice.service", enable=True) - mock_status_done.assert_called_once_with() @mock.patch('crmsh.utils.get_stdout_stderr') @mock.patch('crmsh.bootstrap.log') @@ -2047,7 +2277,7 @@ bootstrap.remove_self() mock_list.assert_called_once_with(include_remote_nodes=False) - mock_ext.assert_called_once_with("ssh -o StrictHostKeyChecking=no node2 'crm cluster remove -y -c node1'") + mock_ext.assert_called_once_with("ssh {} node2 'crm cluster remove -y -c node1'".format(constants.SSH_OPTION)) mock_error.assert_called_once_with("Failed to remove this node from node2") @mock.patch('crmsh.bootstrap.error') @@ -2123,7 +2353,7 @@ mock_get_ip.assert_called_once_with() mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, remote_addr="node1") - mock_invoke.assert_called_once_with('ssh -o StrictHostKeyChecking=no root@node1 "bash -c \\"rm -f file1 file2\\""') + mock_invoke.assert_called_once_with('ssh {} root@node1 "bash -c \\"rm -f file1 file2\\""'.format(constants.SSH_OPTION)) mock_error.assert_called_once_with("Deleting the configuration files failed: error") @mock.patch('crmsh.bootstrap.error') @@ -2144,7 +2374,7 @@ mock_get_ip.assert_called_once_with() mock_status.assert_called_once_with("Removing the node node1") mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, remote_addr="node1") - mock_invoke.assert_called_once_with('ssh -o StrictHostKeyChecking=no root@node1 "bash -c \\"rm -f file1 file2\\""') + mock_invoke.assert_called_once_with('ssh {} root@node1 "bash -c \\"rm -f file1 file2\\""'.format(constants.SSH_OPTION)) mock_invokerc.assert_called_once_with("crm node delete node1") mock_error.assert_called_once_with("Failed to remove node1") @@ -2166,7 +2396,7 @@ 
mock_get_ip.assert_called_once_with() mock_status.assert_called_once_with("Removing the node node1") mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, remote_addr="node1") - mock_invoke.assert_called_once_with('ssh -o StrictHostKeyChecking=no root@node1 "bash -c \\"rm -f file1 file2\\""') + mock_invoke.assert_called_once_with('ssh {} root@node1 "bash -c \\"rm -f file1 file2\\""'.format(constants.SSH_OPTION)) mock_invokerc.assert_has_calls([ mock.call('crm node delete node1'), mock.call("sed -i /node1/d {}".format(bootstrap.CSYNC2_CFG)) @@ -2199,7 +2429,7 @@ ]) mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, remote_addr="node1") mock_invoke.assert_has_calls([ - mock.call('ssh -o StrictHostKeyChecking=no root@node1 "bash -c \\"rm -f file1 file2\\""'), + mock.call('ssh {} root@node1 "bash -c \\"rm -f file1 file2\\""'.format(constants.SSH_OPTION)), mock.call("corosync-cfgtool -R") ]) mock_invokerc.assert_has_calls([ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.0+20210416.49f489c2/test/unittests/test_utils.py new/crmsh-4.3.0+20210507.bf02d791/test/unittests/test_utils.py --- old/crmsh-4.3.0+20210416.49f489c2/test/unittests/test_utils.py 2021-04-16 17:29:55.000000000 +0200 +++ new/crmsh-4.3.0+20210507.bf02d791/test/unittests/test_utils.py 2021-05-07 04:34:39.000000000 +0200 @@ -1307,3 +1307,13 @@ def test_re_split_string(): assert utils.re_split_string('[; ]', "/dev/sda1; /dev/sdb1 ; ") == ["/dev/sda1", "/dev/sdb1"] assert utils.re_split_string('[; ]', "/dev/sda1 ") == ["/dev/sda1"] + + [email protected]("crmsh.utils.get_stdout_or_raise_error") +def test_has_resource_configured(mock_run): + mock_run.return_value = """ +primitive stonith-sbd stonith:external/sbd \ + params pcmk_delay_max=30s + """ + res = utils.has_resource_configured("stonith:external/sbd") + assert res is True
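
The new test_has_resource_configured pins down only two things about the helper it covers: crmsh.utils.has_resource_configured shells out through get_stdout_or_raise_error, and it returns True when the requested RA type appears in output that looks like a "crm configure show" listing. A minimal sketch consistent with that test follows; the exact command string is an assumption, since the helper's body is not part of this diff:

    # Sketch of crmsh.utils.has_resource_configured, inferred from the unit
    # test above. Only the call through get_stdout_or_raise_error and the
    # substring match are test-backed; the queried command is an assumption.
    from crmsh.utils import get_stdout_or_raise_error

    def has_resource_configured(ra_type):
        out = get_stdout_or_raise_error("crm configure show")  # assumed command
        return ra_type in out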
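
The two test_determine_watchdog_timeout cases likewise fix the observable behavior of SBDManager._determine_stonith_watchdog_timeout: with SBD_WATCHDOG_TIMEOUT set in /etc/sysconfig/sbd the stonith-watchdog-timeout value becomes -1 (sbd enforces the timeout itself), and with it unset on an s390 machine the fallback is "30s". A sketch that satisfies exactly those assertions; the real method may handle further cases:

    import os
    from crmsh import utils

    SYSCONFIG_SBD = "/etc/sysconfig/sbd"  # the path bootstrap.SYSCONFIG_SBD refers to

    def determine_stonith_watchdog_timeout():
        value = utils.parse_sysconfig(SYSCONFIG_SBD).get("SBD_WATCHDOG_TIMEOUT")
        if value:
            # A watchdog timeout is configured in /etc/sysconfig/sbd, so the
            # cluster-side value is disabled and sbd owns the timeout.
            return -1
        if "s390" in os.uname().machine:
            # s390 hardware gets a longer default.
            return "30s"
        return None  # default branch not covered by these tests; assumption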
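
A pattern running through the qdevice tests: every mock_status_done patch and assertion disappears while status_long stays, so pairing the long-running status message with its completion is no longer the caller's job. That is the shape of a context-manager-based status_long; a minimal sketch of the pattern, with status and status_done standing in for the real bootstrap helpers:

    from contextlib import contextmanager

    def status(msg):
        print(msg)

    def status_done():
        print("done")

    @contextmanager
    def status_long(msg):
        # Open the long-running status line, hand control to the caller's
        # block, then emit the completion marker on the way out.
        status("{}...".format(msg))
        yield
        status_done()

    # Callers wrap the slow step instead of calling status_done() themselves,
    # which is why the tests above no longer assert on it, e.g.:
    #     with status_long("Qdevice certification process"):
    #         certificate_process_on_init()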
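
Finally, the recurring mechanical change in this diff: the hard-coded StrictHostKeyChecking flag in the expected ssh/scp command strings gives way to constants.SSH_OPTION. The constant's definition is outside these hunks; judging from the strings the tests previously expected, it is plausibly:

    # crmsh/constants.py (sketch; the exact definition is not in this diff)
    SSH_OPTION = "-o StrictHostKeyChecking=no"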
