Script 'mail_helper' called by obssrc Hello community, here is the log from the commit of package crmsh for openSUSE:Factory checked in at 2024-04-03 17:20:34 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Comparing /work/SRC/openSUSE:Factory/crmsh (Old) and /work/SRC/openSUSE:Factory/.crmsh.new.1905 (New) ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "crmsh" Wed Apr 3 17:20:34 2024 rev:328 rq:1164253 version:4.6.0+20240403.3ed4d839 Changes: -------- --- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes 2024-04-02 16:44:11.916979724 +0200 +++ /work/SRC/openSUSE:Factory/.crmsh.new.1905/crmsh.changes 2024-04-03 17:21:57.632914066 +0200 @@ -1,0 +2,20 @@ +Wed Apr 03 03:33:06 UTC 2024 - [email protected] + +- Update to version 4.6.0+20240403.3ed4d839: + * Dev: unittest: Adjust unit test for previous commit + * Dev: behave: Adjust functional test for previous commit + * Fix: ui_node: When `utils.list_cluster_nodes` return None, try to get ip list from corosync.conf + +------------------------------------------------------------------- +Mon Apr 01 08:00:47 UTC 2024 - [email protected] + +- Update to version 4.6.0+20240401.d4bf74c8: + * Dev: unittest: Adjust unit test for previous commit + * Dev: bootstrap: Add all nodes' keys to qnetd authroized_keys even init + * Fix: bootstrap: fix NameError + * Dev: qdevice: Refactor init_qdevice function + * Dev: qdevice: Add all nodes' keys to qnetd authorized_keys + * Fix: ui_node: prevent traceback on node online + * Fix: ui_node: prevent traceback on node standby + +------------------------------------------------------------------- Old: ---- crmsh-4.6.0+20240330.3473a5ba.tar.bz2 New: ---- crmsh-4.6.0+20240403.3ed4d839.tar.bz2 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Other differences: ------------------ ++++++ crmsh.spec ++++++ --- /var/tmp/diff_new_pack.uf6azD/_old 2024-04-03 17:21:58.200935023 +0200 +++ /var/tmp/diff_new_pack.uf6azD/_new 2024-04-03 17:21:58.200935023 +0200 @@ -36,7 +36,7 @@ Summary: High Availability cluster command-line interface License: GPL-2.0-or-later Group: %{pkg_group} -Version: 4.6.0+20240330.3473a5ba +Version: 4.6.0+20240403.3ed4d839 Release: 0 URL: http://crmsh.github.io Source0: %{name}-%{version}.tar.bz2 ++++++ _servicedata ++++++ --- /var/tmp/diff_new_pack.uf6azD/_old 2024-04-03 17:21:58.240936499 +0200 +++ /var/tmp/diff_new_pack.uf6azD/_new 2024-04-03 17:21:58.240936499 +0200 @@ -9,7 +9,7 @@ </service> <service name="tar_scm"> <param name="url">https://github.com/ClusterLabs/crmsh.git</param> - <param name="changesrevision">3473a5ba98249197c99ba63dd2a32b675493d6f9</param> + <param name="changesrevision">df8a7f1c1e6365ec13cd5ec1e7d6bbc7a1bd503d</param> </service> </servicedata> (No newline at EOF) ++++++ crmsh-4.6.0+20240330.3473a5ba.tar.bz2 -> crmsh-4.6.0+20240403.3ed4d839.tar.bz2 ++++++ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240330.3473a5ba/crmsh/bootstrap.py new/crmsh-4.6.0+20240403.3ed4d839/crmsh/bootstrap.py --- old/crmsh-4.6.0+20240330.3473a5ba/crmsh/bootstrap.py 2024-03-30 12:48:39.000000000 +0100 +++ new/crmsh-4.6.0+20240403.3ed4d839/crmsh/bootstrap.py 2024-04-03 05:07:17.000000000 +0200 @@ -74,7 +74,7 @@ "/etc/drbd.conf", "/etc/drbd.d", "/etc/ha.d/ldirectord.cf", "/etc/lvm/lvm.conf", "/etc/multipath.conf", "/etc/samba/smb.conf", SYSCONFIG_NFS, SYSCONFIG_PCMK, SYSCONFIG_SBD, PCMK_REMOTE_AUTH, WATCHDOG_CFG, PROFILES_FILE, CRM_CFG, SBD_SYSTEMD_DELAY_START_DIR) -INIT_STAGES = ("ssh", "csync2", "csync2_remote", "corosync", "remote_auth", "sbd", "cluster", "ocfs2", "admin", "qdevice") +INIT_STAGES = ("ssh", "csync2", "csync2_remote", "qnetd_remote", "corosync", "remote_auth", "sbd", "cluster", "ocfs2", "admin", "qdevice") class Context(object): @@ -802,7 +802,7 @@ try: keys = ssh_key.AgentClient().list() logger.info("Using public keys from ssh-agent...") - 
except Error: + except ssh_key.Error: logger.error("Cannot get a public key from ssh-agent.") raise return keys @@ -1178,6 +1178,18 @@ _context.quiet = was_quiet +def init_qnetd_remote(): + """ + Triggered by join_cluster, this function adds the joining node's key to the qnetd's authorized_keys + """ + local_user, remote_user, join_node = _select_user_pair_for_ssh_for_secondary_components(_context.cluster_node) + join_node_key_content = remote_public_key_from(remote_user, local_user, join_node, remote_user) + qnetd_host = corosync.get_value("quorum.device.net.host") + _, qnetd_user, qnetd_host = _select_user_pair_for_ssh_for_secondary_components(qnetd_host) + authorized_key_manager = ssh_key.AuthorizedKeyManager(sh.cluster_shell()) + authorized_key_manager.add(qnetd_host, qnetd_user, ssh_key.InMemoryPublicKey(join_node_key_content)) + + def init_corosync_auth(): """ Generate the corosync authkey @@ -1570,59 +1582,72 @@ is_stage=_context.stage == "qdevice") +def _setup_passwordless_ssh_for_qnetd(cluster_node_list: typing.List[str]): + local_user, qnetd_user, qnetd_addr = _select_user_pair_for_ssh_for_secondary_components(_context.qnetd_addr_input) + # Configure ssh passwordless to qnetd if detect password is needed + if UserOfHost.instance().use_ssh_agent(): + logger.info("Adding public keys to authorized_keys for user root...") + for key in ssh_key.AgentClient().list(): + ssh_key.AuthorizedKeyManager(sh.SSHShell( + sh.LocalShell(additional_environ={'SSH_AUTH_SOCK': os.environ.get('SSH_AUTH_SOCK')}), + 'root', + )).add(qnetd_addr, qnetd_user, key) + else: + if utils.check_ssh_passwd_need(local_user, qnetd_user, qnetd_addr): + if 0 != utils.ssh_copy_id_no_raise(local_user, qnetd_user, qnetd_addr): + msg = f"Failed to login to {qnetd_user}@{qnetd_addr}. Please check the credentials." 
+ sudoer = userdir.get_sudoer() + if sudoer and qnetd_user != sudoer: + args = ['sudo crm'] + args += [x for x in sys.argv[1:]] + for i, arg in enumerate(args): + if arg == '--qnetd-hostname' and i + 1 < len(args): + if '@' not in args[i + 1]: + args[i + 1] = f'{sudoer}@{qnetd_addr}' + msg += '\nOr, run "{}".'.format(' '.join(args)) + raise ValueError(msg) + + cluster_shell = sh.cluster_shell() + # Add other nodes' public keys to qnetd's authorized_keys + for node in cluster_node_list: + if node == utils.this_node(): + continue + local_user, remote_user, node = _select_user_pair_for_ssh_for_secondary_components(node) + remote_key_content = remote_public_key_from(remote_user, local_user, node, remote_user) + in_memory_key = ssh_key.InMemoryPublicKey(remote_key_content) + ssh_key.AuthorizedKeyManager(cluster_shell).add(qnetd_addr, qnetd_user, in_memory_key) + + user_by_host = utils.HostUserConfig() + user_by_host.add(local_user, utils.this_node()) + user_by_host.add(qnetd_user, qnetd_addr) + user_by_host.save_remote(cluster_node_list) + + def init_qdevice(): """ Setup qdevice and qnetd service """ if not _context.qdevice_inst: configure_qdevice_interactive() - # If don't want to config qdevice, return if not _context.qdevice_inst: ServiceManager().disable_service("corosync-qdevice.service") return + logger.info("""Configure Qdevice/Qnetd:""") cluster_node_list = utils.list_cluster_nodes() for node in cluster_node_list: if not ServiceManager().service_is_available("corosync-qdevice.service", node): utils.fatal("corosync-qdevice.service is not available on {}".format(node)) + + _setup_passwordless_ssh_for_qnetd(cluster_node_list) + qdevice_inst = _context.qdevice_inst - local_user, ssh_user, qnetd_addr = _select_user_pair_for_ssh_for_secondary_components(_context.qnetd_addr_input) - # Configure ssh passwordless to qnetd if detect password is needed - if UserOfHost.instance().use_ssh_agent(): - logger.info("Adding public keys to authorized_keys for user root...") - for key in ssh_key.AgentClient().list(): - ssh_key.AuthorizedKeyManager(sh.SSHShell( - sh.LocalShell(additional_environ={'SSH_AUTH_SOCK': os.environ.get('SSH_AUTH_SOCK')}), - 'root', - )).add(qnetd_addr, ssh_user, key) - elif utils.check_ssh_passwd_need(local_user, ssh_user, qnetd_addr): - configure_ssh_key(local_user) - if 0 != utils.ssh_copy_id_no_raise(local_user, ssh_user, qnetd_addr): - msg = f"Failed to login to {ssh_user}@{qnetd_addr}. Please check the credentials." 
- sudoer = userdir.get_sudoer() - if sudoer and ssh_user != sudoer: - args = ['sudo crm'] - args += [x for x in sys.argv[1:]] - for i, arg in enumerate(args): - if arg == '--qnetd-hostname' and i + 1 < len(args): - if '@' not in args[i + 1]: - args[i + 1] = f'{sudoer}@{qnetd_addr}' - msg += '\nOr, run "{}".'.format(' '.join(args)) - raise ValueError(msg) - user_by_host = utils.HostUserConfig() - user_by_host.add(local_user, utils.this_node()) - user_by_host.add(ssh_user, qnetd_addr) - user_by_host.save_remote(cluster_node_list) - # Start qdevice service if qdevice already configured if utils.is_qdevice_configured() and not confirm("Qdevice is already configured - overwrite?"): qdevice_inst.start_qdevice_service() return qdevice_inst.set_cluster_name() - # Validate qnetd node qdevice_inst.valid_qnetd() - qdevice_inst.config_and_start_qdevice() - if _context.stage == "qdevice": adjust_properties() @@ -2032,6 +2057,13 @@ if is_qdevice_configured and not ServiceManager().service_is_available("corosync-qdevice.service"): utils.fatal("corosync-qdevice.service is not available") + shell = sh.cluster_shell() + + if is_qdevice_configured and not _context.use_ssh_agent: + # trigger init_qnetd_remote on init node + cmd = f"crm cluster init qnetd_remote {utils.this_node()} -y" + shell.get_stdout_or_raise_error(cmd, seed_host) + shutil.copy(corosync.conf(), COROSYNC_CONF_ORIG) # check if use IPv6 @@ -2055,7 +2087,6 @@ # mountpoints for clustered filesystems. Unfortunately we don't have # that yet, so the following crawling horror takes a punt on the seed # node being up, then asks it for a list of mountpoints... - shell = sh.cluster_shell() if seed_host: _rc, outp, _ = shell.get_rc_stdout_stderr_without_input(seed_host, "cibadmin -Q --xpath \"//primitive\"") if outp: @@ -2357,7 +2388,7 @@ elif stage == "": if _context.cluster_is_running: utils.fatal("Cluster is currently active - can't run") - elif stage not in ("ssh", "csync2", "csync2_remote", "sbd", "ocfs2"): + elif stage not in ("ssh", "csync2", "csync2_remote", "qnetd_remote", "sbd", "ocfs2"): if _context.cluster_is_running: utils.fatal("Cluster is currently active - can't run %s stage" % (stage)) @@ -2365,15 +2396,15 @@ _context.init_sbd_manager() # Need hostname resolution to work, want NTP (but don't block csync2_remote) - if stage not in ('csync2_remote',): + if stage not in ('csync2_remote', 'qnetd_remote'): check_tty() if not check_prereqs(stage): return - elif stage == 'csync2_remote': + else: args = _context.args logger_utils.log_only_to_file("args: {}".format(args)) if len(args) != 2: - utils.fatal("Expected NODE argument to csync2_remote") + utils.fatal(f"Expected NODE argument to {stage} stage") _context.cluster_node = args[1] if stage and _context.cluster_is_running and \ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240330.3473a5ba/crmsh/corosync.py new/crmsh-4.6.0+20240403.3ed4d839/crmsh/corosync.py --- old/crmsh-4.6.0+20240330.3473a5ba/crmsh/corosync.py 2024-03-30 12:48:39.000000000 +0100 +++ new/crmsh-4.6.0+20240403.3ed4d839/crmsh/corosync.py 2024-04-03 05:07:17.000000000 +0200 @@ -105,12 +105,6 @@ if not qnetd_addr: raise ValueError("host for qnetd not configured!") - # Configure ssh passwordless to qnetd if detect password is needed - local_user, remote_user = utils.user_pair_for_ssh(qnetd_addr) - if utils.check_ssh_passwd_need(local_user, remote_user, qnetd_addr): - crmsh.bootstrap.configure_ssh_key(local_user) - utils.ssh_copy_id(local_user, remote_user, qnetd_addr) - 
cmd = "corosync-qnetd-tool -lv -c {}".format(cluster_name) result = parallax.parallax_call([qnetd_addr], cmd) _, qnetd_result_stdout, _ = result[0][1] diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240330.3473a5ba/crmsh/ssh_key.py new/crmsh-4.6.0+20240403.3ed4d839/crmsh/ssh_key.py --- old/crmsh-4.6.0+20240330.3473a5ba/crmsh/ssh_key.py 2024-03-30 12:48:39.000000000 +0100 +++ new/crmsh-4.6.0+20240403.3ed4d839/crmsh/ssh_key.py 2024-04-03 05:07:17.000000000 +0200 @@ -74,7 +74,7 @@ class InMemoryPublicKey(Key): def __init__(self, content: str): - self.content = content + self.content = content.strip() def public_key(self) -> str: return self.content diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240330.3473a5ba/crmsh/ui_node.py new/crmsh-4.6.0+20240403.3ed4d839/crmsh/ui_node.py --- old/crmsh-4.6.0+20240330.3473a5ba/crmsh/ui_node.py 2024-03-30 12:48:39.000000000 +0100 +++ new/crmsh-4.6.0+20240403.3ed4d839/crmsh/ui_node.py 2024-04-03 05:07:17.000000000 +0200 @@ -246,9 +246,12 @@ # return local node if (not options.all and not args) or (len(args) == 1 and args[0] == utils.this_node()): return [utils.this_node()] - member_list = utils.list_cluster_nodes() + member_list = utils.list_cluster_nodes() or utils.get_address_list_from_corosync_conf() if not member_list: context.fatal_error("Cannot get the node list from cluster") + for node in args: + if node not in member_list: + context.fatal_error(f"Node '{node}' is not a member of the cluster") node_list = member_list if options.all else args for node in node_list: @@ -375,6 +378,8 @@ xml_query_path_oppsite = constants.XML_NODE_QUERY_STANDBY_PATH cib = xmlutil.cibdump2elem() + if cib is None: + return False # IMPORTANT: # Do NOT call cibdump2elem twice, or you risk a race where the # resulting diff will contain more changes than the values for @@ -433,6 +438,8 @@ return cib = xmlutil.cibdump2elem() + if cib is None: + return False # IMPORTANT: Do NOT call cibdump2elem twice, or you risk a race. # Really use the same xml as "original" and basis for the changes. # Thus the "deepcopy" here; see also do_standby(). 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240330.3473a5ba/crmsh/utils.py new/crmsh-4.6.0+20240403.3ed4d839/crmsh/utils.py --- old/crmsh-4.6.0+20240330.3473a5ba/crmsh/utils.py 2024-03-30 12:48:39.000000000 +0100 +++ new/crmsh-4.6.0+20240403.3ed4d839/crmsh/utils.py 2024-04-03 05:07:17.000000000 +0200 @@ -1812,10 +1812,10 @@ else: cib_path = os.getenv('CIB_file', constants.CIB_RAW_FILE) if not os.path.isfile(cib_path): - return get_address_list_from_corosync_conf() + return None cib = xmlutil.file2cib_elem(cib_path) if cib is None: - return get_address_list_from_corosync_conf() + return None node_list = [] for node in cib.xpath(constants.XML_NODE_PATH): diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240330.3473a5ba/test/features/bootstrap_bugs.feature new/crmsh-4.6.0+20240403.3ed4d839/test/features/bootstrap_bugs.feature --- old/crmsh-4.6.0+20240330.3473a5ba/test/features/bootstrap_bugs.feature 2024-03-30 12:48:39.000000000 +0100 +++ new/crmsh-4.6.0+20240403.3ed4d839/test/features/bootstrap_bugs.feature 2024-04-03 05:07:17.000000000 +0200 @@ -151,6 +151,9 @@ Then Cluster service is "started" on "hanode1" Then Cluster service is "started" on "hanode2" + When Try "crm cluster start xxx" + Then Except "ERROR: cluster.start: Node 'xxx' is not a member of the cluster" + @clean Scenario: Can't stop all nodes' cluster service when local node's service is down(bsc#1213889) Given Cluster service is "stopped" on "hanode1" diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240330.3473a5ba/test/features/crm_report_normal.feature new/crmsh-4.6.0+20240403.3ed4d839/test/features/crm_report_normal.feature --- old/crmsh-4.6.0+20240330.3473a5ba/test/features/crm_report_normal.feature 2024-03-30 12:48:39.000000000 +0100 +++ new/crmsh-4.6.0+20240403.3ed4d839/test/features/crm_report_normal.feature 2024-04-03 05:07:17.000000000 +0200 @@ -105,4 +105,5 @@ When Run "crm cluster stop --all" on "hanode1" When Run "rm -f /var/lib/pacemaker/cib/cib*" on "hanode1" When Run "rm -f /var/lib/pacemaker/cib/cib*" on "hanode2" - When Run "crm report" OK + When Try "crm report" on "hanode1" + Then Expected "Could not figure out a list of nodes; is this a cluster node" in stderr diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240330.3473a5ba/test/unittests/test_bootstrap.py new/crmsh-4.6.0+20240403.3ed4d839/test/unittests/test_bootstrap.py --- old/crmsh-4.6.0+20240330.3473a5ba/test/unittests/test_bootstrap.py 2024-03-30 12:48:39.000000000 +0100 +++ new/crmsh-4.6.0+20240403.3ed4d839/test/unittests/test_bootstrap.py 2024-04-03 05:07:17.000000000 +0200 @@ -914,6 +914,75 @@ mock_interfaces_inst.get_default_nic_list_from_route.assert_called_once_with() mock_interfaces_inst.get_default_ip_list.assert_called_once_with() + @mock.patch('crmsh.utils.HostUserConfig') + @mock.patch('crmsh.ssh_key.AuthorizedKeyManager') + @mock.patch('crmsh.sh.cluster_shell') + @mock.patch('crmsh.ssh_key.InMemoryPublicKey') + @mock.patch('crmsh.bootstrap.remote_public_key_from') + @mock.patch('crmsh.utils.this_node') + @mock.patch('crmsh.utils.ssh_copy_id_no_raise') + @mock.patch('crmsh.utils.check_ssh_passwd_need') + @mock.patch('crmsh.bootstrap.UserOfHost.instance') + @mock.patch('crmsh.bootstrap._select_user_pair_for_ssh_for_secondary_components') + def test_setup_passwordless_ssh_for_qnetd_add_keys(self, mock_select, 
mock_user_of_host, mock_check_passwd, mock_ssh_copy_id, mock_this_node, mock_remote_public_key_from, mock_in_memory_public_key, mock_cluster_shell, mock_authorized_key_manager, mock_host_user_config_class): + bootstrap._context = mock.Mock(qnetd_addr_input="user@qnetd-node") + mock_select.side_effect = [("bob", "bob", "qnetd-node"), ("bob", "bob", "node2")] + mock_user_of_host_instance = mock.Mock() + mock_user_of_host.return_value = mock_user_of_host_instance + mock_user_of_host_instance.use_ssh_agent.return_value = False + mock_check_passwd.return_value = True + mock_ssh_copy_id.return_value = 0 + mock_this_node.return_value = "node1" + mock_remote_public_key_from.return_value = "public_key" + mock_in_memory_public_key.return_value = "public_key" + mock_authorized_key_manager_instance = mock.Mock() + mock_authorized_key_manager.return_value = mock_authorized_key_manager_instance + mock_host_user_config_instance = mock.Mock() + mock_host_user_config_class.return_value = mock_host_user_config_instance + + bootstrap._setup_passwordless_ssh_for_qnetd(["node1", "node2"]) + + mock_select.assert_has_calls([ + mock.call(bootstrap._context.qnetd_addr_input), + mock.call('node2') + ]) + + @mock.patch('crmsh.utils.this_node') + @mock.patch('crmsh.utils.HostUserConfig') + @mock.patch('os.environ.get') + @mock.patch('crmsh.sh.SSHShell') + @mock.patch('crmsh.sh.LocalShell') + @mock.patch('crmsh.ssh_key.AuthorizedKeyManager') + @mock.patch('crmsh.ssh_key.AgentClient') + @mock.patch('logging.Logger.info') + @mock.patch('crmsh.bootstrap.UserOfHost.instance') + @mock.patch('crmsh.bootstrap._select_user_pair_for_ssh_for_secondary_components') + def test_setup_passwordless_ssh_for_qnetd_ssh_agent(self, mock_select, mock_user_of_host, mock_info, mock_agent, mock_authorized_key_manager, mock_local_shell, mock_ssh_shell, mock_get, mock_host_user_config_class, mock_this_node): + bootstrap._context = mock.Mock(qnetd_addr_input="user@qnetd-node") + mock_select.return_value = ("bob", "bob", "qnetd-node") + mock_user_of_host_instance = mock.Mock() + mock_user_of_host.return_value = mock_user_of_host_instance + mock_user_of_host_instance.use_ssh_agent.return_value = True + mock_agent_instance = mock.Mock() + mock_agent.return_value = mock_agent_instance + key_in_memory = mock.MagicMock(crmsh.ssh_key.InMemoryPublicKey) + mock_agent_instance.list.return_value = [key_in_memory] + mock_authorized_key_manager_instance = mock.Mock() + mock_authorized_key_manager.return_value = mock_authorized_key_manager_instance + mock_get.return_value = "/ssh-agent-path" + mock_local_shell_instance = mock.Mock() + mock_local_shell.return_value = mock_local_shell_instance + mock_ssh_shell_instance = mock.Mock() + mock_ssh_shell.return_value = mock_ssh_shell_instance + mock_this_node.return_value = "node1" + mock_host_user_config_instance = mock.Mock() + mock_host_user_config_class.return_value = mock_host_user_config_instance + + bootstrap._setup_passwordless_ssh_for_qnetd(["node1", "node2"]) + + mock_info.assert_called_once_with("Adding public keys to authorized_keys for user root...") + mock_authorized_key_manager_instance.add.assert_called_once_with("qnetd-node", "bob", key_in_memory) + @mock.patch('crmsh.service_manager.ServiceManager.disable_service') @mock.patch('logging.Logger.info') def test_init_qdevice_no_config(self, mock_status, mock_disable): @@ -924,39 +993,6 @@ @mock.patch('crmsh.bootstrap._select_user_pair_for_ssh_for_secondary_components') @mock.patch('crmsh.utils.HostUserConfig') - 
@mock.patch('crmsh.user_of_host.UserOfHost.instance') - @mock.patch('crmsh.utils.list_cluster_nodes') - @mock.patch('crmsh.utils.ssh_copy_id_no_raise') - @mock.patch('crmsh.bootstrap.configure_ssh_key') - @mock.patch('crmsh.utils.check_ssh_passwd_need') - @mock.patch('logging.Logger.info') - def test_init_qdevice_copy_ssh_key_failed( - self, - mock_status, mock_check_ssh_passwd_need, - mock_configure_ssh_key, mock_ssh_copy_id, mock_list_nodes, mock_user_of_host, - mock_host_user_config_class, - mock_select_user_pair_for_ssh, - ): - mock_list_nodes.return_value = [] - bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, current_user="bob") - mock_check_ssh_passwd_need.return_value = True - mock_ssh_copy_id.return_value = 255 - mock_user_of_host.return_value = mock.MagicMock(crmsh.user_of_host.UserOfHost) - mock_user_of_host.return_value.use_ssh_agent.return_value = False - mock_select_user_pair_for_ssh.return_value = ("bob", "bob", 'qnetd-node') - - with self.assertRaises(ValueError): - bootstrap.init_qdevice() - - mock_status.assert_has_calls([ - mock.call("Configure Qdevice/Qnetd:"), - ]) - mock_check_ssh_passwd_need.assert_called_once_with("bob", "bob", "qnetd-node") - mock_configure_ssh_key.assert_called_once_with('bob') - mock_ssh_copy_id.assert_called_once_with('bob', 'bob', 'qnetd-node') - - @mock.patch('crmsh.bootstrap._select_user_pair_for_ssh_for_secondary_components') - @mock.patch('crmsh.utils.HostUserConfig') @mock.patch('crmsh.user_of_host.UserOfHost.instance') @mock.patch('crmsh.utils.list_cluster_nodes') @mock.patch('crmsh.bootstrap.confirm') diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240330.3473a5ba/test/unittests/test_corosync.py new/crmsh-4.6.0+20240403.3ed4d839/test/unittests/test_corosync.py --- old/crmsh-4.6.0+20240330.3473a5ba/test/unittests/test_corosync.py 2024-03-30 12:48:39.000000000 +0100 +++ new/crmsh-4.6.0+20240403.3ed4d839/test/unittests/test_corosync.py 2024-04-03 05:07:17.000000000 +0200 @@ -179,18 +179,14 @@ @mock.patch('crmsh.utils.user_pair_for_ssh') @mock.patch("crmsh.parallax.parallax_call") -@mock.patch("crmsh.utils.ssh_copy_id") -@mock.patch('crmsh.bootstrap.configure_ssh_key') -@mock.patch("crmsh.utils.check_ssh_passwd_need") @mock.patch("crmsh.corosync.get_value") @mock.patch("crmsh.utils.is_qdevice_configured") def test_query_qnetd_status_copy_id_failed(mock_qdevice_configured, - mock_get_value, mock_check_passwd, mock_config_ssh_key, mock_ssh_copy_id, mock_parallax_call, mock_user_pair_for_ssh): + mock_get_value, mock_parallax_call, mock_user_pair_for_ssh): mock_user_pair_for_ssh.return_value = "alice", "root" mock_parallax_call.side_effect = ValueError("Failed on 10.10.10.123: foo") mock_qdevice_configured.return_value = True mock_get_value.side_effect = ["hacluster", "10.10.10.123"] - mock_check_passwd.return_value = True with pytest.raises(ValueError) as err: corosync.query_qnetd_status() assert err.value.args[0] == "Failed on 10.10.10.123: foo" @@ -199,26 +195,19 @@ mock.call("totem.cluster_name"), mock.call("quorum.device.net.host") ]) - mock_check_passwd.assert_called_once_with("alice", "root", "10.10.10.123") - mock_config_ssh_key.assert_called_once_with('alice') - mock_ssh_copy_id.assert_called_once_with('alice', 'root', '10.10.10.123') @mock.patch('crmsh.utils.user_pair_for_ssh') @mock.patch("crmsh.utils.print_cluster_nodes") @mock.patch("crmsh.parallax.parallax_call") -@mock.patch("crmsh.utils.ssh_copy_id")
-@mock.patch('crmsh.bootstrap.configure_ssh_key') -@mock.patch("crmsh.utils.check_ssh_passwd_need") @mock.patch("crmsh.corosync.get_value") @mock.patch("crmsh.utils.is_qdevice_configured") def test_query_qnetd_status_copy(mock_qdevice_configured, mock_get_value, - mock_check_passwd, mock_config_ssh_key, mock_ssh_copy_id, mock_parallax_call, mock_print_nodes, + mock_parallax_call, mock_print_nodes, mock_user_pair_for_ssh): mock_user_pair_for_ssh.return_value = "alice", "root" mock_qdevice_configured.return_value = True mock_get_value.side_effect = ["hacluster", "10.10.10.123"] - mock_check_passwd.return_value = True mock_parallax_call.return_value = [("node1", (0, "data", None)), ] corosync.query_qnetd_status() @@ -228,9 +217,6 @@ mock.call("totem.cluster_name"), mock.call("quorum.device.net.host") ]) - mock_check_passwd.assert_called_once_with("alice", "root", "10.10.10.123") - mock_config_ssh_key.assert_called_once_with('alice') - mock_ssh_copy_id.assert_called_once_with('alice', 'root', '10.10.10.123') mock_parallax_call.assert_called_once_with(["10.10.10.123"], "corosync-qnetd-tool -lv -c hacluster") mock_print_nodes.assert_called_once_with() diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.6.0+20240330.3473a5ba/test/unittests/test_utils.py new/crmsh-4.6.0+20240403.3ed4d839/test/unittests/test_utils.py --- old/crmsh-4.6.0+20240330.3473a5ba/test/unittests/test_utils.py 2024-03-30 12:48:39.000000000 +0100 +++ new/crmsh-4.6.0+20240403.3ed4d839/test/unittests/test_utils.py 2024-04-03 05:07:17.000000000 +0200 @@ -1202,43 +1202,26 @@ mock_run_inst.get_stdout_or_raise_error.assert_called_once_with("corosync-quorumtool -s", None, success_exit_status={0, 2}) -@mock.patch('crmsh.utils.get_address_list_from_corosync_conf') @mock.patch('crmsh.utils.etree.fromstring') @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr') -def test_list_cluster_nodes_none(mock_run, mock_etree, mock_corosync): +def test_list_cluster_nodes_none(mock_run, mock_etree): mock_run.return_value = (0, "data", None) mock_etree.return_value = None - mock_corosync.return_value = ["node1", "node2"] res = utils.list_cluster_nodes() - assert res == ["node1", "node2"] + assert res is None mock_run.assert_called_once_with(constants.CIB_QUERY, no_reg=False) mock_etree.assert_called_once_with("data") -@mock.patch('crmsh.utils.get_address_list_from_corosync_conf') -@mock.patch('crmsh.utils.etree.fromstring') -@mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr') -def test_list_cluster_nodes_none_no_reg(mock_run, mock_etree, mock_corosync): - mock_run.return_value = (0, "data", None) - mock_etree.return_value = None - mock_corosync.return_value = ["node1", "node2"] - res = utils.list_cluster_nodes(no_reg=True) - assert res == ["node1", "node2"] - mock_run.assert_called_once_with(constants.CIB_QUERY, no_reg=True) - mock_etree.assert_called_once_with("data") - - -@mock.patch('crmsh.utils.get_address_list_from_corosync_conf') @mock.patch('os.path.isfile') @mock.patch('os.getenv') @mock.patch('crmsh.sh.ShellUtils.get_stdout_stderr') -def test_list_cluster_nodes_cib_not_exist(mock_run, mock_env, mock_isfile, mock_corosync): +def test_list_cluster_nodes_cib_not_exist(mock_run, mock_env, mock_isfile): mock_run.return_value = (1, None, None) mock_env.return_value = constants.CIB_RAW_FILE mock_isfile.return_value = False - mock_corosync.return_value = ["node1", "node2"] res = utils.list_cluster_nodes() - assert res == ["node1", "node2"] + assert res is None
mock_run.assert_called_once_with(constants.CIB_QUERY, no_reg=False) mock_env.assert_called_once_with("CIB_file", constants.CIB_RAW_FILE) mock_isfile.assert_called_once_with(constants.CIB_RAW_FILE)
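Taken together, the bootstrap.py hunks wire the join flow to the new qnetd_remote stage: when a node joins a cluster that has qdevice configured (and ssh-agent is not in use), the join side asks the init node to push the joining node's public key to the qnetd host. A rough sketch of that round trip, reusing the helper names from the hunks above (the two wrapper functions are illustrative only, not the actual bootstrap code):

    from crmsh import bootstrap, corosync, sh, ssh_key, utils

    def trigger_qnetd_remote(seed_host):
        # Runs on the joining node: delegate to the init node, which
        # already has passwordless SSH access to the qnetd host.
        cmd = f"crm cluster init qnetd_remote {utils.this_node()} -y"
        sh.cluster_shell().get_stdout_or_raise_error(cmd, seed_host)

    def qnetd_remote_sketch(local_user, remote_user, join_node, qnetd_user):
        # Runs on the init node: fetch the joining node's public key and
        # append it to the qnetd host's authorized_keys.
        key_content = bootstrap.remote_public_key_from(
            remote_user, local_user, join_node, remote_user)
        qnetd_host = corosync.get_value("quorum.device.net.host")
        ssh_key.AuthorizedKeyManager(sh.cluster_shell()).add(
            qnetd_host, qnetd_user, ssh_key.InMemoryPublicKey(key_content))

This also explains why query_qnetd_status in corosync.py no longer calls ssh_copy_id itself: by the time it runs, passwordless access to qnetd has already been established during init or join.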
