Script 'mail_helper' called by obssrc

Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory
checked in at 2024-09-24 17:33:35

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.29891 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "crmsh"

Tue Sep 24 17:33:35 2024 rev:344 rq:1202782 version:5.0.0+20240920.c4ce88eb

Changes:
--------
--- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes      2024-09-09 14:46:27.035705953 +0200
+++ /work/SRC/openSUSE:Factory/.crmsh.new.29891/crmsh.changes   2024-09-24 17:33:49.446455279 +0200
@@ -1,0 +2,15 @@
+Fri Sep 20 06:36:45 UTC 2024 - xli...@suse.com
+
+- Update to version 5.0.0+20240920.c4ce88eb:
+  * Fix: bootstrap: check is_nologin more robustly (bsc#1228251)
+  * Dev: unittests: Adjust unit test for previous commit
+  * Dev: completers: Reuse node completer for cluster remove and health
+  * Dev: bootstrap: Enhance log clarity during crm cluster remove process
+
+-------------------------------------------------------------------
+Mon Sep 09 09:41:14 UTC 2024 - xli...@suse.com
+
+- Update to version 5.0.0+20240909.28abc7cb:
+  * Dev: utils: Catch PermissionError when reading files
+
+-------------------------------------------------------------------

Old:
----
  crmsh-5.0.0+20240909.1d497d0c.tar.bz2

New:
----
  crmsh-5.0.0+20240920.c4ce88eb.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.yWae4V/_old  2024-09-24 17:33:50.106482909 +0200
+++ /var/tmp/diff_new_pack.yWae4V/_new  2024-09-24 17:33:50.110483077 +0200
@@ -36,7 +36,7 @@
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        5.0.0+20240909.1d497d0c
+Version:        5.0.0+20240920.c4ce88eb
 Release:        0
 URL:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.yWae4V/_old  2024-09-24 17:33:50.154484919 +0200
+++ /var/tmp/diff_new_pack.yWae4V/_new  2024-09-24 17:33:50.158485086 +0200
@@ -9,7 +9,7 @@
 </service>
 <service name="tar_scm">
   <param name="url">https://github.com/ClusterLabs/crmsh.git</param>
-  <param name="changesrevision">1d497d0c6115e4253c487dc2ec117adc9765f6cb</param>
+  <param name="changesrevision">c4ce88eb36b4431a174a202759b7ce00426cd9d5</param>
 </service>
</servicedata>
(No newline at EOF)

++++++ crmsh-5.0.0+20240909.1d497d0c.tar.bz2 -> crmsh-5.0.0+20240920.c4ce88eb.tar.bz2 ++++++

diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20240909.1d497d0c/crmsh/bootstrap.py new/crmsh-5.0.0+20240920.c4ce88eb/crmsh/bootstrap.py
--- old/crmsh-5.0.0+20240909.1d497d0c/crmsh/bootstrap.py       2024-09-09 07:32:40.000000000 +0200
+++ new/crmsh-5.0.0+20240920.c4ce88eb/crmsh/bootstrap.py        2024-09-20 07:53:51.000000000 +0200
@@ -976,15 +976,18 @@
     """
     Check if user's shell is nologin
     """
-    passwd_file = "/etc/passwd"
-    pattern = f"{user}:.*:/.*/nologin"
-    if remote:
-        cmd = f"cat {passwd_file}|grep {pattern}"
-        rc, _, _ = sh.cluster_shell().get_rc_stdout_stderr_without_input(remote, cmd)
-        return rc == 0
-    else:
-        with open(passwd_file) as f:
-            return re.search(pattern, f.read()) is not None
+    rc, error = sh.cluster_shell().get_rc_and_error(
+        remote, None,
+        "set -e\n"
+        f"shell=$(getent passwd '{user}' | awk -F: '{{ print $NF }}')\n"
+        '[ -n "${shell}" ] && [ -f "${shell}" ] && [ -x "${shell}" ] || exit 1\n'
+        'case $(basename "$shell") in\n'
+        '    nologin) exit 1 ;;\n'
+        '    false) exit 1 ;;\n'
+        'esac\n'
+        '"${shell}" < /dev/null &>/dev/null\n'
+    )
+    return 0 != rc
 
 
 def change_user_shell(user, remote=None):
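The rewritten is_nologin() above resolves the user's shell through getent instead of grepping /etc/passwd, so it also covers NSS sources other than the flat file, catches shells such as /bin/false, and uses the same code path locally and remotely. A minimal standalone sketch of the same probe, assuming plain subprocess in place of crmsh's cluster shell wrapper (shell_is_nologin is a hypothetical name, not crmsh API):

    # Sketch only: mirrors the shell logic above without the crmsh wrappers.
    import subprocess

    def shell_is_nologin(user: str) -> bool:
        """Return True when `user` has no usable login shell."""
        script = (
            "set -e\n"
            f"shell=$(getent passwd '{user}' | awk -F: '{{ print $NF }}')\n"
            '[ -n "${shell}" ] && [ -f "${shell}" ] && [ -x "${shell}" ] || exit 1\n'
            'case $(basename "$shell") in\n'
            '    nologin|false) exit 1 ;;\n'
            'esac\n'
            # POSIX redirection here; the hunk above uses bash-only &>/dev/null.
            '"${shell}" < /dev/null > /dev/null 2>&1\n'
        )
        # Non-zero exit means "no working shell", mirroring `return 0 != rc`.
        return subprocess.run(["/bin/sh", "-c", script]).returncode != 0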
@@ -2074,7 +2077,7 @@
     service_manager = ServiceManager()
     for service in stop_list:
         if service_manager.service_is_active(service, remote_addr=remote_addr):
-            logger.info("Stopping the %s%s", service, " on {}".format(remote_addr) if remote_addr else "")
+            logger.info("Stopping the %s on %s", service, remote_addr if remote_addr else utils.this_node())
             service_manager.stop_service(service, disable=True, remote_addr=remote_addr)
 
 
@@ -2101,7 +2104,7 @@
     rm_configuration_files(node)
 
     # execute the command : crm node delete $HOSTNAME
-    logger.info("Removing the node {}".format(node))
+    logger.info("Removing node %s from CIB", node)
     if not NodeMgmt.call_delnode(node):
         utils.fatal("Failed to remove {}.".format(node))
 
@@ -2341,7 +2344,7 @@
 
 
 def bootstrap_finished():
-    logger.info("Done (log saved to %s)" % (log.CRMSH_LOG_FILE))
+    logger.info("Done (log saved to %s on %s)", log.CRMSH_LOG_FILE, utils.this_node())
 
 
 def join_ocfs2(peer_host, peer_user):
@@ -2398,13 +2401,15 @@
 
     init()
 
+    if _context.qdevice_rm_flag and _context.cluster_node:
+        utils.fatal("Either remove node or qdevice")
+    if _context.cluster_node:
+        logger.info("Removing node %s from cluster", _context.cluster_node)
+
     service_manager = ServiceManager()
     if not service_manager.service_is_active("corosync.service"):
         utils.fatal("Cluster is not active - can't execute removing action")
 
-    if _context.qdevice_rm_flag and _context.cluster_node:
-        utils.fatal("Either remove node or qdevice")
-
     _context.skip_csync2 = not service_manager.service_is_active(CSYNC2_SERVICE)
     if _context.skip_csync2:
         _context.node_list_in_cluster = utils.fetch_cluster_node_list_from_node(utils.this_node())
@@ -2451,11 +2456,13 @@
     nodes = xmlutil.listnodes(include_remote_nodes=False)
     othernode = next((x for x in nodes if x != me), None)
     if othernode is not None:
-        # remove from other node
+        logger.info("Removing node %s from cluster on %s", me, othernode)
         cmd = "crm{} cluster remove{} -c {}".format(" -F" if force_flag else "", " -y" if yes_to_all else "", me)
-        rc, _, _ = sh.cluster_shell().get_rc_stdout_stderr_without_input(othernode, cmd)
+        rc, stdout, stderr = sh.cluster_shell().get_rc_stdout_stderr_without_input(othernode, cmd)
         if rc != 0:
-            utils.fatal("Failed to remove this node from {}".format(othernode))
+            utils.fatal(f"Failed to remove this node from {othernode}: {stderr}")
+        elif stdout:
+            print(stdout)
     else:
         # disable and stop cluster
         stop_services(SERVICES_STOP_LIST)
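The logging changes above consistently replace eager str.format()/%-interpolation with logging's lazy %-style arguments, so the message template stays constant and formatting only happens when a record is actually emitted. A minimal illustration (standalone, not crmsh code):

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("example")
    node = "node1"

    # Lazy: interpolated only if the record passes the level and filters.
    logger.info("Removing node %s from CIB", node)
    # Eager: the string is built even when the call would be filtered out.
    logger.info("Removing node {} from CIB".format(node))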
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20240909.1d497d0c/crmsh/ui_cluster.py new/crmsh-5.0.0+20240920.c4ce88eb/crmsh/ui_cluster.py
--- old/crmsh-5.0.0+20240909.1d497d0c/crmsh/ui_cluster.py      2024-09-09 07:32:40.000000000 +0200
+++ new/crmsh-5.0.0+20240920.c4ce88eb/crmsh/ui_cluster.py       2024-09-20 07:53:51.000000000 +0200
@@ -42,17 +42,6 @@
     return options, args
 
 
-def _remove_completer(args):
-    try:
-        n = utils.list_cluster_nodes()
-    except:
-        n = []
-    for node in args[1:]:
-        if node in n:
-            n.remove(node)
-    return scripts.param_completion_list('remove') + n
-
-
 def script_printer():
     from .ui_script import ConsolePrinter
     return ConsolePrinter()
@@ -319,7 +308,6 @@
                 return args + ['%s=%s' % (name, ','.join(vals))]
         return args
 
-    # @command.completers_repeating(compl.call(scripts.param_completion_list, 'init'))
     @command.skill_level('administrator')
     def do_init(self, context, *args):
         '''
@@ -532,7 +520,7 @@
         return True
 
     @command.alias("delete")
-    @command.completers_repeating(_remove_completer)
+    @command.completers_repeating(compl.nodes)
     @command.skill_level('administrator')
     def do_remove(self, context, *args):
         '''
@@ -568,6 +556,7 @@
         for node in args:
             rm_context.cluster_node = node
             bootstrap.bootstrap_remove(rm_context)
+            print()
         return True
 
     @command.skill_level('administrator')
@@ -757,7 +746,7 @@
         bootstrap.bootstrap_arbitrator(geo_context)
         return True
 
-    @command.completers_repeating(compl.call(scripts.param_completion_list, 'health'))
+    @command.completers_repeating(compl.nodes)
     def do_health(self, context, *args):
         '''
         Extensive health check.
@@ -768,9 +757,6 @@
             raise ValueError("health script failed to load")
         return scripts.run(script, script_args(params), script_printer())
 
-    def _node_in_cluster(self, node):
-        return node in utils.list_cluster_nodes()
-
     def do_status(self, context):
         '''
         Quick cluster health status. Corosync status, DRBD status...

diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20240909.1d497d0c/crmsh/utils.py new/crmsh-5.0.0+20240920.c4ce88eb/crmsh/utils.py
--- old/crmsh-5.0.0+20240909.1d497d0c/crmsh/utils.py    2024-09-09 07:32:40.000000000 +0200
+++ new/crmsh-5.0.0+20240920.c4ce88eb/crmsh/utils.py    2024-09-20 07:53:51.000000000 +0200
@@ -3102,6 +3102,9 @@
     try:
         with _open(infile, 'rt', encoding='utf-8', errors='replace') as f:
             data = f.read()
+    except PermissionError as err:
+        logger.warning("When reading file \"%s\": %s", infile, str(err))
+        return ""
     except Exception as err:
         logger.error("When reading file \"%s\": %s", infile, str(err))
         return ""
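The utils.py hunk works because Python tries except clauses top to bottom and PermissionError is a subclass of Exception, so the specific handler must come before the generic one or it would never match. A minimal sketch of the pattern, with read_file as a hypothetical stand-in for the surrounding crmsh helper:

    import logging

    logger = logging.getLogger("example")

    def read_file(infile: str) -> str:
        try:
            with open(infile, 'rt', encoding='utf-8', errors='replace') as f:
                return f.read()
        except PermissionError as err:
            # Present-but-unreadable files are now only a warning.
            logger.warning('When reading file "%s": %s', infile, err)
            return ""
        except Exception as err:
            # Anything else is still reported as an error.
            logger.error('When reading file "%s": %s', infile, err)
            return ""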
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20240909.1d497d0c/test/unittests/test_bootstrap.py new/crmsh-5.0.0+20240920.c4ce88eb/test/unittests/test_bootstrap.py
--- old/crmsh-5.0.0+20240909.1d497d0c/test/unittests/test_bootstrap.py 2024-09-09 07:32:40.000000000 +0200
+++ new/crmsh-5.0.0+20240920.c4ce88eb/test/unittests/test_bootstrap.py 2024-09-20 07:53:51.000000000 +0200
@@ -470,15 +470,18 @@
             mock.call(node_list, "systemctl daemon-reload"),
             ])
 
+    @mock.patch('crmsh.bootstrap.change_user_shell')
     @mock.patch('crmsh.bootstrap.configure_ssh_key')
     @mock.patch('crmsh.service_manager.ServiceManager.start_service')
-    def test_init_ssh(self, mock_start_service, mock_config_ssh):
+    def test_init_ssh(self, mock_start_service, mock_config_ssh, mock_change_user_shell):
         bootstrap._context = mock.Mock(current_user="alice", user_at_node_list=[], use_ssh_agent=False)
         bootstrap.init_ssh()
         mock_start_service.assert_called_once_with("sshd.service", enable=True)
         mock_config_ssh.assert_has_calls([
-            mock.call("alice")
-            ])
+            mock.call("alice"),
+            mock.call("hacluster"),
+            ])
+        mock_change_user_shell.assert_called_once_with("hacluster")
 
     @mock.patch('crmsh.userdir.gethomedir')
     def test_key_files(self, mock_gethome):
@@ -487,13 +490,6 @@
         self.assertEqual(bootstrap.key_files("root"), expected_res)
         mock_gethome.assert_called_once_with("root")
 
-    @mock.patch('builtins.open')
-    def test_is_nologin(self, mock_open_file):
-        data = "hacluster:x:90:90:heartbeat processes:/var/lib/heartbeat/cores/hacluster:/sbin/nologin"
-        mock_open_file.return_value = mock.mock_open(read_data=data).return_value
-        assert bootstrap.is_nologin("hacluster") is not None
-        mock_open_file.assert_called_once_with("/etc/passwd")
-
     @mock.patch('crmsh.bootstrap.confirm')
     @mock.patch('logging.Logger.info')
     @mock.patch('crmsh.bootstrap.is_nologin')
@@ -563,10 +559,12 @@
 
     @mock.patch('crmsh.ssh_key.AuthorizedKeyManager.add')
     @mock.patch('crmsh.ssh_key.KeyFileManager.ensure_key_pair_exists_for_user')
-    def test_configure_ssh_key(self, mock_ensure_key_pair, mock_add):
+    @mock.patch('crmsh.bootstrap.change_user_shell')
+    def test_configure_ssh_key(self, mock_change_user_shell, mock_ensure_key_pair, mock_add):
         public_key = crmsh.ssh_key.InMemoryPublicKey('foo')
         mock_ensure_key_pair.return_value = (True, [public_key])
         bootstrap.configure_ssh_key('alice')
+        mock_change_user_shell.assert_called_once_with('alice')
         mock_ensure_key_pair.assert_called_once_with(None, 'alice')
         mock_add.assert_called_once_with(None, 'alice', public_key)
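In the adjusted tests above, the stacked @mock.patch decorators map onto the test's parameters bottom-up: the bottom-most decorator is applied first and supplies the first mock argument, which is why the newly added patch for change_user_shell arrives as the first parameter. A small self-contained illustration, assuming only the standard library:

    import os
    import unittest
    from unittest import mock

    class OrderingExample(unittest.TestCase):
        @mock.patch('os.remove')       # outermost -> last mock argument
        @mock.patch('os.path.exists')  # innermost -> first mock argument
        def test_ordering(self, mock_exists, mock_remove):
            mock_exists.return_value = True
            if os.path.exists('/tmp/example'):
                os.remove('/tmp/example')
            mock_remove.assert_called_once_with('/tmp/example')

    if __name__ == '__main__':
        unittest.main()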
@@ -1518,7 +1516,7 @@
     @mock.patch('crmsh.bootstrap.Context')
     def test_bootstrap_remove_cluster_is_active(self, mock_context, mock_init, mock_active, mock_error):
-        mock_context_inst = mock.Mock()
+        mock_context_inst = mock.Mock(qdevice=False, cluster_node=None)
         mock_context.return_value = mock_context_inst
         mock_active.return_value = False
         mock_error.side_effect = SystemExit
@@ -1558,14 +1556,13 @@
     def test_bootstrap_remove_qdevice_cluster_node(self, mock_context, mock_init, mock_active, mock_error):
         mock_context_inst = mock.Mock(qdevice=True, cluster_node="node1")
         mock_context.return_value = mock_context_inst
-        mock_active.return_value = True
         mock_error.side_effect = SystemExit
 
         with self.assertRaises(SystemExit):
             bootstrap.bootstrap_remove(mock_context_inst)
 
         mock_init.assert_called_once_with()
-        mock_active.assert_called_once_with("corosync.service")
+        mock_active.assert_not_called()
         mock_error.assert_called_once_with("Either remove node or qdevice")
 
     @mock.patch('crmsh.bootstrap.prompt_for_string')
@@ -1655,6 +1652,7 @@
         mock_this_node.assert_called_once_with()
         mock_error.assert_called_once_with("Removing self requires --force")
 
+    @mock.patch('crmsh.bootstrap.bootstrap_finished')
     @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
     @mock.patch('crmsh.bootstrap.remove_self')
     @mock.patch('crmsh.utils.this_node')
@@ -1666,7 +1664,7 @@
     @mock.patch('crmsh.bootstrap.init')
     @mock.patch('crmsh.bootstrap.Context')
     def test_bootstrap_remove_self(self, mock_context, mock_init, mock_active,
-            mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node, mock_self, mock_run):
+            mock_error, mock_qdevice, mock_hostname, mock_confirm, mock_this_node, mock_self, mock_run, mock_finished):
         mock_context_inst = mock.Mock(cluster_node="node1", force=True, qdevice_rm_flag=None)
         mock_context.return_value = mock_context_inst
         mock_active.return_value = [True, True]
@@ -1774,7 +1772,7 @@
 
         mock_list.assert_called_once_with(include_remote_nodes=False)
         mock_run.assert_called_once_with("node2", "crm cluster remove -y -c node1")
-        mock_error.assert_called_once_with("Failed to remove this node from node2")
+        mock_error.assert_called_once_with("Failed to remove this node from node2: err")
 
     @mock.patch('crmsh.utils.package_is_installed')
     @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
@@ -1804,11 +1802,13 @@
         mock_get_values.assert_called_once_with("nodelist.node.ring0_addr")
         mock_get_iplist.assert_called_once_with('node1')
 
+    @mock.patch('crmsh.utils.this_node')
     @mock.patch('crmsh.service_manager.ServiceManager.stop_service')
     @mock.patch('logging.Logger.info')
     @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
-    def test_stop_services(self, mock_active, mock_status, mock_stop):
+    def test_stop_services(self, mock_active, mock_status, mock_stop, mock_this_node):
         mock_active.side_effect = [True, True, True, True]
+        mock_this_node.side_effect = ['node1', 'node1', 'node1', 'node1']
         bootstrap.stop_services(bootstrap.SERVICES_STOP_LIST)
         mock_active.assert_has_calls([
             mock.call("corosync-qdevice.service", remote_addr=None),
             mock.call('corosync.service', remote_addr=None),
             mock.call('hawk.service', remote_addr=None),
             mock.call("csync2.socket", remote_addr=None)
             ])
         mock_status.assert_has_calls([
-            mock.call('Stopping the %s%s', 'corosync-qdevice.service', ''),
-            mock.call('Stopping the %s%s', 'corosync.service', ''),
-            mock.call('Stopping the %s%s', 'hawk.service', ''),
-            mock.call('Stopping the %s%s', 'csync2.socket', '')
+            mock.call('Stopping the %s on %s', 'corosync-qdevice.service', 'node1'),
+            mock.call('Stopping the %s on %s', 'corosync.service', 'node1'),
+            mock.call('Stopping the %s on %s', 'hawk.service', 'node1'),
+            mock.call('Stopping the %s on %s', 'csync2.socket', 'node1')
             ])
         mock_stop.assert_has_calls([
             mock.call("corosync-qdevice.service", disable=True, remote_addr=None),
@@ -1846,7 +1846,7 @@
         bootstrap.remove_node_from_cluster('node1')
 
         mock_get_ip.assert_called_once_with('node1')
-        mock_status.assert_called_once_with("Removing the node node1")
+        mock_status.assert_called_once_with("Removing node %s from CIB", "node1")
         mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, remote_addr="node1")
         mock_invoke.assert_not_called()
         mock_call_delnode.assert_called_once_with("node1")
@@ -1871,7 +1871,7 @@
         bootstrap.remove_node_from_cluster('node1')
 
         mock_get_ip.assert_called_once_with('node1')
-        mock_status.assert_called_once_with("Removing the node node1")
+        mock_status.assert_called_once_with("Removing node %s from CIB", "node1")
         mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, remote_addr="node1")
         mock_invoke.assert_not_called()
         mock_call_delnode.assert_called_once_with("node1")
@@ -1909,7 +1909,7 @@
 
         mock_get_ip.assert_called_once_with('node1')
         mock_status.assert_has_calls([
-            mock.call("Removing the node node1"),
+            mock.call("Removing node %s from CIB", "node1"),
             mock.call("Propagating configuration changes across the remaining nodes")
             ])
         mock_stop.assert_called_once_with(bootstrap.SERVICES_STOP_LIST, remote_addr="node1")