Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked 
in at 2025-11-28 16:53:43
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.14147 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "crmsh"

Fri Nov 28 16:53:43 2025 rev:389 rq:1320435 version:5.0.0+20251128.b328f794

Changes:
--------
--- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes      2025-11-11 
19:21:16.523318840 +0100
+++ /work/SRC/openSUSE:Factory/.crmsh.new.14147/crmsh.changes   2025-11-28 
16:55:26.912642706 +0100
@@ -1,0 +2,48 @@
+Fri Nov 28 10:01:42 UTC 2025 - [email protected]
+
+- Update to version 5.0.0+20251128.b328f794:
+  * Dev: unittests: Adjust unit test for previous commit
+  * Dev: utils: Rename utils.ssh_reachable_check to utils.ssh_port_reachable_check
+  * Dev: utils: Check if the peer node needs password to access
+
+-------------------------------------------------------------------
+Fri Nov 28 09:04:01 UTC 2025 - [email protected]
+
+- Update to version 5.0.0+20251128.1e105666:
+  * Dev: unittests: Adjust unit test for previous commit
+  * Dev: utils: Rename utils.node_reachable_check to utils.ssh_reachable_check
+  * Dev: ui_utils: Move ui_node.parse_option_for_nodes to ui_utils.parse_and_validate_node_args
+  * Dev: behave: Adjust functional test for previous commit
+  * Fix: utils: Raise UnreachableNodeError for those ssh unreachable nodes (bsc#1250645)
+  * Dev: utils: Adjust node_reachable_check function and the way it is used
+
+-------------------------------------------------------------------
+Mon Nov 24 09:53:48 UTC 2025 - [email protected]
+
+- Update to version 5.0.0+20251124.0c3bd0aa:
+  * Dev: behave: Add functional test case for previous commit
+  * Dev: ui_sbd: Validate purge option first
+  * Dev: unittests: Adjust unit test for previous commit
+  * Dev: sbd: Improve the process of leveraging maintenance mode
+
+-------------------------------------------------------------------
+Mon Nov 24 04:08:17 UTC 2025 - [email protected]
+
+- Update to version 5.0.0+20251124.722ae586:
+  * Dev: Update Docker image
+
+-------------------------------------------------------------------
+Fri Nov 14 06:00:48 UTC 2025 - [email protected]
+
+- Update to version 5.0.0+20251114.99d958b3:
+  * Dev: unittests: Adjust unit test for previous commit
+  * Dev: sbd: Remove sbd configuration directories while removing cluster node
+
+-------------------------------------------------------------------
+Tue Nov 11 08:55:29 UTC 2025 - [email protected]
+
+- Update to version 5.0.0+20251111.9d10628b:
+  * Dev: unittests: Adjust unit test for previous commit
+  * Dev: report: Collect xml format output from crm_mon
+
+-------------------------------------------------------------------
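
The headline change in this batch is the reworked reachability checking: utils.node_reachable_check is gone, the SSH-port-only utils.ssh_port_reachable_check replaces it, and utils.check_all_nodes_reachable now also verifies passwordless access and raises the new UnreachableNodeError (bsc#1250645). A minimal sketch of how a caller might use these helpers, based on the utils.py hunks further down; the ensure_peers_ready wrapper itself is hypothetical:

    import logging
    from crmsh import utils

    logger = logging.getLogger(__name__)

    def ensure_peers_ready(peer_node):
        # Raises ValueError (or NoSSHError when core.no_ssh is configured)
        # if TCP port 22 on peer_node is closed; returns True otherwise.
        utils.ssh_port_reachable_check(peer_node)
        try:
            # With the default check_passwd=True this also flags nodes that
            # still prompt for a password; the join path in bootstrap.py now
            # passes check_passwd=False to skip that part.
            utils.check_all_nodes_reachable("joining a node to the cluster", peer_node)
        except utils.UnreachableNodeError as err:
            # New in this update: the exception carries the offending node names.
            logger.warning("Nodes needing attention: %s", ', '.join(err.nodes_unreachable))
            raise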

Old:
----
  crmsh-5.0.0+20251110.f97886d4.tar.bz2

New:
----
  crmsh-5.0.0+20251128.b328f794.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.C5ZYil/_old  2025-11-28 16:55:28.076691717 +0100
+++ /var/tmp/diff_new_pack.C5ZYil/_new  2025-11-28 16:55:28.076691717 +0100
@@ -41,7 +41,7 @@
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        5.0.0+20251110.f97886d4
+Version:        5.0.0+20251128.b328f794
 Release:        0
 URL:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.C5ZYil/_old  2025-11-28 16:55:28.120693570 +0100
+++ /var/tmp/diff_new_pack.C5ZYil/_new  2025-11-28 16:55:28.124693738 +0100
@@ -9,7 +9,7 @@
 </service>
 <service name="tar_scm">
   <param name="url">https://github.com/ClusterLabs/crmsh.git</param>
-  <param 
name="changesrevision">f97886d418c571326b6b4ab9853098673fb83695</param>
+  <param 
name="changesrevision">185c39f29dec57cc79f1b044ad03160a77406578</param>
 </service>
 </servicedata>
 (No newline at EOF)

++++++ crmsh-5.0.0+20251110.f97886d4.tar.bz2 -> 
crmsh-5.0.0+20251128.b328f794.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20251110.f97886d4/crmsh/bootstrap.py 
new/crmsh-5.0.0+20251128.b328f794/crmsh/bootstrap.py
--- old/crmsh-5.0.0+20251110.f97886d4/crmsh/bootstrap.py        2025-11-10 
13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/crmsh/bootstrap.py        2025-11-28 
10:33:24.000000000 +0100
@@ -137,7 +137,7 @@
         self.default_nic = None
         self.default_ip_list = []
         self.corosync_conf_orig = None
-        self.rm_list = [sbd.SBDManager.SYSCONFIG_SBD, corosync.conf(), 
COROSYNC_AUTH, "/var/lib/pacemaker/cib/*",
+        self.rm_list = [corosync.conf(), COROSYNC_AUTH, 
"/var/lib/pacemaker/cib/*",
                 "/var/lib/corosync/*", "/var/lib/pacemaker/pengine/*", 
PCMK_REMOTE_AUTH, "~/.config/crm/*"]
         self.use_ssh_agent = None
         self.skip_csync2 = None
@@ -273,7 +273,7 @@
             utils.fatal(f"Overriding current user '{self.current_user}' by 
'{user}'. Ouch, don't do it.")
         self.user_at_node_list = [value for (user, node), value in zip(li, 
self.user_at_node_list) if node != me]
         for user, node in (utils.parse_user_at_host(x) for x in 
self.user_at_node_list):
-            utils.node_reachable_check(node)
+            utils.ssh_port_reachable_check(node)
 
     def _validate_cluster_node(self):
         """
@@ -2137,10 +2137,8 @@
     """
     shell = sh.cluster_shell()
     shell.get_stdout_or_raise_error("rm -f {}".format(' 
'.join(_context.rm_list)), remote)
-    # restore original sbd configuration file from 
/usr/share/fillup-templates/sysconfig.sbd
-    if utils.package_is_installed("sbd", remote_addr=remote):
-        cmd = "cp {} {}".format(sbd.SBDManager.SYSCONFIG_SBD_TEMPLATE, 
sbd.SBDManager.SYSCONFIG_SBD)
-        shell.get_stdout_or_raise_error(cmd, remote)
+    if os.path.exists(sbd.SBDManager.SYSCONFIG_SBD):
+        sbd.cleanup_sbd_configurations(remote)
 
 
 def remove_pacemaker_remote_node_from_cluster(node):
@@ -2362,7 +2360,7 @@
             _context.initialize_user()
 
         remote_user, cluster_node = _parse_user_at_host(_context.cluster_node, 
_context.current_user)
-        utils.node_reachable_check(cluster_node)
+        utils.ssh_port_reachable_check(cluster_node)
         join_ssh(cluster_node, remote_user)
         remote_user = utils.user_of(cluster_node)
 
@@ -2370,7 +2368,7 @@
         try:
             with lock_inst.lock():
                 service_manager = ServiceManager()
-                utils.check_all_nodes_reachable("joining a node to the 
cluster", cluster_node)
+                utils.check_all_nodes_reachable("joining a node to the 
cluster", cluster_node, check_passwd=False)
                 setup_passwordless_with_other_nodes(cluster_node)
                 join_firewalld()
                 join_ssh_merge(cluster_node, remote_user)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20251110.f97886d4/crmsh/qdevice.py 
new/crmsh-5.0.0+20251128.b328f794/crmsh/qdevice.py
--- old/crmsh-5.0.0+20251110.f97886d4/crmsh/qdevice.py  2025-11-10 
13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/crmsh/qdevice.py  2025-11-28 
10:33:24.000000000 +0100
@@ -208,14 +208,11 @@
         except socket.error:
             raise ValueError("host \"{}\" is unreachable".format(qnetd_addr))
 
-        utils.node_reachable_check(qnetd_addr)
+        utils.ssh_port_reachable_check(qnetd_addr)
 
         if utils.InterfacesInfo.ip_in_local(qnetd_ip):
             raise ValueError("host for qnetd must be a remote one")
 
-        if not utils.check_port_open(qnetd_ip, 22):
-            raise ValueError("ssh service on \"{}\" not 
available".format(qnetd_addr))
-
     @staticmethod
     def check_qdevice_port(qdevice_port):
         if not utils.valid_port(qdevice_port):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20251110.f97886d4/crmsh/report/collect.py 
new/crmsh-5.0.0+20251128.b328f794/crmsh/report/collect.py
--- old/crmsh-5.0.0+20251110.f97886d4/crmsh/report/collect.py   2025-11-10 
13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/crmsh/report/collect.py   2025-11-28 
10:33:24.000000000 +0100
@@ -359,9 +359,10 @@
         ("-n1", "resources grouped by node"),
         ("-rf1", "resource fail counts"),
         ("-rnt1", "resource operation history with timing details"),
+        ("--output-as=xml", "XML format")
     ]:
         cmd = f"crm_mon {option}"
-        out += f"\n#### Display cluster state and {desc}: {cmd} ####\n"
+        out += f"\n#### Display cluster state with {desc}: {cmd} ####\n"
         out += cluster_shell_inst.get_stdout_or_raise_error(cmd)
         out += "\n\n"
 
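crm report now captures the XML view of crm_mon in addition to the text views above. The report only stores the raw output; below is a small, hypothetical sketch of consuming that XML afterwards with lxml (element and attribute names as emitted by recent Pacemaker releases):

    import subprocess
    from lxml import etree

    # Run the same command the collector now adds and parse its output.
    xml_out = subprocess.run(
        ["crm_mon", "--output-as=xml"],
        capture_output=True, text=True, check=True,
    ).stdout
    root = etree.fromstring(xml_out.encode())
    # List the cluster nodes and whether crm_mon sees them online.
    for node in root.xpath("//nodes/node"):
        print(node.get("name"), node.get("online"))
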
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20251110.f97886d4/crmsh/sbd.py 
new/crmsh-5.0.0+20251128.b328f794/crmsh/sbd.py
--- old/crmsh-5.0.0+20251110.f97886d4/crmsh/sbd.py      2025-11-10 
13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/crmsh/sbd.py      2025-11-28 
10:33:24.000000000 +0100
@@ -9,7 +9,6 @@
 from . import corosync
 from . import xmlutil
 from . import watchdog
-from . import parallax
 from .service_manager import ServiceManager
 from .sh import ShellUtils
 
@@ -594,22 +593,6 @@
                 logger.info("Enable %s on node %s", constants.SBD_SERVICE, 
node)
                 service_manager.enable_service(constants.SBD_SERVICE, node)
 
-    @staticmethod
-    def restart_cluster_if_possible(with_maintenance_mode=False):
-        if not ServiceManager().service_is_active(constants.PCMK_SERVICE):
-            return
-        if not xmlutil.CrmMonXmlParser().is_non_stonith_resource_running():
-            bootstrap.restart_cluster()
-        elif with_maintenance_mode:
-            if not utils.is_dlm_running():
-                bootstrap.restart_cluster()
-            else:
-                logger.warning("Resource is running, need to restart cluster 
service manually on each node")
-        else:
-            logger.warning("Resource is running, need to restart cluster 
service manually on each node")
-            logger.warning("Or, run with `crm -F` or `--force` option, the 
`sbd` subcommand will leverage maintenance mode for any changes that require 
restarting sbd.service")
-            logger.warning("Understand risks that running RA has no cluster 
protection while the cluster is in maintenance mode and restarting")
-
     def configure_sbd(self):
         '''
         Configure fence_sbd resource and related properties
@@ -747,6 +730,9 @@
             self._load_attributes_from_bootstrap()
 
         with utils.leverage_maintenance_mode() as enabled:
+            if not utils.able_to_restart_cluster(enabled):
+                return
+
             self.initialize_sbd()
             self.update_configuration()
             self.enable_sbd_service()
@@ -761,7 +747,7 @@
                 restart_cluster_first = restart_first or \
                         (self.diskless_sbd and not 
ServiceManager().service_is_active(constants.SBD_SERVICE))
                 if restart_cluster_first:
-                    
SBDManager.restart_cluster_if_possible(with_maintenance_mode=enabled)
+                    bootstrap.restart_cluster()
 
                 self.configure_sbd()
                 bootstrap.adjust_properties(with_sbd=True)
@@ -771,7 +757,7 @@
                 # This helps prevent unexpected issues, such as nodes being 
fenced
                 # due to large SBD_WATCHDOG_TIMEOUT values combined with 
smaller timeouts.
                 if not restart_cluster_first:
-                    
SBDManager.restart_cluster_if_possible(with_maintenance_mode=enabled)
+                    bootstrap.restart_cluster()
 
     def join_sbd(self, remote_user, peer_host):
         '''
@@ -813,6 +799,19 @@
         utils.ext_cmd("crm configure delete {}".format(' '.join(sbd_id_list)))
 
 
+def cleanup_sbd_configurations(remote=None):
+    shell = sh.cluster_shell()
+    sysconfig_sbd_bak = f"{SBDManager.SYSCONFIG_SBD}.bak"
+    logger.info("Rename %s to %s on node %s",
+                SBDManager.SYSCONFIG_SBD, sysconfig_sbd_bak, remote or 
utils.this_node())
+    cmd = f"test -f {SBDManager.SYSCONFIG_SBD} && mv 
{SBDManager.SYSCONFIG_SBD} {sysconfig_sbd_bak} || exit 0"
+    shell.get_stdout_or_raise_error(cmd, host=remote)
+
+    for _dir in [SBDManager.SBD_SYSTEMD_DELAY_START_DIR, 
SBDManager.SBD_SYSTEMD_DELAY_START_DISABLE_DIR]:
+        cmd = f"test -d {_dir} && rm -rf {_dir} && systemctl daemon-reload || 
exit 0"
+        shell.get_stdout_or_raise_error(cmd, host=remote)
+
+
 def purge_sbd_from_cluster():
     '''
     Purge SBD from cluster, the process includes:
@@ -830,17 +829,10 @@
         if service_manager.service_is_enabled(constants.SBD_SERVICE, node):
             logger.info("Disable %s on node %s", constants.SBD_SERVICE, node)
             service_manager.disable_service(constants.SBD_SERVICE, node)
-
-    config_bak = f"{SBDManager.SYSCONFIG_SBD}.bak"
-    logger.info("Move %s to %s on all nodes", SBDManager.SYSCONFIG_SBD, 
config_bak)
-    utils.cluster_run_cmd(f"mv {SBDManager.SYSCONFIG_SBD} {config_bak}")
+        cleanup_sbd_configurations(node)
 
     out = sh.cluster_shell().get_stdout_or_raise_error("stonith_admin -L")
     res = re.search("([0-9]+) fence device[s]* found", out)
     # after disable sbd.service, check if sbd is the last stonith device
     if res and int(res.group(1)) <= 1:
         utils.cleanup_stonith_related_properties()
-
-    for _dir in [SBDManager.SBD_SYSTEMD_DELAY_START_DIR, 
SBDManager.SBD_SYSTEMD_DELAY_START_DISABLE_DIR]:
-        cmd = f"test -d {_dir} && rm -rf {_dir} || exit 0"
-        parallax.parallax_call(cluster_nodes, cmd)
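
The new sbd.cleanup_sbd_configurations() shown above wraps each step in a "test ... && ... || exit 0" shell idiom, so a missing sysconfig file or drop-in directory counts as success instead of tripping get_stdout_or_raise_error. A rough, local-only approximation of what ends up running on each node (paths and steps taken from the hunk; the remote-shell layer is omitted):

    import os
    import shutil
    import subprocess

    SYSCONFIG_SBD = "/etc/sysconfig/sbd"  # SBDManager.SYSCONFIG_SBD

    def cleanup_sbd_configurations_locally(delay_start_dirs):
        # Rename the sysconfig file out of the way instead of deleting it.
        if os.path.isfile(SYSCONFIG_SBD):
            shutil.move(SYSCONFIG_SBD, SYSCONFIG_SBD + ".bak")
        # Remove the systemd delay-start drop-in directories and reload
        # systemd, mirroring "test -d ... && rm -rf ... && systemctl
        # daemon-reload || exit 0" from the real implementation.
        for _dir in delay_start_dirs:
            if os.path.isdir(_dir):
                shutil.rmtree(_dir)
                subprocess.run(["systemctl", "daemon-reload"], check=True)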
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20251110.f97886d4/crmsh/ui_cluster.py 
new/crmsh-5.0.0+20251128.b328f794/crmsh/ui_cluster.py
--- old/crmsh-5.0.0+20251110.f97886d4/crmsh/ui_cluster.py       2025-11-10 
13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/crmsh/ui_cluster.py       2025-11-28 
10:33:24.000000000 +0100
@@ -26,7 +26,7 @@
 from .prun import prun
 from .service_manager import ServiceManager
 from .sh import ShellUtils
-from .ui_node import parse_option_for_nodes
+from . import ui_utils
 from . import constants
 
 
@@ -167,7 +167,13 @@
         '''
         Starts the cluster stack on all nodes or specific node(s)
         '''
-        node_list = parse_option_for_nodes(context, *args)
+        try:
+            node_list = ui_utils.parse_and_validate_node_args("start", *args)
+        except utils.NoSSHError as msg:
+            logger.error('%s', msg)
+            logger.info("Please try 'crm cluster start' on each node")
+            return False
+
         service_check_list = ["pacemaker.service"]
         start_qdevice = False
         if corosync.is_qdevice_configured():
@@ -175,15 +181,10 @@
             service_check_list.append("corosync-qdevice.service")
 
         service_manager = ServiceManager()
-        try:
-            for node in node_list[:]:
-                if all([service_manager.service_is_active(srv, 
remote_addr=node) for srv in service_check_list]):
-                    logger.info("The cluster stack already started on 
{}".format(node))
-                    node_list.remove(node)
-        except utils.NoSSHError as msg:
-            logger.error('%s', msg)
-            logger.info("Please try 'crm cluster start' on each node")
-            return False
+        for node in node_list[:]:
+            if all([service_manager.service_is_active(srv, remote_addr=node) 
for srv in service_check_list]):
+                logger.info("The cluster stack already started on 
{}".format(node))
+                node_list.remove(node)
         if not node_list:
             return
 
@@ -248,13 +249,14 @@
         '''
         Stops the cluster stack on all nodes or specific node(s)
         '''
-        node_list = parse_option_for_nodes(context, *args)
         try:
-            node_list = [n for n in node_list if 
self._node_ready_to_stop_cluster_service(n)]
+            node_list = ui_utils.parse_and_validate_node_args("stop", *args)
         except utils.NoSSHError as msg:
             logger.error('%s', msg)
             logger.info("Please try 'crm cluster stop' on each node")
             return False
+
+        node_list = [n for n in node_list if 
self._node_ready_to_stop_cluster_service(n)]
         if not node_list:
             return
         logger.debug(f"stop node list: {node_list}")
@@ -297,7 +299,7 @@
         '''
         Enable the cluster services on this node
         '''
-        node_list = parse_option_for_nodes(context, *args)
+        node_list = ui_utils.parse_and_validate_node_args("enable", *args)
         service_manager = ServiceManager()
         node_list = service_manager.enable_service("pacemaker.service", 
node_list=node_list)
         if service_manager.service_is_available("corosync-qdevice.service") 
and corosync.is_qdevice_configured():
@@ -310,7 +312,7 @@
         '''
         Disable the cluster services on this node
         '''
-        node_list = parse_option_for_nodes(context, *args)
+        node_list = ui_utils.parse_and_validate_node_args("disable", *args)
         service_manager = ServiceManager()
         node_list = service_manager.disable_service("pacemaker.service", 
node_list=node_list)
         service_manager.disable_service("corosync-qdevice.service", 
node_list=node_list)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20251110.f97886d4/crmsh/ui_node.py 
new/crmsh-5.0.0+20251128.b328f794/crmsh/ui_node.py
--- old/crmsh-5.0.0+20251110.f97886d4/crmsh/ui_node.py  2025-11-10 
13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/crmsh/ui_node.py  2025-11-28 
10:33:24.000000000 +0100
@@ -6,7 +6,6 @@
 import copy
 import subprocess
 from lxml import etree
-from argparse import ArgumentParser, RawDescriptionHelpFormatter
 
 from . import config
 from . import command
@@ -219,47 +218,6 @@
         print(term.render("\t%s" % (s)))
 
 
-def parse_option_for_nodes(context, *args):
-    """
-    Parse option for nodes
-    Return a node list
-    """
-    action_type = context.get_command_name()
-    action_target = "node" if action_type in ["standby", "online"] else 
"cluster service"
-    action = "{} {}".format(action_type, action_target)
-    usage_template = """
-Specify node(s) on which to {action}.
-If no nodes are specified, {action} on the local node.
-If --all is specified, {action} on all nodes."""
-    addtion_usage = ""
-    if action_type == "standby":
-        usage_template += """
-\n\nAdditionally, you may specify a lifetime for the standby---if set to
-"reboot", the node will be back online once it reboots. "forever" will
-keep the node in standby after reboot. The life time defaults to
-"forever"."""
-        addtion_usage = " [lifetime]"
-
-    parser = ArgumentParser(description=usage_template.format(action=action),
-                       usage="{} [--all | <node>... ]{}".format(action_type, 
addtion_usage),
-                       add_help=False,
-                       formatter_class=RawDescriptionHelpFormatter)
-    parser.add_argument("-h", "--help", action="store_true", dest="help", 
help="Show this help message")
-    parser.add_argument("--all", help="To {} on all nodes".format(action), 
action="store_true", dest="all")
-
-    options, args = parser.parse_known_args(args)
-    if options.help:
-        parser.print_help()
-        raise utils.TerminateSubCommand(success=True)
-    if options is None or args is None:
-        raise utils.TerminateSubCommand
-    if options.all and args:
-        context.fatal_error("Should either use --all or specific node(s)")
-
-    include_remote = action_type in ["standby", "online"]
-    return utils.validate_and_get_reachable_nodes(args, options.all, 
include_remote)
-
-
 class NodeMgmt(command.UI):
     '''
     Nodes management class
@@ -343,7 +301,7 @@
             args = args[:-1]
 
         # Parse node option
-        node_list = parse_option_for_nodes(context, *args)
+        node_list = ui_utils.parse_and_validate_node_args("standby", *args)
         if not node_list:
             return
 
@@ -431,7 +389,7 @@
         To avoid race condition for --all option, melt all online values into 
one cib replace session
         """
         # Parse node option
-        node_list = parse_option_for_nodes(context, *args)
+        node_list = ui_utils.parse_and_validate_node_args("online", *args)
         if not node_list:
             return
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20251110.f97886d4/crmsh/ui_sbd.py 
new/crmsh-5.0.0+20251128.b328f794/crmsh/ui_sbd.py
--- old/crmsh-5.0.0+20251110.f97886d4/crmsh/ui_sbd.py   2025-11-10 
13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/crmsh/ui_sbd.py   2025-11-28 
10:33:24.000000000 +0100
@@ -517,8 +517,11 @@
 
         logger.info("Remove devices: %s", ';'.join(devices_to_remove))
         update_dict = {"SBD_DEVICE": ";".join(left_device_list)}
-        sbd.SBDManager.update_sbd_configuration(update_dict)
-        sbd.SBDManager.restart_cluster_if_possible()
+        with utils.leverage_maintenance_mode() as enabled:
+            if not utils.able_to_restart_cluster(enabled):
+                return
+            sbd.SBDManager.update_sbd_configuration(update_dict)
+            bootstrap.restart_cluster()
 
     @command.completers_repeating(sbd_device_completer)
     def do_device(self, context, *args) -> bool:
@@ -601,22 +604,34 @@
         if not self._service_is_active(constants.SBD_SERVICE):
             return False
 
+        purge_crashdump = False
+        if args:
+            if args[0] == "crashdump":
+                if not self._is_crashdump_configured():
+                    logger.error("SBD crashdump is not configured")
+                    return False
+                purge_crashdump = True
+            else:
+                logger.error("Invalid argument: %s", ' '.join(args))
+                logger.info("Usage: crm sbd purge [crashdump]")
+                return False
+
         utils.check_all_nodes_reachable("purging SBD")
 
-        if args and args[0] == "crashdump":
-            if not self._is_crashdump_configured():
-                logger.error("SBD crashdump is not configured")
+        with utils.leverage_maintenance_mode() as enabled:
+            if not utils.able_to_restart_cluster(enabled):
                 return False
-            self._set_crashdump_option(delete=True)
-            update_dict = self._set_crashdump_in_sysconfig(restore=True)
-            if update_dict:
-                sbd.SBDManager.update_sbd_configuration(update_dict)
-                sbd.SBDManager.restart_cluster_if_possible()
-            return True
 
-        sbd.purge_sbd_from_cluster()
-        sbd.SBDManager.restart_cluster_if_possible()
-        return True
+            if purge_crashdump:
+                self._set_crashdump_option(delete=True)
+                update_dict = self._set_crashdump_in_sysconfig(restore=True)
+                if update_dict:
+                    sbd.SBDManager.update_sbd_configuration(update_dict)
+            else:
+                sbd.purge_sbd_from_cluster()
+
+            bootstrap.restart_cluster()
+            return True
 
     def _print_sbd_type(self):
         if not self.service_manager.service_is_active(constants.SBD_SERVICE):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20251110.f97886d4/crmsh/ui_utils.py 
new/crmsh-5.0.0+20251128.b328f794/crmsh/ui_utils.py
--- old/crmsh-5.0.0+20251110.f97886d4/crmsh/ui_utils.py 2025-11-10 
13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/crmsh/ui_utils.py 2025-11-28 
10:33:24.000000000 +0100
@@ -6,6 +6,7 @@
 import inspect
 from . import utils
 from . import log
+from argparse import ArgumentParser, RawDescriptionHelpFormatter
 
 
 logger = log.setup_logger(__name__)
@@ -162,3 +163,45 @@
     if max_args >= 0 and len(args) > max_args:
         raise ValueError("Expected (%s), takes at most %d arguments (%d 
given)" %
                          (mknamed(), max_args-nskip, len(args)-nskip))
+
+
+def parse_and_validate_node_args(command_name, *args) -> list:
+    '''
+    Parses option for node-related commands
+    Then validates and returns the reachable node list
+    '''
+    action_target = "node" if command_name in ["standby", "online"] else 
"cluster service"
+    action = f"{command_name} {action_target}"
+    usage_template = """
+Specify node(s) on which to {action}.
+If no nodes are specified, {action} on the local node.
+If --all is specified, {action} on all nodes."""
+    addtion_usage = ""
+    if command_name == "standby":
+        usage_template += """
+\n\nAdditionally, you may specify a lifetime for the standby---if set to
+"reboot", the node will be back online once it reboots. "forever" will
+keep the node in standby after reboot. The life time defaults to
+"forever"."""
+        addtion_usage = " [lifetime]"
+
+    parser = ArgumentParser(
+            description=usage_template.format(action=action),
+            usage=f"{command_name} [--all | <node>... ]{addtion_usage}",
+            add_help=False,
+            formatter_class=RawDescriptionHelpFormatter
+    )
+    parser.add_argument("-h", "--help", action="store_true", dest="help", 
help="Show this help message")
+    parser.add_argument("--all", help=f"To {action} on all nodes", 
action="store_true", dest="all")
+
+    options, args = parser.parse_known_args(args)
+    if options.help:
+        parser.print_help()
+        raise utils.TerminateSubCommand(success=True)
+    if options is None or args is None:
+        raise utils.TerminateSubCommand
+    if options.all and args:
+        raise ValueError("Should either use --all or specific node(s)")
+
+    include_remote = command_name in ["standby", "online"]
+    return utils.validate_and_get_reachable_nodes(args, options.all, 
include_remote)
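
Compared with the removed ui_node.parse_option_for_nodes, the relocated helper takes the command name instead of a UI context object, which is why the ui_cluster.py and ui_node.py hunks now read parse_and_validate_node_args("start", *args) and similar. A tiny usage sketch; the argument values are only illustrative:

    from crmsh import ui_utils, utils

    try:
        # Returns the validated, reachable node list; "--all" and explicit
        # node names are mutually exclusive, and -h prints the generated usage.
        nodes = ui_utils.parse_and_validate_node_args("stop", "node1", "node2")
    except utils.NoSSHError as err:
        # Propagated when core.no_ssh is set and a remote node must be reached.
        print(err)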
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20251110.f97886d4/crmsh/utils.py 
new/crmsh-5.0.0+20251128.b328f794/crmsh/utils.py
--- old/crmsh-5.0.0+20251110.f97886d4/crmsh/utils.py    2025-11-10 
13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/crmsh/utils.py    2025-11-28 
10:33:24.000000000 +0100
@@ -2428,25 +2428,23 @@
     return rc == 0
 
 
-def node_reachable_check(node, ping_count=1, port=22, timeout=3):
+def ssh_port_reachable_check(node):
     """
-    Check if node is reachable by using ping and socket to ssh port
+    Check if node is reachable by checking SSH port is open
     """
-    rc, _, _ = ShellUtils().get_stdout_stderr(f"ping -n -c {ping_count} -W 
{timeout} {node}")
-    if rc == 0:
-        return True
-    # ping failed, try to connect to ssh port by socket
-    if check_port_open(node, port, timeout):
+    if node == this_node() or check_port_open(node, 22):
         return True
-    # both ping and socket failed
-    raise ValueError(f"host \"{node}\" is unreachable")
+    if config.core.no_ssh:
+        raise NoSSHError(constants.NO_SSH_ERROR_MSG)
+    else:
+        raise ValueError(f"host \"{node}\" is unreachable via SSH")
 
 
 def get_reachable_node_list(node_list:list[str]) -> list[str]:
     reachable_node_list = []
     for node in node_list:
         try:
-            if node == this_node() or node_reachable_check(node):
+            if ssh_port_reachable_check(node):
                 reachable_node_list.append(node)
         except ValueError as e:
             logger.warning(str(e))
@@ -2474,7 +2472,13 @@
         self.dead_nodes = dead_nodes or []
 
 
-def check_all_nodes_reachable(action_to_do: str, peer_node: str = None):
+class UnreachableNodeError(ValueError):
+    def __init__(self, msg: str, nodes_unreachable=None):
+        super().__init__(msg)
+        self.nodes_unreachable = nodes_unreachable or []
+
+
+def check_all_nodes_reachable(action_to_do: str, peer_node: str = None, 
check_passwd: bool = True):
     """
     Check if all cluster nodes are reachable
     """
@@ -2484,7 +2488,7 @@
     dead_nodes = []
     for node in offline_nodes:
         try:
-            node_reachable_check(node)
+            ssh_port_reachable_check(node)
         except ValueError:
             dead_nodes.append(node)
     if dead_nodes:
@@ -2495,8 +2499,35 @@
         """
         raise DeadNodeError(msg, dead_nodes)
 
+    nodes_unreachable = []
+    nodes_need_password = []
+    me = this_node()
     for node in online_nodes:
-        node_reachable_check(node)
+        if node == me:
+            continue
+
+        try:
+            ssh_port_reachable_check(node)
+        except ValueError:
+            nodes_unreachable.append(node)
+            continue
+
+        if check_passwd:
+            local_user, remote_user = 
crmsh.user_of_host.UserOfHost.instance().user_pair_for_ssh(node)
+            if check_ssh_passwd_need(local_user, remote_user, node):
+                nodes_need_password.append(node)
+
+    if nodes_unreachable:
+        msg = f"""There are nodes whose SSH ports are unreachable: {', 
'.join(nodes_unreachable)}.
+Please check the network connectivity before {action_to_do}.
+        """
+        raise UnreachableNodeError(msg, nodes_unreachable)
+
+    if nodes_need_password:
+        msg = f"""There are nodes which requires a password for SSH access: 
{', '.join(nodes_need_password)}.
+Please setup passwordless SSH access before {action_to_do}.
+        """
+        raise UnreachableNodeError(msg, nodes_need_password)
 
 
 def re_split_string(reg, string):
@@ -3306,4 +3337,32 @@
             member_list.remove(node)
 
     return member_list + remote_list
+
+
+def able_to_restart_cluster(in_maintenance_mode: bool = False) -> bool:
+    """
+    Check whether it is able to restart cluster now
+    1. If pacemaker is not running, return True
+    2. If no non-stonith resource is running, return True
+    3. If in maintenance mode and DLM is not running, return True
+    4. Otherwise, return False with warning messages to guide user
+    """
+    if not ServiceManager().service_is_active(constants.PCMK_SERVICE):
+        return True
+    crm_mon_parser = xmlutil.CrmMonXmlParser()
+    if not crm_mon_parser.is_non_stonith_resource_running():
+        return True
+    elif in_maintenance_mode:
+        if is_dlm_running():
+            dlm_related_ids = 
crm_mon_parser.get_resource_top_parent_id_set_via_type(constants.DLM_CONTROLD_RA)
+            logger.warning("Please stop DLM related resources (%s) and try 
again", ', '.join(dlm_related_ids))
+            return False
+        else:
+            return True
+    else:
+        logger.warning("Please stop all running resources and try again")
+        logger.warning("Or use 'crm -F/--force' option to leverage maintenance 
mode")
+        logger.warning("Understand risks that running RA has no cluster 
protection while the cluster is in maintenance mode and restarting")
+        logger.info("Aborting the configuration change attempt")
+        return False
 # vim:ts=4:sw=4:et:
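
The new utils.able_to_restart_cluster() centralizes the decision that SBDManager.restart_cluster_if_possible() used to make, and sbd.py and ui_sbd.py now pair it with the existing leverage_maintenance_mode() context manager. A condensed sketch of that pattern as the hunks above use it; the apply_sbd_change wrapper and configure_step callback are placeholders:

    from crmsh import bootstrap, utils

    def apply_sbd_change(configure_step):
        # leverage_maintenance_mode() yields True when maintenance mode was
        # actually engaged (for example, when crm was run with -F/--force).
        with utils.leverage_maintenance_mode() as enabled:
            # Bail out if restarting the cluster is not safe; the helper logs
            # guidance (stop resources, or rerun with -F) and returns False.
            if not utils.able_to_restart_cluster(enabled):
                return False
            configure_step()
            # Restart afterwards; the safety check has already passed.
            bootstrap.restart_cluster()
            return True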
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20251110.f97886d4/crmsh/xmlutil.py 
new/crmsh-5.0.0+20251128.b328f794/crmsh/xmlutil.py
--- old/crmsh-5.0.0+20251110.f97886d4/crmsh/xmlutil.py  2025-11-10 
13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/crmsh/xmlutil.py  2025-11-28 
10:33:24.000000000 +0100
@@ -1627,6 +1627,13 @@
         xpath = f'//resource[(@id="{ra}" or @resource_agent="{ra}") and 
@active="true" and @role="Started"]'
         return bool(self.xml_elem.xpath(xpath))
 
+    def get_resource_top_parent_id_set_via_type(self, ra_type):
+        """
+        Given configured ra type, get the topmost parent ra id set
+        """
+        xpath = f'//resource[@resource_agent="{ra_type}"]'
+        return set([get_topmost_rsc(elem).get('id') for elem in 
self.xml_elem.xpath(xpath)])
+
     def get_resource_id_list_via_type(self, ra_type):
         """
         Given configured ra type, get the ra id list
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20251110.f97886d4/test/features/cluster_blocking_ssh.feature 
new/crmsh-5.0.0+20251128.b328f794/test/features/cluster_blocking_ssh.feature
--- 
old/crmsh-5.0.0+20251110.f97886d4/test/features/cluster_blocking_ssh.feature    
    2025-11-10 13:16:44.000000000 +0100
+++ 
new/crmsh-5.0.0+20251128.b328f794/test/features/cluster_blocking_ssh.feature    
    2025-11-28 10:33:24.000000000 +0100
@@ -60,8 +60,6 @@
     And     Run "firewall-cmd --zone=public --add-rich-rule='rule port port=22 
protocol=tcp drop' --permanent && firewall-cmd --reload" on "hanode2"
     And     Try "ssh -o ConnectTimeout=5 hanode2" on "hanode1"
     Then    Except "ssh: connect to host hanode2 port 22: Connection timed 
out" in stderr
-    When    Run "timeout 5s crm report || echo "timeout"" on "hanode1"
-    Then    Expected "timeout" in stdout
     When    Write multi lines to file "/etc/crm/crm.conf" on "hanode1"
       """
       [core]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20251110.f97886d4/test/features/qdevice_validate.feature 
new/crmsh-5.0.0+20251128.b328f794/test/features/qdevice_validate.feature
--- old/crmsh-5.0.0+20251110.f97886d4/test/features/qdevice_validate.feature    
2025-11-10 13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/test/features/qdevice_validate.feature    
2025-11-28 10:33:24.000000000 +0100
@@ -23,7 +23,7 @@
   Scenario: Service ssh on qnetd node not available
     When    Run "systemctl stop sshd.service" on "node-without-ssh"
     When    Try "crm cluster init --qnetd-hostname=node-without-ssh"
-    Then    Except "ERROR: cluster.init: ssh service on "node-without-ssh" not 
available"
+    Then    Except "ERROR: cluster.init: host "node-without-ssh" is 
unreachable via SSH"
 
   @clean
   Scenario: Option "--qdevice-port" set wrong port
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20251110.f97886d4/test/features/sbd_ui.feature 
new/crmsh-5.0.0+20251128.b328f794/test/features/sbd_ui.feature
--- old/crmsh-5.0.0+20251110.f97886d4/test/features/sbd_ui.feature      
2025-11-10 13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/test/features/sbd_ui.feature      
2025-11-28 10:33:24.000000000 +0100
@@ -132,3 +132,21 @@
     And     Run "crm cluster restart --all" on "hanode1"
     Then    Service "sbd.service" is "stopped" on "hanode1"
     Then    Service "sbd.service" is "stopped" on "hanode2"
+
+  @clean
+  Scenario: Leverage maintenance mode
+    When    Run "crm cluster init -y" on "hanode1"
+    And     Run "crm cluster join -c hanode1 -y" on "hanode2"
+    Then    Cluster service is "started" on "hanode1"
+    Then    Cluster service is "started" on "hanode2"
+    When    Run "crm configure primitive d Dummy" on "hanode1"
+    When    Try "crm cluster init sbd -s /dev/sda5 -y"
+    Then    Expected "Or use 'crm -F/--force' option to leverage maintenance 
mode" in stderr
+    When    Run "crm -F cluster init sbd -s /dev/sda5 -y" on "hanode1"
+    Then    Service "sbd" is "started" on "hanode1"
+    And     Service "sbd" is "started" on "hanode2"
+    When    Try "crm sbd purge"
+    Then    Expected "Or use 'crm -F/--force' option to leverage maintenance 
mode" in stderr
+    When    Run "crm -F sbd purge" on "hanode1"
+    Then    Service "sbd.service" is "stopped" on "hanode1"
+    Then    Service "sbd.service" is "stopped" on "hanode2"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20251110.f97886d4/test/run-functional-tests 
new/crmsh-5.0.0+20251128.b328f794/test/run-functional-tests
--- old/crmsh-5.0.0+20251110.f97886d4/test/run-functional-tests 2025-11-10 
13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/test/run-functional-tests 2025-11-28 
10:33:24.000000000 +0100
@@ -1,5 +1,5 @@
 #!/bin/bash
-CONTAINER_IMAGE=${CONTAINER_IMAGE:-"docker.io/nyang23/haleap:master"}
+CONTAINER_IMAGE=${CONTAINER_IMAGE:-"docker.io/liangxin1300/haleap:master"}
 PYPI_MIRROR=${PYPI_MIRROR:-""}
 PROJECT_PATH=$(dirname $(dirname `realpath $0`))
 PROJECT_INSIDE="/opt/crmsh"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20251110.f97886d4/test/unittests/test_bootstrap.py 
new/crmsh-5.0.0+20251128.b328f794/test/unittests/test_bootstrap.py
--- old/crmsh-5.0.0+20251110.f97886d4/test/unittests/test_bootstrap.py  
2025-11-10 13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/test/unittests/test_bootstrap.py  
2025-11-28 10:33:24.000000000 +0100
@@ -1964,16 +1964,17 @@
         mock_run.assert_called_once_with("node2", "crm cluster remove -y -c 
node1")
         mock_error.assert_called_once_with("Failed to remove this node from 
node2: err")
 
-    @mock.patch('crmsh.utils.package_is_installed')
+    @mock.patch('crmsh.sbd.cleanup_sbd_configurations')
+    @mock.patch('os.path.exists')
     @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error')
-    def test_rm_configuration_files(self, mock_run, mock_installed):
+    def test_rm_configuration_files(self, mock_run, mock_exists, mock_rm_sbd):
         bootstrap._context = mock.Mock(rm_list=["file1", "file2"])
-        mock_installed.return_value = True
+        mock_exists.return_value = True
         bootstrap.rm_configuration_files()
         mock_run.assert_has_calls([
             mock.call('rm -f file1 file2', None),
-            mock.call('cp /usr/share/fillup-templates/sysconfig.sbd 
/etc/sysconfig/sbd', None)
             ])
+        mock_rm_sbd.assert_called_once_with(None)
 
     @mock.patch('crmsh.utils.get_iplist_from_name')
     @mock.patch('crmsh.corosync.get_values')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20251110.f97886d4/test/unittests/test_qdevice.py 
new/crmsh-5.0.0+20251128.b328f794/test/unittests/test_qdevice.py
--- old/crmsh-5.0.0+20251110.f97886d4/test/unittests/test_qdevice.py    
2025-11-10 13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/test/unittests/test_qdevice.py    
2025-11-28 10:33:24.000000000 +0100
@@ -203,7 +203,7 @@
         self.assertEqual(res, 
"/etc/corosync/qdevice/net/node1.com/qdevice-net-node.p12")
 
     @mock.patch('crmsh.utils.InterfacesInfo.ip_in_local')
-    @mock.patch('crmsh.utils.node_reachable_check')
+    @mock.patch('crmsh.utils.ssh_port_reachable_check')
     @mock.patch('socket.getaddrinfo')
     def test_check_qnetd_addr_local(self, mock_getaddrinfo, mock_reachable, 
mock_in_local):
         mock_getaddrinfo.return_value = [(None, ("10.10.10.123",)),]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20251110.f97886d4/test/unittests/test_report_collect.py 
new/crmsh-5.0.0+20251128.b328f794/test/unittests/test_report_collect.py
--- old/crmsh-5.0.0+20251110.f97886d4/test/unittests/test_report_collect.py     
2025-11-10 13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/test/unittests/test_report_collect.py     
2025-11-28 10:33:24.000000000 +0100
@@ -472,6 +472,7 @@
                 "crm_mon_data_n1",
                 "crm_mon_data_rf1",
                 "crm_mon_data_rnt1",
+                "crm_mon_data_xml",
                 "cib_data",
                 "crm_node_data"
                 ]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20251110.f97886d4/test/unittests/test_sbd.py 
new/crmsh-5.0.0+20251128.b328f794/test/unittests/test_sbd.py
--- old/crmsh-5.0.0+20251110.f97886d4/test/unittests/test_sbd.py        
2025-11-10 13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/test/unittests/test_sbd.py        
2025-11-28 10:33:24.000000000 +0100
@@ -406,57 +406,6 @@
             call("Enable %s on node %s", constants.SBD_SERVICE, 'node2')
         ])
 
-    @patch('crmsh.xmlutil.CrmMonXmlParser')
-    @patch('crmsh.sbd.ServiceManager')
-    def test_restart_cluster_if_possible_return(self, mock_ServiceManager, 
mock_CrmMonXmlParser):
-        mock_ServiceManager.return_value.service_is_active.return_value = False
-        SBDManager.restart_cluster_if_possible()
-        
mock_ServiceManager.return_value.service_is_active.assert_called_once_with(constants.PCMK_SERVICE)
-        mock_CrmMonXmlParser.assert_not_called()
-
-    @patch('logging.Logger.warning')
-    @patch('crmsh.utils.is_dlm_running')
-    @patch('crmsh.xmlutil.CrmMonXmlParser')
-    @patch('crmsh.sbd.ServiceManager')
-    def test_restart_cluster_if_possible_manually(
-            self, mock_ServiceManager, mock_CrmMonXmlParser, 
mock_is_dlm_running, mock_logger_warning,
-    ):
-        mock_ServiceManager.return_value.service_is_active.return_value = True
-        
mock_CrmMonXmlParser.return_value.is_non_stonith_resource_running.return_value 
= True
-        mock_is_dlm_running.return_value = False
-        SBDManager.restart_cluster_if_possible()
-        
mock_ServiceManager.return_value.service_is_active.assert_called_once_with(constants.PCMK_SERVICE)
-        mock_logger_warning.assert_has_calls([
-            call("Resource is running, need to restart cluster service 
manually on each node"),
-            call("Or, run with `crm -F` or `--force` option, the `sbd` 
subcommand will leverage maintenance mode for any changes that require 
restarting sbd.service"),
-            call("Understand risks that running RA has no cluster protection 
while the cluster is in maintenance mode and restarting")
-        ])
-
-    @patch('logging.Logger.warning')
-    @patch('crmsh.utils.is_dlm_running')
-    @patch('crmsh.xmlutil.CrmMonXmlParser')
-    @patch('crmsh.sbd.ServiceManager')
-    def test_restart_cluster_if_possible_dlm_running(
-            self, mock_ServiceManager, mock_CrmMonXmlParser, 
mock_is_dlm_running, mock_logger_warning,
-    ):
-        mock_ServiceManager.return_value.service_is_active.return_value = True
-        
mock_CrmMonXmlParser.return_value.is_non_stonith_resource_running.return_value 
= True
-        mock_is_dlm_running.return_value = True
-        SBDManager.restart_cluster_if_possible(with_maintenance_mode=True)
-        
mock_ServiceManager.return_value.service_is_active.assert_called_once_with(constants.PCMK_SERVICE)
-        mock_logger_warning.assert_called_once_with("Resource is running, need 
to restart cluster service manually on each node")
-
-    @patch('crmsh.bootstrap.restart_cluster')
-    @patch('logging.Logger.warning')
-    @patch('crmsh.xmlutil.CrmMonXmlParser')
-    @patch('crmsh.sbd.ServiceManager')
-    def test_restart_cluster_if_possible(self, mock_ServiceManager, 
mock_CrmMonXmlParser, mock_logger_warning, mock_restart_cluster):
-        mock_ServiceManager.return_value.service_is_active.return_value = True
-        
mock_CrmMonXmlParser.return_value.is_non_stonith_resource_running.return_value 
= False
-        SBDManager.restart_cluster_if_possible()
-        
mock_ServiceManager.return_value.service_is_active.assert_called_once_with(constants.PCMK_SERVICE)
-        mock_restart_cluster.assert_called_once()
-
     @patch('crmsh.bootstrap.prompt_for_string')
     def test_prompt_for_sbd_device_diskless(self, mock_prompt_for_string):
         mock_prompt_for_string.return_value = "none"
@@ -644,10 +593,10 @@
         sbdmanager_instance._load_attributes_from_bootstrap.assert_not_called()
 
     @patch('crmsh.bootstrap.adjust_properties')
-    @patch('crmsh.sbd.SBDManager.restart_cluster_if_possible')
+    @patch('crmsh.bootstrap.restart_cluster')
     @patch('crmsh.sbd.SBDManager.enable_sbd_service')
     @patch('crmsh.sbd.ServiceManager')
-    def test_init_and_deploy_sbd(self, mock_ServiceManager, 
mock_enable_sbd_service, mock_restart_cluster_if_possible, 
mock_adjust_properties):
+    def test_init_and_deploy_sbd(self, mock_ServiceManager, 
mock_enable_sbd_service, mock_restart_cluster, mock_adjust_properties):
         mock_bootstrap_ctx = Mock(cluster_is_running=True)
         sbdmanager_instance = SBDManager(bootstrap_context=mock_bootstrap_ctx)
         sbdmanager_instance.get_sbd_device_from_bootstrap = Mock()
@@ -811,15 +760,27 @@
             call("Remove sbd resource '%s'", 'sbd_resource')
         ])
 
-    @patch('crmsh.parallax.parallax_call')
+    @patch('logging.Logger.info')
+    @patch('crmsh.sh.cluster_shell')
+    def test_cleanup_sbd_configurations(self, mock_cluster_shell, 
mock_logger_info):
+        mock_cluster_shell_inst = Mock()
+        mock_cluster_shell.return_value = mock_cluster_shell_inst
+        mock_cluster_shell_inst.get_stdout_or_raise_error = Mock()
+        sbd.cleanup_sbd_configurations()
+        mock_cluster_shell_inst.get_stdout_or_raise_error.assert_has_calls([
+            call(f"test -f {sbd.SBDManager.SYSCONFIG_SBD} && mv 
{sbd.SBDManager.SYSCONFIG_SBD} {sbd.SBDManager.SYSCONFIG_SBD}.bak || exit 0", 
host=None),
+            call(f"test -d {sbd.SBDManager.SBD_SYSTEMD_DELAY_START_DIR} && rm 
-rf {sbd.SBDManager.SBD_SYSTEMD_DELAY_START_DIR} && systemctl daemon-reload || 
exit 0", host=None),
+            call(f"test -d 
{sbd.SBDManager.SBD_SYSTEMD_DELAY_START_DISABLE_DIR} && rm -rf 
{sbd.SBDManager.SBD_SYSTEMD_DELAY_START_DISABLE_DIR} && systemctl daemon-reload 
|| exit 0", host=None),
+        ])
+
+    @patch('crmsh.sbd.cleanup_sbd_configurations')
     @patch('crmsh.utils.cleanup_stonith_related_properties')
     @patch('crmsh.sbd.sh.cluster_shell')
-    @patch('crmsh.utils.cluster_run_cmd')
     @patch('logging.Logger.info')
     @patch('crmsh.sbd.ServiceManager')
     @patch('crmsh.utils.list_cluster_nodes')
     @patch('crmsh.sbd.cleanup_existing_sbd_resource')
-    def test_purge_sbd_from_cluster(self, mock_cleanup_existing_sbd_resource, 
mock_list_cluster_nodes, mock_ServiceManager, mock_logger_info, 
mock_cluster_run_cmd, mock_cluster_shell, 
mock_cleanup_stonith_related_properties, mock_parallax_call):
+    def test_purge_sbd_from_cluster(self, mock_cleanup_existing_sbd_resource, 
mock_list_cluster_nodes, mock_ServiceManager, mock_logger_info, 
mock_cluster_shell, mock_cleanup_stonith_related_properties, 
mock_rm_sbd_configuration_files):
         mock_list_cluster_nodes.return_value = ['node1', 'node2']
         mock_ServiceManager.return_value.service_is_enabled.side_effect = 
[True, True]
         stonith_data = """stonith-sbd
@@ -830,6 +791,6 @@
         mock_logger_info.assert_has_calls([
             call("Disable %s on node %s", constants.SBD_SERVICE, 'node1'),
             call("Disable %s on node %s", constants.SBD_SERVICE, 'node2'),
-            call("Move %s to %s on all nodes", sbd.SBDManager.SYSCONFIG_SBD, 
sbd.SBDManager.SYSCONFIG_SBD+'.bak')
         ])
         mock_cleanup_stonith_related_properties.assert_called_once()
+        mock_rm_sbd_configuration_files.assert_has_calls([call("node1"), 
call("node2")])
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20251110.f97886d4/test/unittests/test_ui_cluster.py 
new/crmsh-5.0.0+20251128.b328f794/test/unittests/test_ui_cluster.py
--- old/crmsh-5.0.0+20251110.f97886d4/test/unittests/test_ui_cluster.py 
2025-11-10 13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/test/unittests/test_ui_cluster.py 
2025-11-28 10:33:24.000000000 +0100
@@ -38,7 +38,7 @@
 
     @mock.patch('logging.Logger.info')
     @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
-    @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
+    @mock.patch('crmsh.ui_utils.parse_and_validate_node_args')
     @mock.patch('crmsh.corosync.is_qdevice_configured')
     def test_do_start_already_started(self, mock_qdevice_configured, 
mock_parse_nodes, mock_active, mock_info):
         mock_qdevice_configured.return_value = False
@@ -46,7 +46,7 @@
         mock_parse_nodes.return_value = ["node1", "node2"]
         mock_active.side_effect = [True, True]
         self.ui_cluster_inst.do_start(context_inst, "node1", "node2")
-        mock_parse_nodes.assert_called_once_with(context_inst, "node1", 
"node2")
+        mock_parse_nodes.assert_called_once_with("start", "node1", "node2")
         mock_active.assert_has_calls([
             mock.call("pacemaker.service", remote_addr="node1"),
             mock.call("pacemaker.service", remote_addr="node2")
@@ -63,7 +63,7 @@
     @mock.patch('crmsh.corosync.is_qdevice_configured')
     @mock.patch('crmsh.service_manager.ServiceManager.start_service')
     @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
-    @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
+    @mock.patch('crmsh.ui_utils.parse_and_validate_node_args')
     def test_do_start(self, mock_parse_nodes, mock_active, mock_start, 
mock_qdevice_configured, mock_info, mock_error, mock_start_pacemaker, 
mock_check_qdevice):
         context_inst = mock.Mock()
         mock_start_pacemaker.return_value = ["node1"]
@@ -86,7 +86,7 @@
 
     @mock.patch('crmsh.utils.wait_for_dc')
     @mock.patch('crmsh.ui_cluster.Cluster._node_ready_to_stop_cluster_service')
-    @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
+    @mock.patch('crmsh.ui_utils.parse_and_validate_node_args')
     def test_do_stop_return(self, mock_parse_nodes, 
mock_node_ready_to_stop_cluster_service, mock_dc):
         mock_parse_nodes.return_value = ["node1", "node2"]
         mock_node_ready_to_stop_cluster_service.side_effect = [False, False]
@@ -94,7 +94,7 @@
         context_inst = mock.Mock()
         self.ui_cluster_inst.do_stop(context_inst, "node1", "node2")
 
-        mock_parse_nodes.assert_called_once_with(context_inst, "node1", 
"node2")
+        mock_parse_nodes.assert_called_once_with("stop", "node1", "node2")
         
mock_node_ready_to_stop_cluster_service.assert_has_calls([mock.call("node1"), 
mock.call("node2")])
         mock_dc.assert_not_called()
 
@@ -104,7 +104,7 @@
     @mock.patch('crmsh.ui_cluster.Cluster._set_dlm')
     @mock.patch('crmsh.utils.wait_for_dc')
     @mock.patch('crmsh.ui_cluster.Cluster._node_ready_to_stop_cluster_service')
-    @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
+    @mock.patch('crmsh.ui_utils.parse_and_validate_node_args')
     def test_do_stop(self, mock_parse_nodes, 
mock_node_ready_to_stop_cluster_service, mock_dc,
                      mock_set_dlm, mock_service_manager, mock_info, 
mock_debug):
         mock_parse_nodes.return_value = ["node1", "node2"]
@@ -117,7 +117,7 @@
         context_inst = mock.Mock()
         self.ui_cluster_inst.do_stop(context_inst, "node1", "node2")
 
-        mock_parse_nodes.assert_called_once_with(context_inst, "node1", 
"node2")
+        mock_parse_nodes.assert_called_once_with("stop", "node1", "node2")
         
mock_node_ready_to_stop_cluster_service.assert_has_calls([mock.call("node1"), 
mock.call("node2")])
         mock_debug.assert_called_once_with("stop node list: ['node1']")
         mock_dc.assert_called_once_with("node1")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20251110.f97886d4/test/unittests/test_ui_sbd.py 
new/crmsh-5.0.0+20251128.b328f794/test/unittests/test_ui_sbd.py
--- old/crmsh-5.0.0+20251110.f97886d4/test/unittests/test_ui_sbd.py     
2025-11-10 13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/test/unittests/test_ui_sbd.py     
2025-11-28 10:33:24.000000000 +0100
@@ -469,14 +469,14 @@
             self.sbd_instance_diskbased._device_remove(["/dev/sda1"])
         self.assertEqual(str(e.exception), "Not allowed to remove all devices")
 
-    @mock.patch('crmsh.sbd.SBDManager.restart_cluster_if_possible')
+    @mock.patch('crmsh.bootstrap.restart_cluster')
     @mock.patch('crmsh.sbd.SBDManager.update_sbd_configuration')
     @mock.patch('logging.Logger.info')
-    def test_device_remove(self, mock_logger_info, 
mock_update_sbd_configuration, mock_restart_cluster_if_possible):
+    def test_device_remove(self, mock_logger_info, 
mock_update_sbd_configuration, mock_restart_cluster):
         self.sbd_instance_diskbased.device_list_from_config = ["/dev/sda1", 
"/dev/sda2"]
         self.sbd_instance_diskbased._device_remove(["/dev/sda1"])
         mock_update_sbd_configuration.assert_called_once_with({"SBD_DEVICE": 
"/dev/sda2"})
-        mock_restart_cluster_if_possible.assert_called_once()
+        mock_restart_cluster.assert_called_once()
         mock_logger_info.assert_called_once_with("Remove devices: %s", 
"/dev/sda1")
 
     def test_do_device_no_service(self):
@@ -571,9 +571,10 @@
         self.assertFalse(res)
         mock_purge_sbd_from_cluster.assert_not_called()
 
+    @mock.patch('crmsh.bootstrap.restart_cluster')
     @mock.patch('crmsh.utils.check_all_nodes_reachable')
     @mock.patch('crmsh.sbd.purge_sbd_from_cluster')
-    def test_do_purge(self, mock_purge_sbd_from_cluster, 
mock_check_all_nodes_reachable):
+    def test_do_purge(self, mock_purge_sbd_from_cluster, 
mock_check_all_nodes_reachable, mock_restart_cluster):
         self.sbd_instance_diskbased._load_attributes = mock.Mock()
         self.sbd_instance_diskbased._service_is_active = 
mock.Mock(return_value=True)
         res = self.sbd_instance_diskbased.do_purge(mock.Mock())
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20251110.f97886d4/test/unittests/test_utils.py 
new/crmsh-5.0.0+20251128.b328f794/test/unittests/test_utils.py
--- old/crmsh-5.0.0+20251110.f97886d4/test/unittests/test_utils.py      
2025-11-10 13:16:44.000000000 +0100
+++ new/crmsh-5.0.0+20251128.b328f794/test/unittests/test_utils.py      
2025-11-28 10:33:24.000000000 +0100
@@ -982,7 +982,7 @@
     mock_isblk.assert_called_once_with(12345)
 
 
[email protected]('crmsh.utils.node_reachable_check')
[email protected]('crmsh.utils.ssh_port_reachable_check')
 @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
 def test_check_all_nodes_reachable_dead_nodes(mock_xml, mock_reachable):
     mock_xml_inst = mock.Mock()
@@ -995,12 +995,18 @@
     assert err.value.dead_nodes == ["node2"]
 
 
[email protected]('crmsh.utils.node_reachable_check')
[email protected]('crmsh.utils.check_ssh_passwd_need')
[email protected]('crmsh.user_of_host.UserOfHost.instance')
[email protected]('crmsh.utils.ssh_port_reachable_check')
 @mock.patch('crmsh.xmlutil.CrmMonXmlParser')
-def test_check_all_nodes_reachable(mock_xml, mock_reachable):
+def test_check_all_nodes_reachable(mock_xml, mock_reachable, 
mock_user_of_host, mock_check_passwd):
     mock_xml_inst = mock.Mock()
     mock_xml.return_value = mock_xml_inst
     mock_xml_inst.get_node_list.side_effect = [["node1"], []]
+    mock_user_of_host_inst = mock.Mock()
+    mock_user_of_host.return_value = mock_user_of_host_inst
+    mock_user_of_host_inst.user_pair_for_ssh = mock.Mock(return_value=("root", 
"root"))
+    mock_check_passwd.return_value = False
     utils.check_all_nodes_reachable("testing")
     mock_reachable.assert_called_once_with("node1")
 
@@ -1410,7 +1416,7 @@
 
 
 @mock.patch('logging.Logger.warning')
[email protected]('crmsh.utils.node_reachable_check')
[email protected]('crmsh.utils.ssh_port_reachable_check')
 def test_get_reachable_node_list(mock_reachable, mock_warn):
     mock_reachable.side_effect = [False, True, ValueError("error for node3")]
     assert utils.get_reachable_node_list(["node1", "node2", "node3"]) == 
["node2"]
