Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked 
in at 2025-08-15 21:52:28
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.1085 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "crmsh"

Fri Aug 15 21:52:28 2025 rev:381 rq:1299487 version:5.0.0+20250815.8d89c3d0

Changes:
--------
--- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes      2025-07-31 
17:50:31.874950445 +0200
+++ /work/SRC/openSUSE:Factory/.crmsh.new.1085/crmsh.changes    2025-08-15 
21:53:57.688862904 +0200
@@ -1,0 +2,37 @@
+Fri Aug 15 03:27:28 UTC 2025 - xli...@suse.com
+
+- Update to version 5.0.0+20250815.8d89c3d0:
+  * Dev: ChangeLog: update ChangeLog for release 5.0.0
+
+-------------------------------------------------------------------
+Thu Aug 14 10:23:04 UTC 2025 - xli...@suse.com
+
+- Update to version 5.0.0+20250814.d6d8099f:
+  * Dev: behave: Add new case for adding diskless sbd via stage
+  * Dev: unittests: Adjust unit test for previous commit
+  * Dev: sbd: Use SBDUtils.get_sbd_device_from_config for reliable diskless SBD detection
+
+-------------------------------------------------------------------
+Thu Aug 14 00:37:57 UTC 2025 - xli...@suse.com
+
+- Update to version 5.0.0+20250814.ae1e11bf:
+  * Fix: log: missing LF after a progress bar (#1886)
+
+-------------------------------------------------------------------
+Wed Aug 13 03:56:14 UTC 2025 - xli...@suse.com
+
+- Update to version 5.0.0+20250813.70bae071:
+  * Dev: unittests: Adjust unit test for previous commit
+  * Dev: sbd: Add with_sbd parameter for the case when sbd is not active
+  * Dev: sbd: Restart cluster after configured sbd and adjusted properties
+  * Fix: sbd: Ensure stonith-watchdog-timeout is >= 2 * SBD_WATCHDOG_TIMEOUT (bsc#1247415)
+  * Dev: unittests: Adjust unit test for previous commit
+  * Dev: xmlutil: Add is_non_stonith_resource_running() and use for cluster restart checks
+
+-------------------------------------------------------------------
+Thu Jul 31 13:02:06 UTC 2025 - xli...@suse.com
+
+- Update to version 5.0.0+20250731.f29b30db:
+  * Fix: doc: Add TimeoutFormulas help topic (bsc#1242981)
+
+-------------------------------------------------------------------

Old:
----
  crmsh-5.0.0+20250731.d3091c0c.tar.bz2

New:
----
  crmsh-5.0.0+20250815.8d89c3d0.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.9yn0q1/_old  2025-08-15 21:53:58.308888686 +0200
+++ /var/tmp/diff_new_pack.9yn0q1/_new  2025-08-15 21:53:58.308888686 +0200
@@ -41,7 +41,7 @@
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        5.0.0+20250731.d3091c0c
+Version:        5.0.0+20250815.8d89c3d0
 Release:        0
 URL:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.9yn0q1/_old  2025-08-15 21:53:58.368891181 +0200
+++ /var/tmp/diff_new_pack.9yn0q1/_new  2025-08-15 21:53:58.372891348 +0200
@@ -9,7 +9,7 @@
 </service>
 <service name="tar_scm">
   <param name="url">https://github.com/ClusterLabs/crmsh.git</param>
-  <param name="changesrevision">d3091c0cdbf52ac3e322ffaf40177122c1156960</param>
+  <param name="changesrevision">e79ed2265f0927eb358cb859be514ecf1aaa7ba5</param>
 </service>
 </servicedata>
 (No newline at EOF)

++++++ crmsh-5.0.0+20250731.d3091c0c.tar.bz2 -> 
crmsh-5.0.0+20250815.8d89c3d0.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250731.d3091c0c/ChangeLog 
new/crmsh-5.0.0+20250815.8d89c3d0/ChangeLog
--- old/crmsh-5.0.0+20250731.d3091c0c/ChangeLog 2025-07-31 10:20:46.000000000 
+0200
+++ new/crmsh-5.0.0+20250815.8d89c3d0/ChangeLog 2025-08-15 04:59:27.000000000 
+0200
@@ -1,3 +1,21 @@
+* Thu Aug 14 2025 Xin Liang <xli...@suse.com>
+- Release 5.0.0
+- Fix: log: missing LF after a progress bar (#1886)
+- Dev: sbd: Use SBDUtils.get_sbd_device_from_config for reliable diskless SBD detection
+- Dev: sbd: Add with_sbd parameter for the case when sbd is not active
+- Dev: sbd: Restart cluster after configured sbd and adjusted properties
+- Fix: sbd: Ensure stonith-watchdog-timeout is >= 2 * SBD_WATCHDOG_TIMEOUT (bsc#1247415)
+- Dev: xmlutil: Add is_non_stonith_resource_running() and use for cluster restart checks
+- Fix: doc: Add TimeoutFormulas help topic (bsc#1242981)
+- Dev: command: Show help topic completion only at root level
+- Fix: sbd: should show warning instead of error when unable to restart the cluster automatically after changing configs (bsc#1246956)
+- Dev: qdevice: Reload corosync configuration on one node
+- Fix: bootstrap: continue qnetd setup when ssh keypair is not found for some cluster nodes (#1850)
+- Dev: sbd: Add pcmk_delay_max back to calculate SBD_DELAY_START
+- Dev: doc: release crmsh-5.0 document
+- Fix: ui_context: should not require root privilege when using subcommand 'help'
+- Fix: sbd: Avoid negative value for the property 'stonith-watchdog-timeout' (bsc#1246622)
+
 * Tue Jul 15 2025 Xin Liang <xli...@suse.com>
 - Release 5.0.0 rc2
 - Dev: migration: allow to run migration locally (jsc#PED-8252)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250731.d3091c0c/crmsh/bootstrap.py 
new/crmsh-5.0.0+20250815.8d89c3d0/crmsh/bootstrap.py
--- old/crmsh-5.0.0+20250731.d3091c0c/crmsh/bootstrap.py        2025-07-31 
10:20:46.000000000 +0200
+++ new/crmsh-5.0.0+20250815.8d89c3d0/crmsh/bootstrap.py        2025-08-15 
04:59:27.000000000 +0200
@@ -2828,11 +2828,11 @@
             logger.info("Delete parameter 'pcmk_delay_max' for resource 
'{}'".format(res))
 
 
-def adjust_stonith_timeout():
+def adjust_stonith_timeout(with_sbd: bool = False):
     """
     Adjust stonith-timeout for sbd and other scenarios
     """
-    if ServiceManager().service_is_active(constants.SBD_SERVICE):
+    if ServiceManager().service_is_active(constants.SBD_SERVICE) or with_sbd:
         SBDTimeout.adjust_sbd_timeout_related_cluster_configuration()
     else:
         value = get_stonith_timeout_generally_expected()
@@ -2840,7 +2840,7 @@
             utils.set_property("stonith-timeout", value, conditional=True)
 
 
-def adjust_properties():
+def adjust_properties(with_sbd: bool = False):
     """
     Adjust properties for the cluster:
     - pcmk_delay_max
@@ -2858,7 +2858,7 @@
         return
     is_2node_wo_qdevice = utils.is_2node_cluster_without_qdevice()
     adjust_pcmk_delay_max(is_2node_wo_qdevice)
-    adjust_stonith_timeout()
+    adjust_stonith_timeout(with_sbd=with_sbd)
     adjust_priority_in_rsc_defaults(is_2node_wo_qdevice)
     adjust_priority_fencing_delay(is_2node_wo_qdevice)
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250731.d3091c0c/crmsh/log.py 
new/crmsh-5.0.0+20250815.8d89c3d0/crmsh/log.py
--- old/crmsh-5.0.0+20250731.d3091c0c/crmsh/log.py      2025-07-31 
10:20:46.000000000 +0200
+++ new/crmsh-5.0.0+20250815.8d89c3d0/crmsh/log.py      2025-08-15 
04:59:27.000000000 +0200
@@ -402,8 +402,10 @@
         To wait and mark something finished, start with BEGIN msg, end of END msg
         """
         self.logger.info("BEGIN %s", msg)
+        pb = ProgressBar()
         try:
-            yield ProgressBar()
+            yield pb
+            pb._end()
         except Exception:
             self.logger.error("FAIL %s", msg)
             raise
@@ -515,6 +517,24 @@
         sys.stdout.write(line)
         sys.stdout.flush()
 
+    def _end(self):
+        try:
+            width, _ = os.get_terminal_size()
+        except OSError:
+            # not a terminal
+            return
+        if self._i == 0:
+            pass
+        elif self._i < width:
+            line = '\r{}\n'.format('.' * self._i)
+            sys.stdout.write(line)
+        else:
+            # the terminal is resized and narrower than the progress bar printed before
+            # just write an LF in this case
+            sys.stdout.write('\n')
+        sys.stdout.flush()
+
+
 
 def setup_logging(only_help=False):
     """
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250731.d3091c0c/crmsh/qdevice.py 
new/crmsh-5.0.0+20250815.8d89c3d0/crmsh/qdevice.py
--- old/crmsh-5.0.0+20250731.d3091c0c/crmsh/qdevice.py  2025-07-31 
10:20:46.000000000 +0200
+++ new/crmsh-5.0.0+20250815.8d89c3d0/crmsh/qdevice.py  2025-08-15 
04:59:27.000000000 +0200
@@ -52,13 +52,13 @@
     elif mode == QDEVICE_ADD and not is_stage:
         # Add qdevice from init process, safe to restart
         return QdevicePolicy.QDEVICE_RESTART
-    elif xmlutil.CrmMonXmlParser().is_any_resource_running():
-        # will lose quorum, and with RA running
+    elif xmlutil.CrmMonXmlParser().is_non_stonith_resource_running():
+        # will lose quorum, with non-stonith resource running
         # no reload, no restart cluster service
         # just leave a warning
         return QdevicePolicy.QDEVICE_RESTART_LATER
     else:
-        # will lose quorum, without RA running
+        # will lose quorum, without resource running or just stonith resource running
         # safe to restart cluster service
         return QdevicePolicy.QDEVICE_RESTART
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250731.d3091c0c/crmsh/sbd.py 
new/crmsh-5.0.0+20250815.8d89c3d0/crmsh/sbd.py
--- old/crmsh-5.0.0+20250731.d3091c0c/crmsh/sbd.py      2025-07-31 
10:20:46.000000000 +0200
+++ new/crmsh-5.0.0+20250815.8d89c3d0/crmsh/sbd.py      2025-08-15 
04:59:27.000000000 +0200
@@ -284,16 +284,20 @@
         return int(res)
 
     @staticmethod
-    def get_stonith_watchdog_timeout():
+    def get_stonith_watchdog_timeout_expected():
         '''
-        For non-bootstrap case, get stonith-watchdog-timeout value from cluster property
-        The default value is 2 * SBD_WATCHDOG_TIMEOUT
+        Returns the value of the stonith-watchdog-timeout cluster property.
+
+        If the Pacemaker service is inactive, returns the default value (2 * SBD_WATCHDOG_TIMEOUT).
+        If the property is set and its value is equal to or greater than the default, returns the property value.
+        Otherwise, returns the default value.
         '''
         default = 2 * SBDTimeout.get_sbd_watchdog_timeout()
         if not ServiceManager().service_is_active(constants.PCMK_SERVICE):
             return default
         value = utils.get_property("stonith-watchdog-timeout", get_default=False)
-        return int(value.strip('s')) if value else default
+        return_value = value if utils.crm_msec(value) >= utils.crm_msec(default) else default
+        return int(utils.crm_msec(return_value)/1000)  # convert msec to sec
 
     def _load_configurations(self):
         '''
@@ -309,7 +313,7 @@
         else:  # disk-less
             self.disk_based = False
             self.sbd_watchdog_timeout = SBDTimeout.get_sbd_watchdog_timeout()
-            self.stonith_watchdog_timeout = SBDTimeout.get_stonith_watchdog_timeout()
+            self.stonith_watchdog_timeout = SBDTimeout.get_stonith_watchdog_timeout_expected()
         self.sbd_delay_start_value_expected = self.get_sbd_delay_start_expected() if utils.detect_virt() else "no"
         self.sbd_delay_start_value_from_config = SBDUtils.get_sbd_value_from_config("SBD_DELAY_START")
 
@@ -557,7 +561,7 @@
     def restart_cluster_if_possible(with_maintenance_mode=False):
         if not ServiceManager().service_is_active(constants.PCMK_SERVICE):
             return
-        if not xmlutil.CrmMonXmlParser().is_any_resource_running():
+        if not xmlutil.CrmMonXmlParser().is_non_stonith_resource_running():
             bootstrap.restart_cluster()
         elif with_maintenance_mode:
             if not utils.is_dlm_running():
@@ -573,15 +577,15 @@
         '''
         Configure fence_sbd resource and related properties
         '''
-        if self.diskless_sbd:
-            swt_value = self.timeout_dict.get("stonith-watchdog", SBDTimeout.get_stonith_watchdog_timeout())
-            utils.set_property("stonith-watchdog-timeout", swt_value)
-        else:
+        if SBDUtils.get_sbd_device_from_config():
             if utils.get_property("stonith-watchdog-timeout", get_default=False):
                 utils.delete_property("stonith-watchdog-timeout")
             if not xmlutil.CrmMonXmlParser().is_resource_configured(self.SBD_RA):
                 cmd = f"crm configure primitive {self.SBD_RA_ID} {self.SBD_RA}"
                 sh.cluster_shell().get_stdout_or_raise_error(cmd)
+        else:
+            swt_value = self.timeout_dict.get("stonith-watchdog", SBDTimeout.get_stonith_watchdog_timeout_expected())
+            utils.set_property("stonith-watchdog-timeout", swt_value)
         utils.set_property("stonith-enabled", "true")
 
     def _warn_diskless_sbd(self, peer=None):
@@ -706,9 +710,9 @@
             SBDManager.enable_sbd_service()
 
             if self.cluster_is_running:
-                SBDManager.restart_cluster_if_possible(with_maintenance_mode=enabled)
                 self.configure_sbd()
-                bootstrap.adjust_properties()
+                bootstrap.adjust_properties(with_sbd=True)
+                SBDManager.restart_cluster_if_possible(with_maintenance_mode=enabled)
 
     def join_sbd(self, remote_user, peer_host):
         '''
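
The renamed get_stonith_watchdog_timeout_expected() enforces a floor of
2 * SBD_WATCHDOG_TIMEOUT on the configured property (bsc#1247415). A hedged
sketch of that selection, with a simplified stand-in for utils.crm_msec():

    from typing import Optional

    def to_msec(value: str) -> int:
        # simplified stand-in for crmsh.utils.crm_msec(): accepts "30", "30s" or "30000ms"
        v = value.strip()
        if v.endswith("ms"):
            return int(v[:-2])
        if v.endswith("s"):
            return int(v[:-1]) * 1000
        return int(v) * 1000

    def expected_stonith_watchdog_timeout(sbd_watchdog_timeout: int,
                                          configured: Optional[str]) -> int:
        default = 2 * sbd_watchdog_timeout  # seconds
        if configured is None:
            return default  # property not set (or pacemaker not running)
        chosen = configured if to_msec(configured) >= to_msec(str(default)) else str(default)
        return to_msec(chosen) // 1000  # msec -> sec, as the new code does

    # with SBD_WATCHDOG_TIMEOUT=15: "20s" is raised to the 30s floor, "40s" is kept
    assert expected_stonith_watchdog_timeout(15, "20s") == 30
    assert expected_stonith_watchdog_timeout(15, "40s") == 40
    assert expected_stonith_watchdog_timeout(15, None) == 30
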
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250731.d3091c0c/crmsh/ui_cluster.py 
new/crmsh-5.0.0+20250815.8d89c3d0/crmsh/ui_cluster.py
--- old/crmsh-5.0.0+20250731.d3091c0c/crmsh/ui_cluster.py       2025-07-31 
10:20:46.000000000 +0200
+++ new/crmsh-5.0.0+20250815.8d89c3d0/crmsh/ui_cluster.py       2025-08-15 
04:59:27.000000000 +0200
@@ -624,7 +624,7 @@
         if not cib_factory.commit():
             context.fatal_error("Change property cluster-name failed!")
 
-        if xmlutil.CrmMonXmlParser().is_any_resource_running():
+        if xmlutil.CrmMonXmlParser().is_non_stonith_resource_running():
             context.info("To apply the change, restart the cluster service at 
convenient time")
         else:
             bootstrap.restart_cluster()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250731.d3091c0c/crmsh/xmlutil.py 
new/crmsh-5.0.0+20250815.8d89c3d0/crmsh/xmlutil.py
--- old/crmsh-5.0.0+20250731.d3091c0c/crmsh/xmlutil.py  2025-07-31 
10:20:46.000000000 +0200
+++ new/crmsh-5.0.0+20250815.8d89c3d0/crmsh/xmlutil.py  2025-08-15 
04:59:27.000000000 +0200
@@ -1553,12 +1553,15 @@
         xpath = f'//resource[@resource_agent="{ra_type}"]'
         return bool(self.xml_elem.xpath(xpath))
 
-    def is_any_resource_running(self):
+    def is_non_stonith_resource_running(self) -> bool:
         """
-        Check if any RA is running
+        Check if any non-stonith resource is running
         """
-        xpath = '//resource[@active="true"]'
-        return bool(self.xml_elem.xpath(xpath))
+        for elem in self.xml_elem.xpath('//resource[@active="true"]'):
+            ra_type = elem.get('resource_agent', '')
+            if not ra_type.startswith('stonith'):
+                return True
+        return False
 
     def is_resource_started(self, ra):
         """
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250731.d3091c0c/doc/crm.8.adoc 
new/crmsh-5.0.0+20250815.8d89c3d0/doc/crm.8.adoc
--- old/crmsh-5.0.0+20250731.d3091c0c/doc/crm.8.adoc    2025-07-31 
10:20:46.000000000 +0200
+++ new/crmsh-5.0.0+20250815.8d89c3d0/doc/crm.8.adoc    2025-08-15 
04:59:27.000000000 +0200
@@ -296,6 +296,74 @@
 crm(test-2)configure# commit
 ...............
 
+[[topics.TimeoutFormulas,Timeout formulas]]
+=== Timeout formulas
+
+If you use the `crm cluster init` command to set up a cluster, especially
+with SBD as the STONITH device, crmsh configures several fencing-related
+settings calculated from the formulas below.
+
+.SBD related timeouts
+`watchdog` timeout: Default value is 15s. This value comes from the definition
+    in +/etc/crm/profiles.xml+ and may differ depending on the profile.
+
+`msgwait` timeout:
+...............
+    msgwait = 2 * watchdog
+...............
+`SBD_WATCHDOG_TIMEOUT`:
+...............
+    # For diskless SBD
+    SBD_WATCHDOG_TIMEOUT = Default value is 15s. This value comes from the
+    definition in +/etc/crm/profiles.xml+ and may differ depending on the profile.
+
+    # When adding diskless SBD and QDevice together from crm cluster init
+    if SBD_WATCHDOG_TIMEOUT < 35s:
+       SBD_WATCHDOG_TIMEOUT = 35s
+
+    # When adding diskless SBD in a cluster with QDevice already
+    if SBD_WATCHDOG_TIMEOUT < qdevice_sync_timeout:
+       SBD_WATCHDOG_TIMEOUT = qdevice_sync_timeout + 5s
+
+    # When adding QDevice in a cluster with diskless SBD already
+    if SBD_WATCHDOG_TIMEOUT < 35s:
+       SBD_WATCHDOG_TIMEOUT = 35s
+...............
+`SBD_DELAY_START`:
+...............
+    # For disk-based SBD
+    SBD_DELAY_START = pcmk_delay_max + msgwait + corosync.token + corosync.consensus
+
+    # For diskless SBD
+    SBD_DELAY_START = 2 * SBD_WATCHDOG_TIMEOUT + corosync.token + corosync.consensus
+...............
+`TimeoutSec` of sbd.service systemd unit:
+...............
+    default_value = 1min30s
+    if SBD_DELAY_START > default_value:
+       TimeoutSec = 1.2 * SBD_DELAY_START
+    else:
+       TimeoutSec = default_value
+...............
+.Pacemaker related timeouts
+`stonith-watchdog-timeout`: (for diskless SBD)
+...............
+    stonith-watchdog-timeout = 2 * SBD_WATCHDOG_TIMEOUT
+...............
+`stonith-timeout`:
+...............
+    # For disk-based SBD
+    value_from_sbd = 1.2 * msgwait
+    stonith-timeout = max(value_from_sbd, 60s) + corosync.token + corosync.consensus
+
+    # For diskless SBD
+    value_from_sbd = 1.2 * max(stonith-watchdog-timeout, 2*SBD_WATCHDOG_TIMEOUT)
+    stonith-timeout = max(value_from_sbd, 60s) + corosync.token + corosync.consensus
+
+    # For general scenarios
+    stonith-timeout = 60s + corosync.token + corosync.consensus
+...............
+
 [[topics.Features.Checks,Configuration semantic checks]]
 === Configuration semantic checks
 
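
For readers of the new help topic, a worked example with the 15s default
watchdog timeout; the pcmk_delay_max, corosync.token and corosync.consensus
values below are illustrative assumptions, not fixed defaults:

    # disk-based SBD, assuming watchdog=15s, pcmk_delay_max=30s, token=5s, consensus=6s
    watchdog = 15
    msgwait = 2 * watchdog                                               # 30s
    pcmk_delay_max, token, consensus = 30, 5, 6

    sbd_delay_start = pcmk_delay_max + msgwait + token + consensus       # 71s
    timeout_sec = 1.2 * sbd_delay_start if sbd_delay_start > 90 else 90  # 90s (1min30s default kept)
    stonith_timeout = max(1.2 * msgwait, 60) + token + consensus         # max(36, 60) + 11 = 71s

    # diskless SBD with SBD_WATCHDOG_TIMEOUT=15s
    sbd_watchdog_timeout = 15
    stonith_watchdog_timeout = 2 * sbd_watchdog_timeout                  # 30s
    diskless_delay_start = 2 * sbd_watchdog_timeout + token + consensus  # 41s
    diskless_stonith_timeout = max(1.2 * stonith_watchdog_timeout, 60) + token + consensus  # 71s

    print(sbd_delay_start, timeout_sec, stonith_timeout)
    print(stonith_watchdog_timeout, diskless_delay_start, diskless_stonith_timeout)
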
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20250731.d3091c0c/test/features/bootstrap_sbd_normal.feature 
new/crmsh-5.0.0+20250815.8d89c3d0/test/features/bootstrap_sbd_normal.feature
--- 
old/crmsh-5.0.0+20250731.d3091c0c/test/features/bootstrap_sbd_normal.feature    
    2025-07-31 10:20:46.000000000 +0200
+++ 
new/crmsh-5.0.0+20250815.8d89c3d0/test/features/bootstrap_sbd_normal.feature    
    2025-08-15 04:59:27.000000000 +0200
@@ -129,6 +129,21 @@
     And     Resource "stonith-sbd" type "fence_sbd" is "Started"
 
   @clean
+  Scenario: Configure diskless sbd on running cluster via stage
+    Given   Cluster service is "stopped" on "hanode1"
+    Given   Cluster service is "stopped" on "hanode2"
+    When    Run "crm cluster init -y" on "hanode1"
+    Then    Cluster service is "started" on "hanode1"
+    When    Run "crm cluster join -c hanode1 -y" on "hanode2"
+    Then    Cluster service is "started" on "hanode2"
+    And     Online nodes are "hanode1 hanode2"
+    When    Run "crm cluster init sbd -S -y" on "hanode1"
+    Then    Expected "Diskless SBD requires cluster with three or more nodes." in stderr
+    Then    Service "sbd" is "started" on "hanode1"
+    And     Service "sbd" is "started" on "hanode2"
+    And     Resource "stonith:fence_sbd" not configured
+
+  @clean
   Scenario: Configure sbd on running cluster via stage with ra running(bsc#1181906)
     Given   Cluster service is "stopped" on "hanode1"
     Given   Cluster service is "stopped" on "hanode2"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20250731.d3091c0c/test/unittests/test_bootstrap.py 
new/crmsh-5.0.0+20250815.8d89c3d0/test/unittests/test_bootstrap.py
--- old/crmsh-5.0.0+20250731.d3091c0c/test/unittests/test_bootstrap.py  
2025-07-31 10:20:46.000000000 +0200
+++ new/crmsh-5.0.0+20250815.8d89c3d0/test/unittests/test_bootstrap.py  
2025-08-15 04:59:27.000000000 +0200
@@ -1518,7 +1518,7 @@
         bootstrap.adjust_properties()
         mock_is_active.assert_called_once_with("pacemaker.service")
         mock_adj_pcmk.assert_called_once_with(True)
-        mock_adj_stonith.assert_called_once_with()
+        mock_adj_stonith.assert_called_once_with(with_sbd=False)
         mock_adj_priority.assert_called_once_with(True)
         mock_adj_fence.assert_called_once_with(True)
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20250731.d3091c0c/test/unittests/test_qdevice.py 
new/crmsh-5.0.0+20250815.8d89c3d0/test/unittests/test_qdevice.py
--- old/crmsh-5.0.0+20250731.d3091c0c/test/unittests/test_qdevice.py    
2025-07-31 10:20:46.000000000 +0200
+++ new/crmsh-5.0.0+20250815.8d89c3d0/test/unittests/test_qdevice.py    
2025-08-15 04:59:27.000000000 +0200
@@ -39,7 +39,7 @@
 def test_evaluate_qdevice_quorum_effect_later(mock_get_dict, mock_quorate, mock_parser):
     mock_get_dict.return_value = {'Expected': '2', 'Total': '2'}
     mock_quorate.return_value = False
-    mock_parser().is_any_resource_running.return_value = True
+    mock_parser().is_non_stonith_resource_running.return_value = True
     res = qdevice.evaluate_qdevice_quorum_effect(qdevice.QDEVICE_REMOVE)
     assert res == qdevice.QdevicePolicy.QDEVICE_RESTART_LATER
     mock_get_dict.assert_called_once_with()
@@ -52,7 +52,7 @@
 def test_evaluate_qdevice_quorum_effect(mock_get_dict, mock_quorate, mock_parser):
     mock_get_dict.return_value = {'Expected': '2', 'Total': '2'}
     mock_quorate.return_value = False
-    mock_parser().is_any_resource_running.return_value = False
+    mock_parser().is_non_stonith_resource_running.return_value = False
     res = qdevice.evaluate_qdevice_quorum_effect(qdevice.QDEVICE_REMOVE)
     assert res == qdevice.QdevicePolicy.QDEVICE_RESTART
     mock_get_dict.assert_called_once_with()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20250731.d3091c0c/test/unittests/test_sbd.py 
new/crmsh-5.0.0+20250815.8d89c3d0/test/unittests/test_sbd.py
--- old/crmsh-5.0.0+20250731.d3091c0c/test/unittests/test_sbd.py        
2025-07-31 10:20:46.000000000 +0200
+++ new/crmsh-5.0.0+20250815.8d89c3d0/test/unittests/test_sbd.py        
2025-08-15 04:59:27.000000000 +0200
@@ -214,20 +214,20 @@
 
     @patch('crmsh.sbd.ServiceManager')
     @patch('crmsh.sbd.SBDTimeout.get_sbd_watchdog_timeout')
-    def test_get_stonith_watchdog_timeout_default(self, mock_get_sbd_watchdog_timeout, mock_ServiceManager):
+    def test_get_stonith_watchdog_timeout_expected_default(self, mock_get_sbd_watchdog_timeout, mock_ServiceManager):
         mock_get_sbd_watchdog_timeout.return_value = 1
         mock_ServiceManager.return_value.service_is_active = MagicMock(return_value=False)
-        result = sbd.SBDTimeout.get_stonith_watchdog_timeout()
+        result = sbd.SBDTimeout.get_stonith_watchdog_timeout_expected()
         self.assertEqual(result, 2)
 
     @patch('crmsh.utils.get_property')
     @patch('crmsh.sbd.ServiceManager')
     @patch('crmsh.sbd.SBDTimeout.get_sbd_watchdog_timeout')
-    def test_get_stonith_watchdog_timeout(self, mock_get_sbd_watchdog_timeout, mock_ServiceManager, mock_get_property):
+    def test_get_stonith_watchdog_timeout_expected(self, mock_get_sbd_watchdog_timeout, mock_ServiceManager, mock_get_property):
         mock_get_sbd_watchdog_timeout.return_value = 1
         mock_ServiceManager.return_value.service_is_active = MagicMock(return_value=True)
         mock_get_property.return_value = "5"
-        result = sbd.SBDTimeout.get_stonith_watchdog_timeout()
+        result = sbd.SBDTimeout.get_stonith_watchdog_timeout_expected()
         self.assertEqual(result, 5)
 
     @patch('crmsh.sbd.SBDTimeout.get_sbd_watchdog_timeout')
@@ -268,7 +268,8 @@
     @patch('crmsh.sbd.SBDTimeout.adjust_stonith_timeout')
     @patch('crmsh.sbd.SBDTimeout.adjust_sbd_delay_start')
     @patch('crmsh.sbd.SBDTimeout._load_configurations')
-    def test_adjust_sbd_timeout_related_cluster_configuration(self, mock_load_configurations, mock_adjust_sbd_delay_start, mock_adjust_stonith_timeout, mock_adjust_systemd_start_timeout):
+    def test_adjust_sbd_timeout_related_cluster_configuration(self, mock_load_configurations, mock_adjust_sbd_delay_start, mock_adjust_stonith_timeout,
+                                                              mock_adjust_systemd_start_timeout):
         sbd.SBDTimeout.adjust_sbd_timeout_related_cluster_configuration()
         mock_load_configurations.assert_called_once()
         mock_adjust_sbd_delay_start.assert_called_once()
@@ -416,7 +417,7 @@
             self, mock_ServiceManager, mock_CrmMonXmlParser, mock_is_dlm_running, mock_logger_warning,
     ):
         mock_ServiceManager.return_value.service_is_active.return_value = True
-        mock_CrmMonXmlParser.return_value.is_any_resource_running.return_value = True
+        mock_CrmMonXmlParser.return_value.is_non_stonith_resource_running.return_value = True
         mock_is_dlm_running.return_value = False
         SBDManager.restart_cluster_if_possible()
         mock_ServiceManager.return_value.service_is_active.assert_called_once_with(constants.PCMK_SERVICE)
@@ -434,7 +435,7 @@
             self, mock_ServiceManager, mock_CrmMonXmlParser, mock_is_dlm_running, mock_logger_warning,
     ):
         mock_ServiceManager.return_value.service_is_active.return_value = True
-        mock_CrmMonXmlParser.return_value.is_any_resource_running.return_value = True
+        mock_CrmMonXmlParser.return_value.is_non_stonith_resource_running.return_value = True
         mock_is_dlm_running.return_value = True
         SBDManager.restart_cluster_if_possible(with_maintenance_mode=True)
         mock_ServiceManager.return_value.service_is_active.assert_called_once_with(constants.PCMK_SERVICE)
@@ -446,7 +447,7 @@
     @patch('crmsh.sbd.ServiceManager')
     def test_restart_cluster_if_possible(self, mock_ServiceManager, mock_CrmMonXmlParser, mock_logger_warning, mock_restart_cluster):
         mock_ServiceManager.return_value.service_is_active.return_value = True
-        mock_CrmMonXmlParser.return_value.is_any_resource_running.return_value = False
+        mock_CrmMonXmlParser.return_value.is_non_stonith_resource_running.return_value = False
         SBDManager.restart_cluster_if_possible()
         mock_ServiceManager.return_value.service_is_active.assert_called_once_with(constants.PCMK_SERVICE)
         mock_restart_cluster.assert_called_once()
@@ -753,10 +754,12 @@
 
     @patch('crmsh.utils.set_property')
     @patch('crmsh.sbd.ServiceManager')
-    @patch('crmsh.sbd.SBDTimeout.get_stonith_watchdog_timeout')
-    def test_configure_sbd_diskless(self, mock_get_stonith_watchdog_timeout, mock_ServiceManager, mock_set_property):
+    @patch('crmsh.sbd.SBDTimeout.get_stonith_watchdog_timeout_expected')
+    @patch('crmsh.sbd.SBDUtils.get_sbd_device_from_config')
+    def test_configure_sbd_diskless(self, mock_get_sbd_device, mock_get_stonith_watchdog_timeout, mock_ServiceManager, mock_set_property):
         mock_get_stonith_watchdog_timeout.return_value = 2
-        sbdmanager_instance = SBDManager(diskless_sbd=True)
+        mock_get_sbd_device.return_value = False
+        sbdmanager_instance = SBDManager()
         sbdmanager_instance.configure_sbd()
         mock_set_property.assert_has_calls([
             call("stonith-watchdog-timeout", 2),
@@ -769,7 +772,9 @@
     @patch('crmsh.sbd.xmlutil.CrmMonXmlParser')
     @patch('crmsh.utils.set_property')
     @patch('crmsh.sbd.ServiceManager')
-    def test_configure_sbd(self, mock_ServiceManager, mock_set_property, mock_CrmMonXmlParser, mock_cluster_shell, mock_get_property, mock_delete_property):
+    @patch('crmsh.sbd.SBDUtils.get_sbd_device_from_config')
+    def test_configure_sbd(self, mock_get_sbd_device, mock_ServiceManager, mock_set_property, mock_CrmMonXmlParser, mock_cluster_shell, mock_get_property, mock_delete_property):
+        mock_get_sbd_device.return_value = True
         mock_get_property.return_value = -1
         mock_CrmMonXmlParser.return_value.is_resource_configured.return_value = False
         mock_cluster_shell.return_value.get_stdout_or_raise_error.return_value = "data"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20250731.d3091c0c/test/unittests/test_xmlutil.py 
new/crmsh-5.0.0+20250815.8d89c3d0/test/unittests/test_xmlutil.py
--- old/crmsh-5.0.0+20250731.d3091c0c/test/unittests/test_xmlutil.py    
2025-07-31 10:20:46.000000000 +0200
+++ new/crmsh-5.0.0+20250815.8d89c3d0/test/unittests/test_xmlutil.py    
2025-08-15 04:59:27.000000000 +0200
@@ -48,8 +48,8 @@
         assert self.parser_inst.is_resource_configured("test") is False
         assert self.parser_inst.is_resource_configured("ocf:heartbeat:Filesystem") is True
 
-    def test_is_any_resource_running(self):
-        assert self.parser_inst.is_any_resource_running() is True
+    def test_is_non_stonith_resource_running(self):
+        assert self.parser_inst.is_non_stonith_resource_running() is True
 
     def test_is_resource_started(self):
         assert self.parser_inst.is_resource_started("test") is False
