Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked in at 2025-09-04 17:59:33
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.1977 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "crmsh"

Thu Sep  4 17:59:33 2025 rev:384 rq:1302675 version:5.0.0+20250904.957a78bf

Changes:
--------
--- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes      2025-08-28 17:18:58.783998036 +0200
+++ /work/SRC/openSUSE:Factory/.crmsh.new.1977/crmsh.changes    2025-09-04 18:02:16.738030139 +0200
@@ -1,0 +2,29 @@
+Thu Sep 04 06:43:51 UTC 2025 - [email protected]
+
+- Update to version 5.0.0+20250904.957a78bf:
+  * Dev: unittests: Adjust unit test for previous commit
+  * Dev: sbd: Minor refactor enable_sbd_service method in SBDManager
+
+-------------------------------------------------------------------
+Mon Sep 01 08:11:16 UTC 2025 - [email protected]
+
+- Update to version 5.0.0+20250901.a7c653f5:
+  * Dev: unittests: Adjust unit test for previous commit
+  * Dev: sbd: Check if fence-agents-sbd is installed on join node
+  * Dev: sbd: Check if fence-agents-sbd is installed on interactive mode
+  * Dev: sbd: Check if fence-agents-sbd is installed for non sbd stage
+
+-------------------------------------------------------------------
+Mon Sep 01 06:16:36 UTC 2025 - [email protected]
+
+- Update to version 5.0.0+20250901.263ead3c:
+  * Dev: testcases: Adjust testcases for previous commit
+  * Dev: ui_node: Don't unpack node info from node_state for `node show` command
+
+-------------------------------------------------------------------
+Fri Aug 29 04:13:06 UTC 2025 - [email protected]
+
+- Update to version 5.0.0+20250829.acc43479:
+  * Fix: sbd: Ensure proper cluster restart when adding diskless SBD (bsc#1248874)
+
+-------------------------------------------------------------------
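Note for readers skimming the changelog above: the new fence-agents-sbd checks amount to a package-presence test that runs before disk-based SBD is configured (the bootstrap.py and sbd.py hunks below show the real implementation via utils.package_is_installed). A minimal standalone sketch of that precondition, assuming an rpm-based system; the function names and the rpm query here are illustrative, not crmsh API:

import subprocess
import sys


def package_is_installed(name):
    # "rpm -q" exits 0 only when the package is installed (illustrative check)
    result = subprocess.run(["rpm", "-q", name],
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)
    return result.returncode == 0


def validate_sbd_packages(sbd_devices):
    if not package_is_installed("sbd"):
        sys.exit("sbd is not installed")
    # fence-agents-sbd is only required when shared SBD disks are used
    if sbd_devices and not package_is_installed("fence-agents-sbd"):
        sys.exit("fence-agents-sbd is not installed")


if __name__ == "__main__":
    # hypothetical device path, for illustration only
    validate_sbd_packages(["/dev/disk/by-id/example-sbd-disk"])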

Old:
----
  crmsh-5.0.0+20250828.c67318b2.tar.bz2

New:
----
  crmsh-5.0.0+20250904.957a78bf.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.CSOVuy/_old  2025-09-04 18:02:17.438059622 +0200
+++ /var/tmp/diff_new_pack.CSOVuy/_new  2025-09-04 18:02:17.438059622 +0200
@@ -41,7 +41,7 @@
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        5.0.0+20250828.c67318b2
+Version:        5.0.0+20250904.957a78bf
 Release:        0
 URL:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.CSOVuy/_old  2025-09-04 18:02:17.490061812 +0200
+++ /var/tmp/diff_new_pack.CSOVuy/_new  2025-09-04 18:02:17.494061981 +0200
@@ -9,7 +9,7 @@
 </service>
 <service name="tar_scm">
   <param name="url">https://github.com/ClusterLabs/crmsh.git</param>
-  <param name="changesrevision">c67318b224d101bea417fa2ee130bd56a428c149</param>
+  <param name="changesrevision">daf8b5a04eebdc9902bd20ed128f8b4db2d934aa</param>
 </service>
 </servicedata>
 (No newline at EOF)

++++++ crmsh-5.0.0+20250828.c67318b2.tar.bz2 -> crmsh-5.0.0+20250904.957a78bf.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250828.c67318b2/crmsh/bootstrap.py new/crmsh-5.0.0+20250904.957a78bf/crmsh/bootstrap.py
--- old/crmsh-5.0.0+20250828.c67318b2/crmsh/bootstrap.py        2025-08-28 05:24:11.000000000 +0200
+++ new/crmsh-5.0.0+20250904.957a78bf/crmsh/bootstrap.py        2025-09-04 08:17:13.000000000 +0200
@@ -242,8 +242,11 @@
             if ServiceManager().service_is_active(constants.SBD_SERVICE) and not config.core.force:
                 utils.fatal("Can't configure stage sbd: sbd.service already running! Please use crm option '-F' if need to redeploy")
 
-        elif with_sbd_option and not utils.package_is_installed("sbd"):
-            utils.fatal(SBDManager.SBD_NOT_INSTALLED_MSG)
+        elif with_sbd_option:
+            if not utils.package_is_installed("sbd"):
+                utils.fatal(SBDManager.SBD_NOT_INSTALLED_MSG)
+            if self.sbd_devices and not utils.package_is_installed("fence-agents-sbd"):
+                utils.fatal(SBDManager.FENCE_SBD_NOT_INSTALLED_MSG)
 
     def _validate_nodes_option(self):
         """
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250828.c67318b2/crmsh/sbd.py new/crmsh-5.0.0+20250904.957a78bf/crmsh/sbd.py
--- old/crmsh-5.0.0+20250828.c67318b2/crmsh/sbd.py      2025-08-28 05:24:11.000000000 +0200
+++ new/crmsh-5.0.0+20250904.957a78bf/crmsh/sbd.py      2025-09-04 08:17:13.000000000 +0200
@@ -544,9 +544,11 @@
             logger.debug("Running command: %s", cmd)
             shell.get_stdout_or_raise_error(cmd)
 
-    @staticmethod
-    def enable_sbd_service():
-        cluster_nodes = utils.list_cluster_nodes() or [utils.this_node()]
+    def enable_sbd_service(self):
+        if self.cluster_is_running:
+            cluster_nodes = utils.list_cluster_nodes()
+        else:
+            cluster_nodes = [utils.this_node()]
         service_manager = ServiceManager()
 
         for node in cluster_nodes:
@@ -658,9 +660,14 @@
         configured_devices = SBDUtils.get_sbd_device_from_config()
         # return empty list if already configured and user doesn't want to overwrite
         if configured_devices and not self._wants_to_overwrite(configured_devices):
-            return []
+            return_devices = []
+        else:
+            return_devices = self._prompt_for_sbd_device()
 
-        return self._prompt_for_sbd_device()
+        if not self.diskless_sbd and not utils.package_is_installed("fence-agents-sbd"):
+            utils.fatal(self.FENCE_SBD_NOT_INSTALLED_MSG)
+
+        return return_devices
 
     def get_sbd_device_from_bootstrap(self):
         '''
@@ -704,12 +711,28 @@
         with utils.leverage_maintenance_mode() as enabled:
             self.initialize_sbd()
             self.update_configuration()
-            SBDManager.enable_sbd_service()
+            self.enable_sbd_service()
 
             if self.cluster_is_running:
+
+                # If diskless SBD is being added and sbd.service is not running, like running:
+                #     crm cluster init sbd -S -y
+                # the cluster must be restarted first to activate sbd.service on all nodes.
+                # Only then should additional properties be configured,
+                # because the stonith-watchdog-timeout property requires sbd.service to be active.
+                restart_cluster_first = self.diskless_sbd and not ServiceManager().service_is_active(constants.SBD_SERVICE)
+                if restart_cluster_first:
+                    SBDManager.restart_cluster_if_possible(with_maintenance_mode=enabled)
+
                 self.configure_sbd()
                 bootstrap.adjust_properties(with_sbd=True)
-                SBDManager.restart_cluster_if_possible(with_maintenance_mode=enabled)
+
+                # In other cases, it is better to restart the cluster
+                # after modifying SBD-related configurations.
+                # This helps prevent unexpected issues, such as nodes being fenced
+                # due to large SBD_WATCHDOG_TIMEOUT values combined with smaller timeouts.
+                if not restart_cluster_first:
+                    SBDManager.restart_cluster_if_possible(with_maintenance_mode=enabled)
 
     def join_sbd(self, remote_user, peer_host):
         '''
@@ -722,18 +745,23 @@
             service_manager.disable_service(constants.SBD_SERVICE)
             return
 
+        if not utils.package_is_installed("sbd"):
+            utils.fatal(self.SBD_NOT_INSTALLED_MSG)
+        dev_list = SBDUtils.get_sbd_device_from_config()
+        if dev_list and not utils.package_is_installed("fence-agents-sbd"):
+            utils.fatal(self.FENCE_SBD_NOT_INSTALLED_MSG)
+
         from .watchdog import Watchdog
         self._watchdog_inst = Watchdog(remote_user=remote_user, peer_host=peer_host)
         self._watchdog_inst.join_watchdog()
 
-        dev_list = SBDUtils.get_sbd_device_from_config()
         if dev_list:
             SBDUtils.verify_sbd_device(dev_list, [peer_host])
         else:
             self._warn_diskless_sbd(peer_host)
 
         logger.info("Got {}SBD configuration".format("" if dev_list else "diskless "))
-        service_manager.enable_service(constants.SBD_SERVICE)
+        self.enable_sbd_service()
 
 
 def cleanup_existing_sbd_resource():
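The restart ordering added in the sbd.py hunk above can be summarized outside the diff as follows. This is a standalone sketch, not crmsh code: the callbacks stand in for the ServiceManager and bootstrap helpers, and only the decision logic mirrors the patch.

def apply_sbd_stage(diskless_sbd, sbd_service_active,
                    restart_cluster, configure_sbd, adjust_properties):
    # Adding diskless SBD while sbd.service is not yet active (e.g.
    # "crm cluster init sbd -S -y" on a running cluster): restart the
    # cluster first so sbd.service is active before the
    # stonith-watchdog-timeout property is configured.
    restart_first = diskless_sbd and not sbd_service_active
    if restart_first:
        restart_cluster()

    configure_sbd()
    adjust_properties()

    # In every other case, restart only after the SBD-related configuration
    # has been written, so large SBD_WATCHDOG_TIMEOUT values and the other
    # timeouts take effect together instead of racing each other.
    if not restart_first:
        restart_cluster()


# Example wiring with no-op callbacks:
apply_sbd_stage(
    diskless_sbd=True,
    sbd_service_active=False,
    restart_cluster=lambda: print("restart cluster"),
    configure_sbd=lambda: print("configure SBD"),
    adjust_properties=lambda: print("adjust properties"),
)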
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250828.c67318b2/crmsh/ui_node.py new/crmsh-5.0.0+20250904.957a78bf/crmsh/ui_node.py
--- old/crmsh-5.0.0+20250828.c67318b2/crmsh/ui_node.py  2025-08-28 05:24:11.000000000 +0200
+++ new/crmsh-5.0.0+20250904.957a78bf/crmsh/ui_node.py  2025-09-04 08:17:13.000000000 +0200
@@ -306,7 +306,6 @@
             return False
 
         cfg_nodes = cib.xpath('/cib/configuration/nodes/node')
-        node_states = cib.xpath('/cib/status/node_state')
 
         def find(it, lst):
             for n in lst:
@@ -316,18 +315,14 @@
 
         def do_print(uname):
             xml = find(uname, cfg_nodes)
-            state = find(uname, node_states)
-            if xml is not None or state is not None:
-                is_offline = state is not None and \
-                    (state.get("crmd") == "offline" or \
-                        (state.get("crmd").isdigit() and int(state.get("crmd")) == 0))
-                print_node(*unpack_node_xmldata(xml if xml is not None else state, is_offline))
+            if xml is not None:
+                is_offline = not xmlutil.CrmMonXmlParser().is_node_online(uname)
+                print_node(*unpack_node_xmldata(xml, is_offline))
 
         if node is not None:
             do_print(node)
         else:
-            all_nodes = set([n.get("uname") for n in cfg_nodes + node_states])
-            for uname in sorted(all_nodes):
+            for uname in [n.get("uname") for n in cfg_nodes]:
                 do_print(uname)
         return True
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250828.c67318b2/test/testcases/node.exp new/crmsh-5.0.0+20250904.957a78bf/test/testcases/node.exp
--- old/crmsh-5.0.0+20250828.c67318b2/test/testcases/node.exp   2025-08-28 05:24:11.000000000 +0200
+++ new/crmsh-5.0.0+20250904.957a78bf/test/testcases/node.exp   2025-09-04 08:17:13.000000000 +0200
@@ -1,7 +1,7 @@
 .TRY node show
-node1: member
+node1: member(offline)
 .TRY node show node1
-node1: member
+node1: member(offline)
 .SETENV showobj=node1
 .TRY configure primitive p5 Dummy
 .EXT crm_resource --show-metadata ocf:heartbeat:Dummy
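For context on the ui_node.py and node.exp changes above: `node show` now takes a node's offline state from crm_mon XML (xmlutil.CrmMonXmlParser().is_node_online) instead of unpacking /cib/status/node_state, which is why the expected output gains the "(offline)" suffix. A rough self-contained sketch of that idea, using a trimmed, hypothetical crm_mon-style XML fragment rather than crmsh's actual parser:

import xml.etree.ElementTree as ET

# Trimmed, hypothetical crm_mon-style output; real output carries much more.
SAMPLE_CRM_MON_XML = """
<pacemaker-result>
  <nodes>
    <node name="node1" online="false"/>
    <node name="node2" online="true"/>
  </nodes>
</pacemaker-result>
"""


def is_node_online(xml_text, uname):
    root = ET.fromstring(xml_text)
    node = root.find(f".//nodes/node[@name='{uname}']")
    return node is not None and node.get("online") == "true"


for uname in ("node1", "node2"):
    suffix = "" if is_node_online(SAMPLE_CRM_MON_XML, uname) else "(offline)"
    print(f"{uname}: member{suffix}")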
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250828.c67318b2/test/unittests/test_bootstrap.py new/crmsh-5.0.0+20250904.957a78bf/test/unittests/test_bootstrap.py
--- old/crmsh-5.0.0+20250828.c67318b2/test/unittests/test_bootstrap.py  2025-08-28 05:24:11.000000000 +0200
+++ new/crmsh-5.0.0+20250904.957a78bf/test/unittests/test_bootstrap.py  2025-09-04 08:17:13.000000000 +0200
@@ -173,7 +173,6 @@
     def test_validate_sbd_option_error_sbd_stage(self, mock_check_all, mock_installed, mock_list, mock_fatal):
         mock_fatal.side_effect = ValueError
         mock_list.return_value = ["node1", "node2"]
-        options = mock.Mock(stage="sbd", diskless_sbd=True, cluster_is_running=True)
         mock_installed.side_effect = [True, False]
         ctx = crmsh.bootstrap.Context()
         ctx.stage = "sbd"
@@ -188,6 +187,47 @@
         ])
 
     @mock.patch('crmsh.utils.fatal')
+    @mock.patch('crmsh.utils.package_is_installed')
+    @mock.patch('crmsh.utils.list_cluster_nodes')
+    @mock.patch('crmsh.utils.check_all_nodes_reachable')
+    def test_validate_sbd_option_sbd_package_not_installed(self, mock_check_all, mock_list, mock_installed, mock_fatal):
+        mock_fatal.side_effect = ValueError
+        mock_list.return_value = ["node1", "node2"]
+        mock_installed.return_value = False
+        ctx = crmsh.bootstrap.Context()
+        ctx.stage = "sbd"
+        ctx.diskless_sbd = True
+        ctx.cluster_is_running = True
+
+        with self.assertRaises(ValueError):
+            ctx._validate_sbd_option()
+
+        mock_check_all.assert_called_once_with("setup SBD")
+        mock_installed.assert_called_once_with("sbd", "node1")
+        mock_fatal.assert_called_once_with(sbd.SBDManager.SBD_NOT_INSTALLED_MSG + " on node1")
+
+    @mock.patch('crmsh.utils.fatal')
+    @mock.patch('crmsh.utils.package_is_installed')
+    @mock.patch('crmsh.utils.this_node')
+    @mock.patch('crmsh.sbd.SBDUtils.verify_sbd_device')
+    def test_validate_sbd_option_fence_sbd_package_not_installed(self, mock_verify, mock_this_node, mock_installed, mock_fatal):
+        mock_fatal.side_effect = ValueError
+        mock_this_node.return_value = "node1"
+        mock_installed.side_effect = [True, False]
+        ctx = crmsh.bootstrap.Context()
+        ctx.sbd_devices = ["/dev/sda1"]
+        ctx.stage = "sbd"
+
+        with self.assertRaises(ValueError):
+            ctx._validate_sbd_option()
+
+        mock_installed.assert_has_calls([
+            mock.call("sbd", "node1"),
+            mock.call("fence-agents-sbd", "node1")
+        ])
+        mock_fatal.assert_called_once_with(sbd.SBDManager.FENCE_SBD_NOT_INSTALLED_MSG + " on node1")
+
+    @mock.patch('crmsh.utils.fatal')
     @mock.patch('socket.gethostbyname')
     @mock.patch('crmsh.utils.InterfacesInfo.ip_in_local')
     def test_validate_cluster_node_same_name(self, mock_ip_in_local, mock_gethost, mock_fatal):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20250828.c67318b2/test/unittests/test_sbd.py new/crmsh-5.0.0+20250904.957a78bf/test/unittests/test_sbd.py
--- old/crmsh-5.0.0+20250828.c67318b2/test/unittests/test_sbd.py        2025-08-28 05:24:11.000000000 +0200
+++ new/crmsh-5.0.0+20250904.957a78bf/test/unittests/test_sbd.py        2025-09-04 08:17:13.000000000 +0200
@@ -393,9 +393,11 @@
     @patch('crmsh.sbd.ServiceManager')
     @patch('crmsh.utils.list_cluster_nodes')
     def test_enable_sbd_service(self, mock_list_cluster_nodes, mock_ServiceManager, mock_logger_info):
+        mock_bootstrap_ctx = Mock(cluster_is_running=True)
+        sbdmanager_instance = SBDManager(bootstrap_context=mock_bootstrap_ctx)
         mock_list_cluster_nodes.return_value = ['node1', 'node2']
         mock_ServiceManager.return_value.service_is_enabled.side_effect = [False, False]
-        SBDManager.enable_sbd_service()
+        sbdmanager_instance.enable_sbd_service()
         mock_logger_info.assert_has_calls([
             call("Enable %s on node %s", constants.SBD_SERVICE, 'node1'),
             call("Enable %s on node %s", constants.SBD_SERVICE, 'node2')
@@ -664,36 +666,42 @@
         mock_exists.assert_called_once_with(sbd.SBDManager.SYSCONFIG_SBD)
         mock_ServiceManager.return_value.disable_service.assert_called_once_with(constants.SBD_SERVICE)
 
+    @patch('crmsh.utils.package_is_installed')
     @patch('logging.Logger.info')
     @patch('crmsh.sbd.SBDUtils.verify_sbd_device')
     @patch('crmsh.sbd.SBDUtils.get_sbd_device_from_config')
     @patch('crmsh.watchdog.Watchdog')
     @patch('os.path.exists')
     @patch('crmsh.sbd.ServiceManager')
-    def test_join_sbd_diskbased(self, mock_ServiceManager, mock_exists, mock_Watchdog, mock_get_sbd_device_from_config, mock_verify_sbd_device, mock_logger_info):
+    def test_join_sbd_diskbased(self, mock_ServiceManager, mock_exists, mock_Watchdog, mock_get_sbd_device_from_config, mock_verify_sbd_device, mock_logger_info, mock_package_is_installed):
+        mock_package_is_installed.side_effect = [True, True]
         mock_exists.return_value = True
         mock_ServiceManager.return_value.service_is_enabled.return_value = True
         mock_Watchdog.return_value.join_watchdog = Mock()
         mock_get_sbd_device_from_config.return_value = ['/dev/sbd_device']
 
         sbdmanager_instance = SBDManager()
+        sbdmanager_instance.enable_sbd_service = Mock()
         sbdmanager_instance.join_sbd("remote_user", "peer_host")
 
         mock_logger_info.assert_called_once_with("Got SBD configuration")
 
+    @patch('crmsh.utils.package_is_installed')
     @patch('logging.Logger.info')
     @patch('crmsh.sbd.SBDUtils.get_sbd_device_from_config')
     @patch('crmsh.watchdog.Watchdog')
     @patch('os.path.exists')
     @patch('crmsh.sbd.ServiceManager')
-    def test_join_sbd_diskless(self, mock_ServiceManager, mock_exists, mock_Watchdog, mock_get_sbd_device_from_config, mock_logger_info):
+    def test_join_sbd_diskless(self, mock_ServiceManager, mock_exists, mock_Watchdog, mock_get_sbd_device_from_config, mock_logger_info, mock_package_is_installed):
         mock_exists.return_value = True
+        mock_package_is_installed.return_value = True
         mock_ServiceManager.return_value.service_is_enabled.return_value = True
         mock_Watchdog.return_value.join_watchdog = Mock()
         mock_get_sbd_device_from_config.return_value = []
 
         sbdmanager_instance = SBDManager()
         sbdmanager_instance._warn_diskless_sbd = Mock()
+        sbdmanager_instance.enable_sbd_service = Mock()
         sbdmanager_instance.join_sbd("remote_user", "peer_host")
 
         mock_logger_info.assert_called_once_with("Got diskless SBD configuration")
