Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked in at 2025-05-20 17:05:20
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.30101 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "crmsh"

Tue May 20 17:05:20 2025 rev:368 rq:1278747 version:5.0.0+20250520.6bc90213

Changes:
--------
--- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes      2025-05-06 16:42:53.063853382 +0200
+++ /work/SRC/openSUSE:Factory/.crmsh.new.30101/crmsh.changes   2025-05-20 17:05:28.740995657 +0200
@@ -1,0 +2,45 @@
+Tue May 20 09:59:39 UTC 2025 - xli...@suse.com
+
+- Update to version 5.0.0+20250520.6bc90213:
+  * Dev: behave: Adjust functional test for previous commit
+  * Dev: ui_cluster: Add firewalld stage to crm cluster init help info
+
+-------------------------------------------------------------------
+Tue May 20 07:18:35 UTC 2025 - xli...@suse.com
+
+- Update to version 5.0.0+20250520.2e297cfa:
+  * Dev: unittests: Adjust unit test for previous commit
+  * Dev: bootstrap: Add high-availability firewalld service on geo arbitrator
+  * Dev: bootstrap: Apply firewalld changes to both runtime and permanent configurations without reload
+  * Dev: behave: Add functional test for deployment of high-availability firewalld service
+  * Dev: bootstrap: Manage high-availability service in firewalld (bsc#1242494)
+  * Dev: Makefile.am: Update Makefile for high-availability.xml
+  * Dev: crmsh.spec: Improve firewalld support in spec file
+  * Dev: qdevice: Enable qnetd port in firewalld
+  * Dev: bootstrap: Remove code for configuring ports in firewalld
+  * Dev: spec: Update spec file for high-availability.xml
+  * Dev: Add high-availability.xml for service of firewalld (bsc#1242494)
+  * Dev: ui_configure: Add ':' suffix to order kind completer
+
+-------------------------------------------------------------------
+Fri May 16 05:24:12 UTC 2025 - xli...@suse.com
+
+- Update to version 5.0.0+20250516.bb49ed3c:
+  * Dev: behave: Fix cross-network isolation issue
+  * Dev: main: Ignore crm flag options to get completion
+
+-------------------------------------------------------------------
+Wed May 07 07:31:18 UTC 2025 - xli...@suse.com
+
+- Update to version 5.0.0+20250507.0735a244:
+  * Dev: behave: Add functional test for previous commit
+  * Dev: ui_cluster: Refactor the `do_restart` function
+  * Dev: ui_cluster: Skip stopping cluster if dlm_controld is running in maintenance mode
+
+-------------------------------------------------------------------
+Tue May 06 09:23:36 UTC 2025 - xli...@suse.com
+
+- Update to version 5.0.0+20250506.d5a19e23:
+  * Dev: migration: add a message about how to upgrade cib schema version (jsc#PED-8252)
+
+-------------------------------------------------------------------

Old:
----
  crmsh-5.0.0+20250506.7ca341fa.tar.bz2

New:
----
  crmsh-5.0.0+20250520.6bc90213.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.Z2sx9d/_old  2025-05-20 17:05:29.445025527 +0200
+++ /var/tmp/diff_new_pack.Z2sx9d/_new  2025-05-20 17:05:29.445025527 +0200
@@ -32,11 +32,16 @@
 %define pkg_group Productivity/Clustering/HA
 %endif
 
+%define use_firewalld 1
+%if %{use_firewalld}
+%define _fwdefdir %{_prefix}/lib/firewalld/services
+%endif
+
 Name:           crmsh
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        5.0.0+20250506.7ca341fa
+Version:        5.0.0+20250520.6bc90213
 Release:        0
 URL:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2
@@ -63,6 +68,9 @@
 # only require csync2 on SUSE since bootstrap
 # only works for SUSE at the moment anyway
 Requires:       csync2
+%if %{use_firewalld}
+BuildRequires:  firewall-macros
+%endif
 %endif
 
 %if 0%{?suse_version}
@@ -221,12 +229,20 @@
        rm %{buildroot}%{_bindir}/crm
 fi
 
+%if %{use_firewalld}
+install -Dm0644 high-availability.xml \
+       %{buildroot}%{_fwdefdir}/high-availability.xml
+%endif
+
 %if 0%{?suse_version}
 %fdupes %{buildroot}
 %endif
 
 %post
 %tmpfiles_create %{_tmpfilesdir}/%{name}.conf
+%if %{use_firewalld}
+%firewalld_reload
+%endif
 
 %if %{with regression_tests}
 # Run regression tests after installing the package
@@ -275,6 +291,12 @@
 %dir %attr (770, %{uname}, %{gname}) %{_var}/log/crmsh
 %{_datadir}/bash-completion/completions/crm
 
+%if %{use_firewalld}
+%dir %{_prefix}/lib/firewalld
+%dir %{_fwdefdir}
+%{_fwdefdir}/high-availability.xml
+%endif
+
 %files scripts
 %defattr(-,root,root)
 %{_datadir}/%{name}/scripts

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.Z2sx9d/_old  2025-05-20 17:05:29.501027902 +0200
+++ /var/tmp/diff_new_pack.Z2sx9d/_new  2025-05-20 17:05:29.521028750 +0200
@@ -9,7 +9,7 @@
 </service>
 <service name="tar_scm">
   <param name="url">https://github.com/ClusterLabs/crmsh.git</param>
-  <param name="changesrevision">7ca341fa4236a1efef9078a5f2955ff9a76c0e2b</param>
+  <param name="changesrevision">03c6269c85c4ba8bf19ecc268085813bb13927d0</param>
 </service>
 </servicedata>
 (No newline at EOF)

++++++ crmsh-5.0.0+20250506.7ca341fa.tar.bz2 -> crmsh-5.0.0+20250520.6bc90213.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20250506.7ca341fa/.github/workflows/crmsh-ci.yml 
new/crmsh-5.0.0+20250520.6bc90213/.github/workflows/crmsh-ci.yml
--- old/crmsh-5.0.0+20250506.7ca341fa/.github/workflows/crmsh-ci.yml    
2025-05-06 07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/.github/workflows/crmsh-ci.yml    
2025-05-20 11:34:23.000000000 +0200
@@ -154,6 +154,20 @@
         token: ${{ secrets.CODECOV_TOKEN }}
         flags: integration
 
+  functional_test_bootstrap_firewalld:
+    runs-on: ubuntu-24.04
+    timeout-minutes: 40
+    steps:
+    - uses: actions/checkout@v4
+    - name: functional test for bootstrap firewalld
+      run:  |
+        index=`$GET_INDEX_OF bootstrap_firewalld`
+        $CONTAINER_SCRIPT $index
+    - uses: codecov/codecov-action@v4
+      with:
+        token: ${{ secrets.CODECOV_TOKEN }}
+        flags: integration
+
   functional_test_corosync_ui:
     runs-on: ubuntu-24.04
     timeout-minutes: 40
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250506.7ca341fa/Makefile.am 
new/crmsh-5.0.0+20250520.6bc90213/Makefile.am
--- old/crmsh-5.0.0+20250506.7ca341fa/Makefile.am       2025-05-06 
07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/Makefile.am       2025-05-20 
11:34:23.000000000 +0200
@@ -32,6 +32,9 @@
 asciiman       = doc/crm.8.adoc doc/crmsh_crm_report.8.adoc doc/profiles.adoc
 help_DATA      = doc/crm.8.adoc
 
+firewalld_servicedir=$(prefix)/lib/firewalld/services
+firewalld_service_DATA=high-availability.xml
+
 generated_docs =
 generated_mans =
 if BUILD_ASCIIDOC
@@ -54,6 +57,9 @@
        install -D -m $$(test -x $$d && echo 0755 || echo 0644) $$d $(DESTDIR)$(datadir)/@PACKAGE@/$$d; done; \
        mv $(DESTDIR)$(datadir)/@PACKAGE@/test $(DESTDIR)$(datadir)/@PACKAGE@/tests; \
        cp test/testcases/xmlonly.sh $(DESTDIR)$(datadir)/@PACKAGE@/tests/testcases/configbasic-xml.filter
+       if [ -d $(DESTDIR)$(firewalld_servicedir) ]; then \
+               install -D -m 0644 $(srcdir)/$(firewalld_service_DATA) $(DESTDIR)$(firewalld_servicedir)/$(firewalld_service_DATA); \
+       fi
 
 # Python module installation
 all-local:
@@ -76,6 +82,7 @@
 uninstall-hook:
        @echo "Removing installed data files..."
        rm -rf $(DESTDIR)$(datadir)/@PACKAGE@
+       rm -f $(DESTDIR)$(firewalld_servicedir)/$(firewalld_service_DATA)
        @echo "Uninstallation complete."
 
 clean-local:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250506.7ca341fa/codecov.yml 
new/crmsh-5.0.0+20250520.6bc90213/codecov.yml
--- old/crmsh-5.0.0+20250506.7ca341fa/codecov.yml       2025-05-06 
07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/codecov.yml       2025-05-20 
11:34:23.000000000 +0200
@@ -8,7 +8,7 @@
         threshold: 0.35%
 codecov:
   notify:
-    after_n_builds: 30
+    after_n_builds: 31
 comment:
-  after_n_builds: 30
+  after_n_builds: 31
   layout: "condensed_header, flags, files, condensed_footer"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250506.7ca341fa/crmsh/bootstrap.py 
new/crmsh-5.0.0+20250520.6bc90213/crmsh/bootstrap.py
--- old/crmsh-5.0.0+20250506.7ca341fa/crmsh/bootstrap.py        2025-05-06 
07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/crmsh/bootstrap.py        2025-05-20 
11:34:23.000000000 +0200
@@ -72,10 +72,10 @@
         "/etc/samba/smb.conf", SYSCONFIG_NFS, SYSCONFIG_PCMK, SBDManager.SYSCONFIG_SBD, PCMK_REMOTE_AUTH, watchdog.Watchdog.WATCHDOG_CFG,
         PROFILES_FILE, CRM_CFG, SBDManager.SBD_SYSTEMD_DELAY_START_DIR)
 
-INIT_STAGES_EXTERNAL = ("ssh", "csync2", "corosync", "sbd", "cluster", "ocfs2", "gfs2", "admin", "qdevice")
+INIT_STAGES_EXTERNAL = ("ssh", "firewalld", "csync2", "corosync", "sbd", "cluster", "ocfs2", "gfs2", "admin", "qdevice")
 INIT_STAGES_INTERNAL = ("csync2_remote", "qnetd_remote")
 INIT_STAGES_ALL = INIT_STAGES_EXTERNAL + INIT_STAGES_INTERNAL
-JOIN_STAGES_EXTERNAL = ("ssh", "csync2", "ssh_merge", "cluster")
+JOIN_STAGES_EXTERNAL = ("ssh", "firewalld", "csync2", "ssh_merge", "cluster")
 
 
 class Context(object):
@@ -635,7 +635,6 @@
         if not confirm("Do you want to continue anyway?"):
             return False
 
-    firewall_open_basic_ports()
     return True
 
 
@@ -665,85 +664,11 @@
         _context.default_ip_list = [_context.interfaces_inst.nic_first_ip(_context.default_nic)]
 
 
-def configure_firewall(tcp=None, udp=None):
-    if tcp is None:
-        tcp = []
-    if udp is None:
-        udp = []
-
-    def init_firewall_firewalld(tcp, udp):
-        has_firewalld = ServiceManager().service_is_active("firewalld")
-        cmdbase = 'firewall-cmd --zone=public --permanent ' if has_firewalld else 'firewall-offline-cmd --zone=public '
-
-        def cmd(args):
-            if not invokerc(cmdbase + args):
-                utils.fatal("Failed to configure firewall.")
-
-        for p in tcp:
-            cmd("--add-port={}/tcp".format(p))
-
-        for p in udp:
-            cmd("--add-port={}/udp".format(p))
-
-        if has_firewalld:
-            if not invokerc("firewall-cmd --reload"):
-                utils.fatal("Failed to reload firewall configuration.")
-
-    def init_firewall_ufw(tcp, udp):
-        """
-        try configuring firewall with ufw
-        """
-        for p in tcp:
-            if not invokerc("ufw allow {}/tcp".format(p)):
-                utils.fatal("Failed to configure firewall (ufw)")
-        for p in udp:
-            if not invokerc("ufw allow {}/udp".format(p)):
-                utils.fatal("Failed to configure firewall (ufw)")
-
-    if utils.package_is_installed("firewalld"):
-        init_firewall_firewalld(tcp, udp)
-    elif utils.package_is_installed("ufw"):
-        init_firewall_ufw(tcp, udp)
-
-
-def firewall_open_basic_ports():
-    """
-    Open ports for csync2, hawk & dlm respectively
-    """
-    configure_firewall(tcp=[
-        constants.CSYNC2_PORT,
-        constants.HAWK_PORT,
-        constants.DLM_PORT
-        ])
-
-
-def firewall_open_corosync_ports():
-    """
-    Have to do this separately, as we need general firewall config early
-    so csync2 works, but need corosync config *after* corosync.conf has
-    been created/updated.
-
-    Please note corosync uses two UDP ports mcastport (for mcast
-    receives) and mcastport - 1 (for mcast sends).
-
-    Also open QNetd/QDevice port if configured.
-    """
-    # all mcastports defined in corosync config
-    udp = corosync.get_values("totem.interface.mcastport") or [constants.COROSYNC_PORT]
-    udp.extend([str(int(p) - 1) for p in udp])
-
-    tcp = corosync.get_values("totem.quorum.device.net.port")
-
-    configure_firewall(tcp=tcp, udp=udp)
-
-
 def init_cluster_local():
     # Caller should check this, but I'm paranoid...
     if ServiceManager().service_is_active("corosync.service"):
         utils.fatal("corosync service is running!")
 
-    firewall_open_corosync_ports()
-
     # reset password, but only if it's not already set
     # (We still need the hacluster for the hawk).
     _rc, outp = ShellUtils().get_stdout("passwd -S hacluster")
@@ -1277,6 +1202,83 @@
     utils.chmod(PCMK_REMOTE_AUTH, 0o640)
 
 
+class FirewallManager:
+
+    SERVICE_NAME = "high-availability"
+
+    def __init__(self, peer=None):
+        self.shell = None
+        self.peer = peer
+        self.firewalld_running = False
+        self.firewall_cmd = None
+        self.firewall_cmd_permanent_option = ""
+        self.peer_msg = ""
+        self.firewalld_installed = utils.package_is_installed("firewalld", self.peer)
+
+        if self.firewalld_installed:
+            self.shell = sh.cluster_shell()
+            rc, _, _ = self.shell.get_rc_stdout_stderr_without_input(self.peer, "firewall-cmd --state")
+            self.firewalld_running = rc == 0
+            self.firewall_cmd = "firewall-cmd" if self.firewalld_running else "firewall-offline-cmd"
+            self.firewall_cmd_permanent_option = " --permanent" if self.firewalld_running else ""
+            self.peer_msg = f"on {self.peer}" if self.peer else f"on {utils.this_node()}"
+
+    def _service_is_available(self) -> bool:
+        cmd = f"{self.firewall_cmd} --info-service={self.SERVICE_NAME}"
+        rc, _, _ = self.shell.get_rc_stdout_stderr_without_input(self.peer, cmd)
+        if rc != 0:
+            logger.warning("Firewalld service %s is not available %s", self.SERVICE_NAME, self.peer_msg)
+            return False
+        return True
+
+    def add_service(self):
+        if not self.firewalld_installed or not self._service_is_available():
+            return
+        cmd = f"{self.firewall_cmd}{self.firewall_cmd_permanent_option} --add-service={self.SERVICE_NAME}"
+        rc, _, err = self.shell.get_rc_stdout_stderr_without_input(self.peer, cmd)
+        if rc != 0:
+            logger.error("Failed to add firewalld service %s %s: %s", self.SERVICE_NAME, self.peer_msg, err)
+            return
+        if self.firewalld_running:
+            cmd = f"{self.firewall_cmd} --add-service={self.SERVICE_NAME}"
+            self.shell.get_rc_stdout_stderr_without_input(self.peer, cmd)
+        logger.info("Added firewalld service %s %s", self.SERVICE_NAME, self.peer_msg)
+
+    def remove_service(self):
+        if not self.firewalld_installed or not self._service_is_available():
+            return
+        cmd = f"{self.firewall_cmd}{self.firewall_cmd_permanent_option} --remove-service={self.SERVICE_NAME}"
+        rc, _, err = self.shell.get_rc_stdout_stderr_without_input(self.peer, cmd)
+        if rc != 0:
+            logger.error("Failed to remove firewalld service %s %s: %s", self.SERVICE_NAME, self.peer_msg, err)
+            return
+        if self.firewalld_running:
+            cmd = f"{self.firewall_cmd} --remove-service={self.SERVICE_NAME}"
+            self.shell.get_rc_stdout_stderr_without_input(self.peer, cmd)
+        logger.info("Removed firewalld service %s %s", self.SERVICE_NAME, self.peer_msg)
+
+    @classmethod
+    def firewalld_stage_finished(cls) -> bool:
+        inst = cls()
+        if not inst.firewalld_installed or not inst._service_is_available():
+            return True
+        cmd = f"{inst.firewall_cmd} --list-services"
+        _, outp, _ = inst.shell.get_rc_stdout_stderr_without_input(None, cmd)
+        return inst.SERVICE_NAME in outp.split()
+
+
+def init_firewalld():
+    if _context.cluster_is_running:
+        for node in utils.list_cluster_nodes():
+            FirewallManager(node).add_service()
+    else:
+        FirewallManager().add_service()
+
+
+def join_firewalld(*_):
+    FirewallManager().add_service()
+
+
 class Validation(object):
     """
     Class to validate values from interactive inputs
@@ -2183,6 +2185,8 @@
     # Trigger corosync config reload to ensure expected_votes is propagated
     invoke("corosync-cfgtool -R")
 
+    FirewallManager(peer=node).remove_service()
+
 
 def ssh_stage_finished():
     """
@@ -2208,6 +2212,7 @@
 
 INIT_STAGE_CHECKER = {
         "ssh": ssh_stage_finished,
+        "firewalld": FirewallManager.firewalld_stage_finished,
         "csync2": csync2_stage_finished,
         "corosync": corosync_stage_finished,
         "sbd": lambda: True,
@@ -2217,6 +2222,7 @@
 
 JOIN_STAGE_CHECKER = {
         "ssh": ssh_stage_finished,
+        "firewalld": FirewallManager.firewalld_stage_finished,
         "csync2": csync2_stage_finished,
         "ssh_merge": lambda: True,
         "cluster": is_online
@@ -2270,6 +2276,7 @@
         globals()["init_" + stage]()
     else:
         init_ssh()
+        init_firewalld()
         if _context.skip_csync2:
             ServiceManager().stop_service(CSYNC2_SERVICE, disable=True)
         else:
@@ -2378,6 +2385,7 @@
                 _context.node_list_in_cluster = utils.fetch_cluster_node_list_from_node(cluster_node)
                 setup_passwordless_with_other_nodes(cluster_node)
                 _context.skip_csync2 = not service_manager.service_is_active(CSYNC2_SERVICE, cluster_node)
+                join_firewalld()
                 if _context.skip_csync2:
                     service_manager.stop_service(CSYNC2_SERVICE, disable=True)
                     retrieve_all_config_files(cluster_node)
@@ -2518,6 +2526,7 @@
         qdevice.QDevice.remove_certification_files_on_qnetd()
         qdevice.QDevice.remove_qdevice_db([utils.this_node()])
         rm_configuration_files()
+        FirewallManager().remove_service()
 
 
 def init_common_geo():
@@ -2725,6 +2734,7 @@
         user_by_host.add(local_user, utils.this_node())
         user_by_host.add(remote_user, node)
         user_by_host.save_local()
+    init_firewalld()
     geo_fetch_config(node)
     if not os.path.isfile(BOOTH_CFG):
         utils.fatal("Failed to copy {} from {}".format(BOOTH_CFG, _context.cluster_node))
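
The FirewallManager class added above replaces the removed per-port logic (configure_firewall, firewall_open_basic_ports, firewall_open_corosync_ports): instead of opening individual csync2/hawk/dlm/corosync ports, bootstrap now enables firewalld's predefined high-availability service. A minimal standalone sketch of the same add-service pattern, using subprocess locally rather than crmsh's cluster shell (the helper name below is illustrative, not crmsh API):

    import shutil
    import subprocess

    SERVICE = "high-availability"

    def add_ha_service() -> None:
        # Sketch only: crmsh routes these commands through its cluster
        # shell so they can also run on peer nodes; this runs locally.
        if shutil.which("firewall-cmd") is None:
            return  # firewalld not installed; nothing to do
        running = subprocess.run(["firewall-cmd", "--state"],
                                 capture_output=True).returncode == 0
        base = "firewall-cmd" if running else "firewall-offline-cmd"
        # Permanent change first; --permanent is only valid while firewalld
        # runs, while the offline command writes the permanent config anyway.
        perm = [base, "--permanent"] if running else [base]
        subprocess.run(perm + [f"--add-service={SERVICE}"], check=False)
        if running:
            # Apply to the runtime configuration too, without a reload.
            subprocess.run([base, f"--add-service={SERVICE}"], check=False)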
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250506.7ca341fa/crmsh/constants.py 
new/crmsh-5.0.0+20250520.6bc90213/crmsh/constants.py
--- old/crmsh-5.0.0+20250506.7ca341fa/crmsh/constants.py        2025-05-06 
07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/crmsh/constants.py        2025-05-20 
11:34:23.000000000 +0200
@@ -437,11 +437,6 @@
 
 COROSYNC_STATUS_TYPES = ("ring", "quorum", "qdevice", "qnetd", "cpg")
 
-COROSYNC_PORT = 5405
-CSYNC2_PORT = 30865
-HAWK_PORT = 7630
-DLM_PORT = 21064
-
 NO_SSH_ERROR_MSG = "ssh-related operations are disabled. crmsh works in local mode."
 
 PCMK_SERVICE = "pacemaker.service"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250506.7ca341fa/crmsh/main.py 
new/crmsh-5.0.0+20250520.6bc90213/crmsh/main.py
--- old/crmsh-5.0.0+20250506.7ca341fa/crmsh/main.py     2025-05-06 
07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/crmsh/main.py     2025-05-20 
11:34:23.000000000 +0200
@@ -282,6 +282,12 @@
     # point = int(args[0])
     line = args[1]
 
+    exclude_flag_options = ('-F', '-n', '-w', '-d', '--force', '--no', '--wait', '--debug')
+    for opt in exclude_flag_options:
+        pattern = f'{opt} '
+        if pattern in line:
+            line = line.replace(pattern, '')
+
     # remove [*]crm from commandline
     idx = line.find('crm')
     if idx >= 0:
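
The hook above feeds bash completion: boolean flag options such as -F or --wait are stripped from the command line first, so "crm -F cluster init" completes the same way as "crm cluster init". The transformation reduces to the following sketch (strip_flags is a hypothetical name, not a crmsh function):

    def strip_flags(line: str) -> str:
        # Drop crm's boolean flag options so the completer only sees
        # sublevel and command words.
        for opt in ('-F', '-n', '-w', '-d', '--force', '--no', '--wait', '--debug'):
            line = line.replace(opt + ' ', '')
        return line

    assert strip_flags('crm -F cluster init') == 'crm cluster init'
    assert strip_flags('crm --wait resource') == 'crm resource'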
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250506.7ca341fa/crmsh/migration.py 
new/crmsh-5.0.0+20250520.6bc90213/crmsh/migration.py
--- old/crmsh-5.0.0+20250506.7ca341fa/crmsh/migration.py        2025-05-06 
07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/crmsh/migration.py        2025-05-20 
11:34:23.000000000 +0200
@@ -703,6 +703,7 @@
             "The CIB is not validated with the latest schema version.", [
                 f'* Latest version:  {".".join(str(i) for i in latest_schema_version)}',
                 f'* Current version: {".".join(str(i) for i in version)}',
+                'Please run "crm configure upgrade force" to upgrade to the latest version.',
             ]
         )
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250506.7ca341fa/crmsh/qdevice.py 
new/crmsh-5.0.0+20250520.6bc90213/crmsh/qdevice.py
--- old/crmsh-5.0.0+20250506.7ca341fa/crmsh/qdevice.py  2025-05-06 
07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/crmsh/qdevice.py  2025-05-20 
11:34:23.000000000 +0200
@@ -590,6 +590,23 @@
             if self.qdevice_reload_policy == QdevicePolicy.QDEVICE_RELOAD:
                 utils.cluster_run_cmd("crm corosync reload")
 
+    def config_qnetd_port(self):
+        """
+        Enable qnetd port in firewalld
+        """
+        if not ServiceManager().service_is_active("firewalld.service", self.qnetd_addr):
+            return
+        if utils.check_port_open(self.qnetd_addr, self.port):
+            return
+        shell = sh.cluster_shell()
+        cmd = f"firewall-cmd --add-port={self.port}/tcp --permanent"
+        rc, out, err = shell.get_rc_stdout_stderr_without_input(self.qnetd_addr, cmd)
+        if rc != 0 and err:
+            logger.error("Failed to add port {} to firewalld on {}: {}".format(self.port, self.qnetd_addr, err))
+            return
+        logger.info("Add port {} to firewalld on {}".format(self.port, self.qnetd_addr))
+        shell.get_stdout_or_raise_error("firewall-cmd --reload", self.qnetd_addr)
+
     def start_qdevice_service(self):
         """
         Start qdevice and qnetd service
@@ -635,6 +652,7 @@
         self.adjust_sbd_watchdog_timeout_with_qdevice()
         self.qdevice_reload_policy = evaluate_qdevice_quorum_effect(QDEVICE_ADD, self.using_diskless_sbd, self.is_stage)
         self.config_qdevice()
+        self.config_qnetd_port()
         self.start_qdevice_service()
 
     @staticmethod
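
config_qnetd_port only touches firewalld on the qnetd host when the port is not already reachable. utils.check_port_open is essentially a TCP reachability probe; a rough standalone equivalent (a sketch under that assumption; the crmsh implementation may differ):

    import socket

    def port_is_open(host: str, port: int, timeout: float = 3.0) -> bool:
        # True if a TCP connection to host:port succeeds within timeout.
        try:
            with socket.create_connection((host, port), timeout=timeout):
                return True
        except OSError:
            return False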
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250506.7ca341fa/crmsh/ui_cluster.py 
new/crmsh-5.0.0+20250520.6bc90213/crmsh/ui_cluster.py
--- old/crmsh-5.0.0+20250506.7ca341fa/crmsh/ui_cluster.py       2025-05-06 
07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/crmsh/ui_cluster.py       2025-05-20 
11:34:23.000000000 +0200
@@ -254,6 +254,11 @@
             return
         logger.debug(f"stop node list: {node_list}")
 
+        if utils.is_cluster_in_maintenance_mode() and utils.is_dlm_running():
+            logger.info("The cluster is in maintenance mode")
+            logger.error("Stopping pacemaker/corosync will trigger unexpected node fencing when 'dlm_controld' is running in maintenance mode.")
+            return False
+
         utils.wait_for_dc(node_list[0])
 
         self._set_dlm(node_list[0])
@@ -270,14 +275,17 @@
         for node in node_list:
             logger.info("The cluster stack stopped on {}".format(node))
 
+        return True
+
     @command.skill_level('administrator')
     def do_restart(self, context, *args):
         '''
         Restarts the cluster stack on all nodes or specific node(s)
         '''
-        parse_option_for_nodes(context, *args)
-        self.do_stop(context, *args)
-        self.do_start(context, *args)
+        stop_rc = self.do_stop(context, *args)
+        if stop_rc is False:
+            return False
+        return self.do_start(context, *args)
 
     @command.skill_level('administrator')
     def do_enable(self, context, *args):
@@ -331,6 +339,7 @@
 
 Stage can be one of:
     ssh         Create SSH keys for passwordless SSH between cluster nodes
+    firewalld   Add high-availability service to firewalld
     csync2      Configure csync2
     corosync    Configure corosync
     sbd         Configure SBD (requires -s <dev>)
@@ -496,6 +505,7 @@
 
 Stage can be one of:
     ssh         Obtain SSH keys from existing cluster node (requires -c <host>)
+    firewalld   Add high-availability service to firewalld
     csync2      Configure csync2 (requires -c <host>)
     ssh_merge   Merge root's SSH known_hosts across all nodes (csync2 must
                 already be configured).
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250506.7ca341fa/crmsh/ui_configure.py 
new/crmsh-5.0.0+20250520.6bc90213/crmsh/ui_configure.py
--- old/crmsh-5.0.0+20250506.7ca341fa/crmsh/ui_configure.py     2025-05-06 
07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/crmsh/ui_configure.py     2025-05-20 
11:34:23.000000000 +0200
@@ -1142,7 +1142,11 @@
 
     @command.skill_level('administrator')
     @command.completers_repeating(compl.attr_id,
-                                  compl.call(schema.rng_attr_values, 'rsc_order', 'kind'),
+                                  compl.call(
+                                      lambda *args: [v + ":" for v in schema.rng_attr_values(*args)],
+                                      'rsc_order',
+                                      'kind'
+                                  ),
                                   top_rsc_tmpl_id_list)
     def do_order(self, context, *args):
         """usage: order <id> [kind]: <rsc>[:<action>] <rsc>[:<action>] ...
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250506.7ca341fa/crmsh/utils.py 
new/crmsh-5.0.0+20250520.6bc90213/crmsh/utils.py
--- old/crmsh-5.0.0+20250506.7ca341fa/crmsh/utils.py    2025-05-06 
07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/crmsh/utils.py    2025-05-20 
11:34:23.000000000 +0200
@@ -2795,6 +2795,11 @@
     return False
 
 
+def is_cluster_in_maintenance_mode() -> bool:
+    maintenance_mode = get_property("maintenance-mode")
+    return bool(maintenance_mode and is_boolean_true(maintenance_mode))
+
+
 @contextmanager
 def leverage_maintenance_mode() -> typing.Generator[bool, None, None]:
     """
@@ -2803,8 +2808,7 @@
     Yield True if cluster is in maintenance mode or already in maintenance mode
     Yield False if not using -F/--force option or DC is not IDLE
     """
-    maintenance_mode = get_property("maintenance-mode")
-    if maintenance_mode and is_boolean_true(maintenance_mode):
+    if is_cluster_in_maintenance_mode():
         logger.info("Cluster is already in maintenance mode")
         yield True
         return
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250506.7ca341fa/crmsh.spec.in 
new/crmsh-5.0.0+20250520.6bc90213/crmsh.spec.in
--- old/crmsh-5.0.0+20250506.7ca341fa/crmsh.spec.in     2025-05-06 
07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/crmsh.spec.in     2025-05-20 
11:34:23.000000000 +0200
@@ -32,6 +32,11 @@
 %define pkg_group Productivity/Clustering/HA
 %endif
 
+%define use_firewalld 1
+%if %{use_firewalld}
+%define _fwdefdir %{_prefix}/lib/firewalld/services
+%endif
+
 Name:           crmsh
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
@@ -63,6 +68,9 @@
 # only require csync2 on SUSE since bootstrap
 # only works for SUSE at the moment anyway
 Requires:       csync2
+%if %{use_firewalld}
+BuildRequires:  firewall-macros
+%endif
 %endif
 
 %if 0%{?suse_version}
@@ -221,12 +229,20 @@
        rm %{buildroot}%{_bindir}/crm
 fi
 
+%if %{use_firewalld}
+install -Dm0644 high-availability.xml \
+       %{buildroot}%{_fwdefdir}/high-availability.xml
+%endif
+
 %if 0%{?suse_version}
 %fdupes %{buildroot}
 %endif
 
 %post
 %tmpfiles_create %{_tmpfilesdir}/%{name}.conf
+%if %{use_firewalld}
+%firewalld_reload
+%endif
 
 %if %{with regression_tests}
 # Run regression tests after installing the package
@@ -275,6 +291,12 @@
 %dir %attr (770, %{uname}, %{gname}) %{_var}/log/crmsh
 %{_datadir}/bash-completion/completions/crm
 
+%if %{use_firewalld}
+%dir %{_prefix}/lib/firewalld
+%dir %{_fwdefdir}
+%{_fwdefdir}/high-availability.xml
+%endif
+
 %files scripts
 %defattr(-,root,root)
 %{_datadir}/%{name}/scripts
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250506.7ca341fa/data-manifest 
new/crmsh-5.0.0+20250520.6bc90213/data-manifest
--- old/crmsh-5.0.0+20250506.7ca341fa/data-manifest     2025-05-06 
07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/data-manifest     2025-05-20 
11:34:23.000000000 +0200
@@ -66,6 +66,7 @@
 test/descriptions
 test/evaltest.sh
 test/features/bootstrap_bugs.feature
+test/features/bootstrap_firewalld.feature
 test/features/bootstrap_init_join_remove.feature
 test/features/bootstrap_options.feature
 test/features/bootstrap_sbd_delay.feature
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-5.0.0+20250506.7ca341fa/high-availability.xml 
new/crmsh-5.0.0+20250520.6bc90213/high-availability.xml
--- old/crmsh-5.0.0+20250506.7ca341fa/high-availability.xml     1970-01-01 
01:00:00.000000000 +0100
+++ new/crmsh-5.0.0+20250520.6bc90213/high-availability.xml     2025-05-20 
11:34:23.000000000 +0200
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="utf-8"?>
+<service>
+  <short>SUSE High Availability Cluster ports</short>
+  <description>This allows you to open various ports related to SUSE High Availability Cluster. Ports are opened for pacemaker-remote, qnetd, corosync, hawk2, booth, dlm, csync2, fence_kdump_send and drbd.</description>
+  <port protocol="tcp" port="3121"/> <!-- pacemaker-remote -->
+  <port protocol="tcp" port="5403"/> <!-- corosync qnetd-->
+  <port protocol="udp" port="5404"/> <!-- corosync -->
+  <port protocol="udp" port="5405-5412"/> <!-- corosync -->
+  <port protocol="tcp" port="7630"/> <!-- hawk2 -->
+  <port protocol="tcp" port="9929"/> <!-- booth -->
+  <port protocol="udp" port="9929"/> <!-- booth -->
+  <port protocol="tcp" port="21064"/> <!-- dlm -->
+  <port protocol="sctp" port="21064"/> <!-- dlm -->
+  <port protocol="tcp" port="30865"/> <!-- csync2 -->
+  <port protocol="udp" port="7410"/> <!-- fence_kdump_send -->
+  <port protocol="tcp" port="7788-7789"/> <!-- drbd -->
+</service>
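
The new service definition is ordinary firewalld service XML, installed under %{_prefix}/lib/firewalld/services. A quick way to list the ports it opens (a sketch; the path assumes the packaged install location):

    import xml.etree.ElementTree as ET

    tree = ET.parse("/usr/lib/firewalld/services/high-availability.xml")
    for port in tree.getroot().iter("port"):
        print(f'{port.get("port")}/{port.get("protocol")}')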
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20250506.7ca341fa/test/features/bootstrap_bugs.feature 
new/crmsh-5.0.0+20250520.6bc90213/test/features/bootstrap_bugs.feature
--- old/crmsh-5.0.0+20250506.7ca341fa/test/features/bootstrap_bugs.feature      
2025-05-06 07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/test/features/bootstrap_bugs.feature      
2025-05-20 11:34:23.000000000 +0200
@@ -12,6 +12,9 @@
     Then    Except "ERROR: cluster.init: Please run 'ssh' stage first"
     When    Run "crm cluster init ssh -y" on "hanode1"
     When    Try "crm cluster init cluster -y" on "hanode1"
+    Then    Except "ERROR: cluster.init: Please run 'firewalld' stage first"
+    When    Try "crm cluster init firewalld -y" on "hanode1"
+    When    Try "crm cluster init cluster -y" on "hanode1"
     Then    Except "ERROR: cluster.init: Please run 'csync2' stage first"
     When    Run "crm cluster init csync2 -y" on "hanode1"
     When    Try "crm cluster init cluster -y" on "hanode1"
@@ -24,6 +27,9 @@
     Then    Except "ERROR: cluster.join: Please run 'ssh' stage first"
     When    Try "crm cluster join ssh -c hanode1 -y" on "hanode2"
     When    Try "crm cluster join cluster -c hanode1 -y" on "hanode2"
+    Then    Except "ERROR: cluster.join: Please run 'firewalld' stage first"
+    When    Try "crm cluster join firewalld -c hanode1 -y" on "hanode2"
+    When    Try "crm cluster join cluster -c hanode1 -y" on "hanode2"
     Then    Except "ERROR: cluster.join: Please run 'csync2' stage first"
     When    Try "crm cluster join csync2 -c hanode1 -y" on "hanode2"
     When    Try "crm cluster join cluster -c hanode1 -y" on "hanode2"
@@ -66,11 +72,13 @@
     Given   Cluster service is "stopped" on "hanode2"
     When    Run "crm cluster init -i eth0 -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
+    When    Run "iptables -A INPUT -i eth1 -s @hanode1.ip.0 -j DROP" on "hanode2"
     When    Try "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
     Then    Cluster service is "stopped" on "hanode2"
     And     Except "Cannot see peer node "hanode1", please check the communication IP" in stderr
     When    Run "crm cluster join -c hanode1 -i eth0 -y" on "hanode2"
     Then    Cluster service is "started" on "hanode2"
+    When    Run "iptables -D INPUT -i eth1 -s @hanode1.ip.0 -j DROP" on "hanode2"
 
   @clean
   Scenario: Remove correspond nodelist in corosync.conf while remove(bsc#1165644)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20250506.7ca341fa/test/features/bootstrap_firewalld.feature 
new/crmsh-5.0.0+20250520.6bc90213/test/features/bootstrap_firewalld.feature
--- old/crmsh-5.0.0+20250506.7ca341fa/test/features/bootstrap_firewalld.feature 
1970-01-01 01:00:00.000000000 +0100
+++ new/crmsh-5.0.0+20250520.6bc90213/test/features/bootstrap_firewalld.feature 
2025-05-20 11:34:23.000000000 +0200
@@ -0,0 +1,72 @@
+@bootstrap
+Feature: Test deployment of high-availability firewalld service
+
+  Need nodes: hanode1 hanode2 qnetd-node
+
+  Scenario: The high-availability service is available
+    Given   The "high-availability" firewalld service is available on "hanode1"
+    And     The "high-availability" firewalld service is available on "hanode2"
+
+  Scenario: The high-availability service is added after setup cluster while firewalld is offline
+    Given   Cluster service is "stopped" on "hanode1"
+    And     Cluster service is "stopped" on "hanode2"
+    And     Service "firewalld" is "stopped" on "hanode1"
+    And     Service "firewalld" is "stopped" on "hanode2"
+    And     The "high-availability" firewalld service is not added on "hanode1"
+    And     The "high-availability" firewalld service is not added on "hanode2"
+
+    When    Run "crm cluster init -y" on "hanode1"
+    Then    Cluster service is "started" on "hanode1"
+    Then    The "high-availability" firewalld service is added on "hanode1"
+    When    Run "crm cluster join -c hanode1 -y" on "hanode2"
+    Then    Cluster service is "started" on "hanode2"
+    Then    The "high-availability" firewalld service is added on "hanode2"
+
+    When    Run "crm cluster remove hanode2 -y" on "hanode1"
+    Then    Cluster service is "stopped" on "hanode2"
+    Then    The "high-availability" firewalld service is not added on "hanode2"
+    When    Run "crm cluster remove hanode1 -y --force" on "hanode1"
+    Then    Cluster service is "stopped" on "hanode1"
+    Then    The "high-availability" firewalld service is not added on "hanode1"
+
+  Scenario: The high-availability service is added after setup cluster while firewalld is running
+    Given   Cluster service is "stopped" on "hanode1"
+    And     Cluster service is "stopped" on "hanode2"
+    And     Service "firewalld" is "stopped" on "hanode1"
+    And     Service "firewalld" is "stopped" on "hanode2"
+    And     The "high-availability" firewalld service is not added on "hanode1"
+    And     The "high-availability" firewalld service is not added on "hanode2"
+    # open behave agent port
+    When    Run "firewall-offline-cmd --add-port=1122/tcp" on "hanode1"
+    When    Run "firewall-offline-cmd --add-port=1122/tcp" on "hanode2"
+    When    Run "systemctl start firewalld" on "hanode2"
+    When    Run "systemctl start firewalld" on "hanode1"
+    Then    Service "firewalld" is "started" on "hanode2"
+    Then    Service "firewalld" is "started" on "hanode1"
+
+    When    Run "crm cluster init -y -N hanode2" on "hanode1"
+    Then    Cluster service is "started" on "hanode1"
+    Then    Cluster service is "started" on "hanode2"
+    Then    The "high-availability" firewalld service is added on "hanode1"
+    Then    The "high-availability" firewalld service is added on "hanode2"
+
+    When    Run "firewall-cmd --permanent --remove-service=high-availability; firewall-cmd --reload" on "hanode1"
+    Then    The "high-availability" firewalld service is not added on "hanode1"
+    When    Run "firewall-cmd --permanent --remove-service=high-availability; firewall-cmd --reload" on "hanode2"
+    Then    The "high-availability" firewalld service is not added on "hanode2"
+    When    Run "crm cluster init firewalld -y" on "hanode1"
+    Then    The "high-availability" firewalld service is added on "hanode1"
+    Then    The "high-availability" firewalld service is added on "hanode2"
+
+  Scenario: Verify qnetd server port
+    Given   Cluster service is "started" on "hanode1"
+    And     Cluster service is "started" on "hanode2"
+    And     Service "firewalld" is "stopped" on "qnetd-node"
+    When    Run "firewall-offline-cmd --add-port=1122/tcp" on "qnetd-node"
+    When    Run "systemctl start firewalld" on "qnetd-node"
+    Then    Service "firewalld" is "started" on "qnetd-node"
+    When    Run "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on "hanode1"
+    Then    Service "corosync-qdevice" is "started" on "hanode1"
+    Then    Service "corosync-qdevice" is "started" on "hanode2"
+    Then    Service "corosync-qnetd" is "started" on "qnetd-node"
+    Then    Port "5403" protocol "tcp" is opened on "qnetd-node"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20250506.7ca341fa/test/features/bootstrap_options.feature 
new/crmsh-5.0.0+20250520.6bc90213/test/features/bootstrap_options.feature
--- old/crmsh-5.0.0+20250506.7ca341fa/test/features/bootstrap_options.feature   
2025-05-06 07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/test/features/bootstrap_options.feature   
2025-05-20 11:34:23.000000000 +0200
@@ -44,9 +44,9 @@
   @clean
   Scenario: Stage validation
     When    Try "crm cluster init fdsf -y" on "hanode1"
-    Then    Expected "Invalid stage: fdsf(available stages: ssh, csync2, corosync, sbd, cluster, ocfs2, gfs2, admin, qdevice)" in stderr
+    Then    Expected "Invalid stage: fdsf(available stages: ssh, firewalld, csync2, corosync, sbd, cluster, ocfs2, gfs2, admin, qdevice)" in stderr
     When    Try "crm cluster join fdsf -y" on "hanode1"
-    Then    Expected "Invalid stage: fdsf(available stages: ssh, csync2, ssh_merge, cluster)" in stderr
+    Then    Expected "Invalid stage: fdsf(available stages: ssh, firewalld, csync2, ssh_merge, cluster)" in stderr
     When    Try "crm cluster join ssh -y" on "hanode1"
     Then    Expected "Can't use stage(ssh) without specifying cluster node" in stderr
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20250506.7ca341fa/test/features/ocfs2.feature 
new/crmsh-5.0.0+20250520.6bc90213/test/features/ocfs2.feature
--- old/crmsh-5.0.0+20250506.7ca341fa/test/features/ocfs2.feature       
2025-05-06 07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/test/features/ocfs2.feature       
2025-05-20 11:34:23.000000000 +0200
@@ -59,3 +59,8 @@
   And     Resource "ocfs2-lvmlockd" type "heartbeat:lvmlockd" is "Started"
  And     Resource "ocfs2-lvmactivate" type "heartbeat:LVM-activate" is "Started"
  And     Resource "ocfs2-clusterfs" type "heartbeat:Filesystem" is "Started"
+  # When cluster in maintenance mode and dlm is running, cannot do stop
+  When    Run "crm maintenance on" on "hanode1"
+  And     Try "crm cluster stop" on "hanode1"
+  Then    Expected return code is "1"
+  Then    Expected "Stopping pacemaker/corosync will trigger unexpected node fencing" in stderr
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20250506.7ca341fa/test/features/steps/const.py 
new/crmsh-5.0.0+20250520.6bc90213/test/features/steps/const.py
--- old/crmsh-5.0.0+20250506.7ca341fa/test/features/steps/const.py      
2025-05-06 07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/test/features/steps/const.py      
2025-05-20 11:34:23.000000000 +0200
@@ -154,6 +154,7 @@
 
 Stage can be one of:
     ssh         Create SSH keys for passwordless SSH between cluster nodes
+    firewalld   Add high-availability service to firewalld
     csync2      Configure csync2
     corosync    Configure corosync
     sbd         Configure SBD (requires -s <dev>)
@@ -249,6 +250,7 @@
 
 Stage can be one of:
     ssh         Obtain SSH keys from existing cluster node (requires -c <host>)
+    firewalld   Add high-availability service to firewalld
     csync2      Configure csync2 (requires -c <host>)
     ssh_merge   Merge root's SSH known_hosts across all nodes (csync2 must
                 already be configured).
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20250506.7ca341fa/test/features/steps/step_implementation.py 
new/crmsh-5.0.0+20250520.6bc90213/test/features/steps/step_implementation.py
--- 
old/crmsh-5.0.0+20250506.7ca341fa/test/features/steps/step_implementation.py    
    2025-05-06 07:27:46.000000000 +0200
+++ 
new/crmsh-5.0.0+20250520.6bc90213/test/features/steps/step_implementation.py    
    2025-05-20 11:34:23.000000000 +0200
@@ -658,3 +658,47 @@
     rc, schema, _ = ShellUtils().get_stdout_stderr("crm configure schema")
     assert rc == 0
     assert schema == context.schema_latest
+
+
+def firewall_cmd(context, node):
+    rc, _, _ = run_command_local_or_remote(context, "firewall-cmd --state", node, exit_on_fail=False)
+    return "firewall-cmd" if rc == 0 else "firewall-offline-cmd"
+
+
+@then('Port "{port}" protocol "{proto}" is opened on "{node}"')
+def step_impl(context, port, proto, node):
+    cmd = f"{firewall_cmd(context, node)} --list-ports"
+    rc, out, _ = run_command_local_or_remote(context, cmd, node, exit_on_fail=False)
+    assert rc == 0
+    assert f"{port}/{proto}" in out.split()
+
+
+@given('The "{service}" firewalld service is available on "{node}"')
+def step_impl(context, service, node):
+    cmd = f"{firewall_cmd(context, node)} --info-service={service}"
+    rc, _, _ = run_command_local_or_remote(context, cmd, node, exit_on_fail=False)
+    assert rc == 0
+
+
+def firewalld_list_services(context, node):
+    cmd = f"{firewall_cmd(context, node)} --list-services"
+    _, out, _ = run_command_local_or_remote(context, cmd, node, exit_on_fail=False)
+    return out or ""
+
+
+@then('The "{service}" firewalld service is added on "{node}"')
+def step_impl(context, service, node):
+    services = firewalld_list_services(context, node)
+    assert service in services.split()
+
+
+@then('The "{service}" firewalld service is not added on "{node}"')
+def step_impl(context, service, node):
+    services = firewalld_list_services(context, node)
+    assert service not in services.split()
+
+
+@given('The "{service}" firewalld service is not added on "{node}"')
+def step_impl(context, service, node):
+    services = firewalld_list_services(context, node)
+    assert service not in services.split()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20250506.7ca341fa/test/features/steps/utils.py 
new/crmsh-5.0.0+20250520.6bc90213/test/features/steps/utils.py
--- old/crmsh-5.0.0+20250506.7ca341fa/test/features/steps/utils.py      
2025-05-06 07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/test/features/steps/utils.py      
2025-05-20 11:34:23.000000000 +0200
@@ -112,7 +112,7 @@
                 context.logger.error("Failed to run %s on %s@%s :%s", cmd, os.geteuid(), host, err)
                 raise ValueError("{}".format(err))
             else:
-                return
+                return rc, out, err
     return 0, out, err
 
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20250506.7ca341fa/test/unittests/test_bootstrap.py 
new/crmsh-5.0.0+20250520.6bc90213/test/unittests/test_bootstrap.py
--- old/crmsh-5.0.0+20250506.7ca341fa/test/unittests/test_bootstrap.py  
2025-05-06 07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/test/unittests/test_bootstrap.py  
2025-05-20 11:34:23.000000000 +0200
@@ -1983,6 +1983,7 @@
             ])
         mock_error.assert_called_once_with("Removing the node node1 from {} failed".format(bootstrap.CSYNC2_CFG))
 
+    @mock.patch('crmsh.bootstrap.FirewallManager')
     @mock.patch.object(NodeMgmt, 'call_delnode')
     @mock.patch('crmsh.service_manager.ServiceManager.service_is_active')
     @mock.patch('crmsh.bootstrap.rm_configuration_files')
@@ -1999,13 +2000,16 @@
     @mock.patch('crmsh.bootstrap.get_cluster_node_ip')
     def test_remove_node_from_cluster_hostname(self, mock_get_ip, mock_stop, mock_status,
             mock_invoke, mock_invokerc, mock_error, mock_get_values, mock_del, mock_csync2,
-            mock_adjust_priority, mock_adjust_fence_delay, mock_rm_conf_files, mock_is_active, mock_cal_delnode):
+            mock_adjust_priority, mock_adjust_fence_delay, mock_rm_conf_files, mock_is_active, mock_cal_delnode, mock_firewall):
         mock_get_ip.return_value = "10.10.10.1"
         mock_cal_delnode.return_value = True
         mock_invoke.side_effect = [(True, None, None)]
         mock_invokerc.return_value = True
         mock_get_values.return_value = ["10.10.10.1"]
         mock_is_active.return_value = False
+        mock_firewall_inst = mock.Mock()
+        mock_firewall.return_value = mock_firewall_inst
+        mock_firewall_inst.remove_service = mock.Mock()
 
         bootstrap._context = mock.Mock(cluster_node="node1", rm_list=["file1", "file2"])
         bootstrap.remove_node_from_cluster('node1')
@@ -2028,3 +2032,93 @@
             mock.call(bootstrap.CSYNC2_CFG),
             mock.call("/etc/corosync/corosync.conf")
             ])
+
+
+class TestFirewallManager(unittest.TestCase):
+
+    @mock.patch('crmsh.utils.this_node')
+    @mock.patch('crmsh.utils.package_is_installed')
+    @mock.patch('crmsh.sh.cluster_shell')
+    def setUp(self, mock_shell, mock_installed, mock_this_node):
+        mock_shell_inst = mock.Mock()
+        mock_shell.return_value = mock_shell_inst
+        mock_shell_inst.get_rc_stdout_stderr_without_input.return_value = (0, '', '')
+        mock_installed.return_value = True
+        mock_this_node.return_value = "node1"
+        self.firewall_manager_inst = bootstrap.FirewallManager()
+
+    @mock.patch('logging.Logger.warning')
+    def test_service_is_available_false(self, mock_warning):
+        self.firewall_manager_inst.shell.get_rc_stdout_stderr_without_input.return_value = (1, '', '')
+        self.assertFalse(self.firewall_manager_inst._service_is_available())
+        mock_warning.assert_called_once_with('Firewalld service %s is not available %s', 'high-availability', 'on node1')
+
+    def test_service_is_available_true(self):
+        self.firewall_manager_inst.shell.get_rc_stdout_stderr_without_input.return_value = (0, '', '')
+        self.assertTrue(self.firewall_manager_inst._service_is_available())
+
+    def test_add_service_not_available(self):
+        self.firewall_manager_inst._service_is_available = mock.Mock(return_value=False)
+        self.firewall_manager_inst.shell.get_rc_stdout_stderr_without_input = mock.Mock()
+        self.firewall_manager_inst.add_service()
+        self.firewall_manager_inst._service_is_available.assert_called_once_with()
+        self.firewall_manager_inst.shell.get_rc_stdout_stderr_without_input.assert_not_called()
+
+    @mock.patch('logging.Logger.error')
+    def test_add_service_error(self, mock_error):
+        self.firewall_manager_inst._service_is_available = mock.Mock(return_value=True)
+        self.firewall_manager_inst.shell.get_rc_stdout_stderr_without_input.return_value = (1, '', 'error')
+        self.firewall_manager_inst.add_service()
+        mock_error.assert_called_once_with('Failed to add firewalld service %s %s: %s', 'high-availability', 'on node1', 'error')
+
+    @mock.patch('logging.Logger.info')
+    def test_add_service_success(self, mock_info):
+        self.firewall_manager_inst._service_is_available = mock.Mock(return_value=True)
+        self.firewall_manager_inst.shell.get_rc_stdout_stderr_without_input.side_effect = [(0, '', ''), (0, '', '')]
+        self.firewall_manager_inst.add_service()
+        mock_info.assert_called_once_with('Added firewalld service %s %s', 'high-availability', 'on node1')
+        self.firewall_manager_inst.shell.get_rc_stdout_stderr_without_input.assert_has_calls([
+            mock.call(None, 'firewall-cmd --permanent --add-service=high-availability'),
+            mock.call(None, 'firewall-cmd --add-service=high-availability')
+        ])
+
+    def test_remove_service_not_available(self):
+        self.firewall_manager_inst._service_is_available = mock.Mock(return_value=False)
+        self.firewall_manager_inst.shell.get_rc_stdout_stderr_without_input = mock.Mock()
+        self.firewall_manager_inst.remove_service()
+        self.firewall_manager_inst._service_is_available.assert_called_once_with()
+        self.firewall_manager_inst.shell.get_rc_stdout_stderr_without_input.assert_not_called()
+
+    @mock.patch('logging.Logger.error')
+    def test_remove_service_error(self, mock_error):
+        self.firewall_manager_inst._service_is_available = mock.Mock(return_value=True)
+        self.firewall_manager_inst.shell.get_rc_stdout_stderr_without_input.return_value = (1, '', 'error')
+        self.firewall_manager_inst.remove_service()
+        mock_error.assert_called_once_with('Failed to remove firewalld service %s %s: %s', 'high-availability', 'on node1', 'error')
+
+    @mock.patch('logging.Logger.info')
+    def test_remove_service_success(self, mock_info):
+        self.firewall_manager_inst._service_is_available = mock.Mock(return_value=True)
+        self.firewall_manager_inst.shell.get_rc_stdout_stderr_without_input.side_effect = [(0, '', ''), (0, '', '')]
+        self.firewall_manager_inst.remove_service()
+        mock_info.assert_called_once_with('Removed firewalld service %s %s', 'high-availability', 'on node1')
+        self.firewall_manager_inst.shell.get_rc_stdout_stderr_without_input.assert_has_calls([
+            mock.call(None, 'firewall-cmd --permanent --remove-service=high-availability'),
+            mock.call(None, 'firewall-cmd --remove-service=high-availability')
+        ])
+
+    @staticmethod
+    def test_firewalld_stage_finished_not_installed():
+        firewall_manager_inst = mock.Mock()
+        firewall_manager_inst.firewalld_installed = False
+        res = bootstrap.FirewallManager.firewalld_stage_finished()
+        assert res is True
+
+    @staticmethod
+    def test_firewalld_stage_finished():
+        firewall_manager_inst = mock.Mock()
+        firewall_manager_inst.firewalld_installed = True
+        firewall_manager_inst._service_is_available = mock.Mock(return_value=True)
+        firewall_manager_inst.shell.get_rc_stdout_stderr_without_input.return_value = (0, 'server1 high-availability server2', '')
+        res = bootstrap.FirewallManager.firewalld_stage_finished()
+        assert res is True
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20250506.7ca341fa/test/unittests/test_migration.py 
new/crmsh-5.0.0+20250520.6bc90213/test/unittests/test_migration.py
--- old/crmsh-5.0.0+20250506.7ca341fa/test/unittests/test_migration.py  
2025-05-06 07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/test/unittests/test_migration.py  
2025-05-20 11:34:23.000000000 +0200
@@ -111,5 +111,6 @@
             "The CIB is not validated with the latest schema version.", [
                 '* Latest version:  3.10',
                 '* Current version: 3.9',
+                'Please run "crm configure upgrade force" to upgrade to the latest version.',
             ]
         )
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-5.0.0+20250506.7ca341fa/test/unittests/test_qdevice.py 
new/crmsh-5.0.0+20250520.6bc90213/test/unittests/test_qdevice.py
--- old/crmsh-5.0.0+20250506.7ca341fa/test/unittests/test_qdevice.py    
2025-05-06 07:27:46.000000000 +0200
+++ new/crmsh-5.0.0+20250520.6bc90213/test/unittests/test_qdevice.py    
2025-05-20 11:34:23.000000000 +0200
@@ -790,6 +790,7 @@
         mock_status_long.return_value.__exit__ = mock.Mock()
         self.qdevice_with_ip.certificate_process_on_init = mock.Mock()
         self.qdevice_with_ip.adjust_sbd_watchdog_timeout_with_qdevice = mock.Mock()
+        self.qdevice_with_ip.config_qnetd_port = mock.Mock()
         self.qdevice_with_ip.config_qdevice = mock.Mock()
         self.qdevice_with_ip.start_qdevice_service = mock.Mock()
 
@@ -802,6 +803,40 @@
         self.qdevice_with_ip.config_qdevice.assert_called_once_with()
         self.qdevice_with_ip.start_qdevice_service.assert_called_once_with()
 
+    @mock.patch('crmsh.utils.check_port_open')
+    @mock.patch('crmsh.qdevice.ServiceManager')
+    def test_config_qnetd_port_no_firewall(self, mock_service, mock_check_port):
+        mock_service_instance = mock.Mock()
+        mock_service.return_value = mock_service_instance
+        mock_service_instance.service_is_active.return_value = False
+
+        self.qdevice_with_ip.config_qnetd_port()
+
+        mock_service_instance.service_is_active.assert_called_once_with("firewalld.service", "10.10.10.123")
+        mock_check_port.assert_not_called()
+
+    @mock.patch('logging.Logger.info')
+    @mock.patch('crmsh.sh.cluster_shell')
+    @mock.patch('crmsh.utils.check_port_open')
+    @mock.patch('crmsh.qdevice.ServiceManager')
+    def test_config_qnetd_port(self, mock_service, mock_check_port, mock_cluster_shell, mock_info):
+        mock_service_instance = mock.Mock()
+        mock_service.return_value = mock_service_instance
+        mock_service_instance.service_is_active.return_value = True
+        mock_check_port.return_value = False
+        mock_cluster_shell_instance = mock.Mock()
+        mock_cluster_shell.return_value = mock_cluster_shell_instance
+        mock_cluster_shell_instance.get_rc_stdout_stderr_without_input = mock.Mock(return_value=(0, None, None))
+        mock_cluster_shell_instance.get_stdout_or_raise_error = mock.Mock(return_value=None)
+        self.qdevice_with_ip.port = 5403
+
+        self.qdevice_with_ip.config_qnetd_port()
+
+        mock_service_instance.service_is_active.assert_called_once_with("firewalld.service", "10.10.10.123")
+        mock_check_port.assert_called_once_with("10.10.10.123", 5403)
+        mock_info.assert_called_once_with("Add port 5403 to firewalld on 10.10.10.123")
+        mock_cluster_shell_instance.get_stdout_or_raise_error.assert_called_once_with("firewall-cmd --reload", "10.10.10.123")
+
     @mock.patch('crmsh.utils.set_property')
     @mock.patch('crmsh.sbd.SBDTimeout.get_stonith_timeout')
     @mock.patch('crmsh.sbd.SBDManager.update_sbd_configuration')
