Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked in at 2021-12-07 00:00:00
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.31177 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "crmsh"

Tue Dec  7 00:00:00 2021 rev:228 rq:935925 version:4.3.1+20211206.894f84bb

Changes:
--------
--- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes      2021-12-02 02:11:13.259400727 +0100
+++ /work/SRC/openSUSE:Factory/.crmsh.new.31177/crmsh.changes   2021-12-07 00:01:18.372156141 +0100
@@ -1,0 +2,28 @@
+Mon Dec 06 07:54:42 UTC 2021 - [email protected]
+
+- Update to version 4.3.1+20211206.894f84bb:
+  * Dev: unittest: Adjust unit test based on previous changes
+  * Dev: ui_cluster: check if qdevice service started when starting cluster if qdevice configured
+  * Dev: ui_cluster: Remove node from node list if node is unreachable
+
+-------------------------------------------------------------------
+Fri Dec 03 13:00:31 UTC 2021 - [email protected]
+
+- Update to version 4.3.1+20211203.5b4e12b3:
+  * Dev: idmgmt: Avoid leading with number for ID
+  * Dev: ui_node: Improve node standby/online methods
+
+-------------------------------------------------------------------
+Fri Dec 03 07:48:26 UTC 2021 - [email protected]
+
+- Update to version 4.3.1+20211203.9b8a9910:
+  * Dev: ui_cluster: Check service is available before enable/disable qdevice
+
+-------------------------------------------------------------------
+Fri Dec 03 07:24:09 UTC 2021 - [email protected]
+
+- Update to version 4.3.1+20211203.9786bf28:
+  * Dev: unittest: Adjust unit test based on previous changes
+  * Dev: Give warning when no-quorum-policy not set as freeze while using DLM
+
+-------------------------------------------------------------------

Old:
----
  crmsh-4.3.1+20211129.a8e22584.tar.bz2

New:
----
  crmsh-4.3.1+20211206.894f84bb.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.JpzJiF/_old  2021-12-07 00:01:19.104153552 +0100
+++ /var/tmp/diff_new_pack.JpzJiF/_new  2021-12-07 00:01:19.112153524 +0100
@@ -36,7 +36,7 @@
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        4.3.1+20211129.a8e22584
+Version:        4.3.1+20211206.894f84bb
 Release:        0
 URL:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.JpzJiF/_old  2021-12-07 00:01:19.180153283 +0100
+++ /var/tmp/diff_new_pack.JpzJiF/_new  2021-12-07 00:01:19.184153270 +0100
@@ -9,6 +9,6 @@
 </service>
 <service name="tar_scm">
   <param name="url">https://github.com/ClusterLabs/crmsh.git</param>
-  <param name="changesrevision">47b4e4763688dee8ab0af253c8e0b73adaf95354</param>
+  <param name="changesrevision">894f84bb524938abbf7a87a5b2c427ab6a14ef42</param>
 </service>
 </servicedata>
\ No newline at end of file

++++++ crmsh-4.3.1+20211129.a8e22584.tar.bz2 -> crmsh-4.3.1+20211206.894f84bb.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.1+20211129.a8e22584/crmsh/cibconfig.py new/crmsh-4.3.1+20211206.894f84bb/crmsh/cibconfig.py
--- old/crmsh-4.3.1+20211129.a8e22584/crmsh/cibconfig.py        2021-11-29 04:15:41.000000000 +0100
+++ new/crmsh-4.3.1+20211206.894f84bb/crmsh/cibconfig.py        2021-12-06 08:41:09.000000000 +0100
@@ -2599,6 +2599,8 @@
             if is_live_cib():
                 self.last_commit_time = t
             self.refresh()
+
+            utils.check_no_quorum_policy_with_dlm()
         return rc
 
     def _update_schema(self):
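
The added call runs the new DLM best-practice check at the end of every successful commit. A minimal sketch of what it amounts to, using the helpers added in the crmsh/utils.py hunk further down (is_dlm_configured and logger already exist in crmsh):

    # after a successful commit of the live CIB:
    if utils.is_dlm_configured():
        if utils.get_property("no-quorum-policy") != "freeze":
            logger.warning('The DLM cluster best practice suggests to set '
                           'the cluster property "no-quorum-policy=freeze"')
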
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.1+20211129.a8e22584/crmsh/constants.py new/crmsh-4.3.1+20211206.894f84bb/crmsh/constants.py
--- old/crmsh-4.3.1+20211129.a8e22584/crmsh/constants.py        2021-11-29 04:15:41.000000000 +0100
+++ new/crmsh-4.3.1+20211206.894f84bb/crmsh/constants.py        2021-12-06 08:41:09.000000000 +0100
@@ -508,20 +508,7 @@
 CIB_RAW_FILE = "/var/lib/pacemaker/cib/cib.xml"
 XML_NODE_PATH = "/cib/configuration/nodes/node"
 XML_STATUS_PATH = "/cib/status/node_state"
-XML_NODE_QUERY_STANDBY_PATH = "//nodes/node[@id='{node_id}']/instance_attributes/nvpair[@name='standby']/@value"
-XML_STATUS_QUERY_STANDBY_PATH = "//status/node_state[@id='{node_id}']/transient_attributes/instance_attributes/nvpair[@name='standby']/@value"
-STANDBY_TEMPLATE = """
-<instance_attributes id="nodes-{node_id}">
-  <nvpair id="nodes-{node_id}-standby" name="standby" value="{value}"/>
-</instance_attributes>
-"""
-STANDBY_TEMPLATE_REBOOT = """
-<transient_attributes id="{node_id}">
-  <instance_attributes id="status-{node_id}">
-    <nvpair id="status-{node_id}-standby" name="standby" value="{value}"/>
-  </instance_attributes>
-</transient_attributes>
-"""
-STANDBY_NV_RE = r'(<nvpair.*{node_id}.*name="standby".*)value="{value}"(.*)'
+XML_NODE_QUERY_STANDBY_PATH = "//nodes/node[@id='{node_id}']/instance_attributes/nvpair[@name='standby']"
+XML_STATUS_QUERY_STANDBY_PATH = "//status/node_state[@id='{node_id}']/transient_attributes/instance_attributes/nvpair[@name='standby']"
 CRM_MON_ONE_SHOT = "crm_mon -1"
 # vim:ts=4:sw=4:et:
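
Dropping the trailing /@value changes what these XPath queries return: attribute queries yield plain strings, which cannot be edited, while element queries yield nvpair nodes that the reworked crmsh/ui_node.py code below mutates in place. A minimal lxml sketch (the CIB fragment and node id are invented for illustration):

    from lxml import etree

    cib = etree.fromstring(
        '<cib><configuration><nodes>'
        '<node id="n1"><instance_attributes id="nodes-n1">'
        '<nvpair id="nodes-n1-standby" name="standby" value="on"/>'
        '</instance_attributes></node>'
        '</nodes></configuration></cib>')

    # old query shape: /@value returns read-only strings
    cib.xpath("//nodes/node[@id='n1']/instance_attributes"
              "/nvpair[@name='standby']/@value")          # ['on']

    # new query shape: the element itself can be updated in place
    item = cib.xpath("//nodes/node[@id='n1']/instance_attributes"
                     "/nvpair[@name='standby']")
    item[0].set("value", "off")
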
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.1+20211129.a8e22584/crmsh/idmgmt.py new/crmsh-4.3.1+20211206.894f84bb/crmsh/idmgmt.py
--- old/crmsh-4.3.1+20211129.a8e22584/crmsh/idmgmt.py   2021-11-29 04:15:41.000000000 +0100
+++ new/crmsh-4.3.1+20211206.894f84bb/crmsh/idmgmt.py   2021-12-06 08:41:09.000000000 +0100
@@ -3,6 +3,7 @@
 #
 # Make sure that ids are unique.
 
+import re
 import copy
 from . import constants
 from . import xmlutil
@@ -45,6 +46,8 @@
     '''
     Create a unique id for the xml node.
     '''
+    if re.search(r'^\d+$', pfx) and node.tag != "node":
+        pfx = "num-{}".format(pfx)
     name = node.get("name")
     if node.tag == "nvpair":
         node_id = "%s-%s" % (pfx, name)
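
XML id attributes must not begin with a digit, so a purely numeric prefix such as a corosync node id would produce invalid ids like "1084783297-standby". The guard above rewrites such prefixes, leaving node elements alone since their ids are the numeric node ids themselves. A standalone sketch of the check (the helper name is hypothetical):

    import re

    def safe_prefix(pfx, tag):
        # numeric prefixes would yield ids starting with a digit,
        # which is invalid XML; node elements keep their numeric ids
        if re.search(r'^\d+$', pfx) and tag != "node":
            return "num-{}".format(pfx)
        return pfx

    safe_prefix("1084783297", "nvpair")   # 'num-1084783297'
    safe_prefix("1084783297", "node")     # '1084783297'
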
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.1+20211129.a8e22584/crmsh/ocfs2.py new/crmsh-4.3.1+20211206.894f84bb/crmsh/ocfs2.py
--- old/crmsh-4.3.1+20211129.a8e22584/crmsh/ocfs2.py    2021-11-29 04:15:41.000000000 +0100
+++ new/crmsh-4.3.1+20211206.894f84bb/crmsh/ocfs2.py    2021-12-06 08:41:09.000000000 +0100
@@ -297,17 +297,17 @@
         self._dynamic_verify()
         self.exist_ra_id_list = utils.all_exist_id()
 
+        no_quorum_policy_value = utils.get_property("no-quorum-policy")
+        if not no_quorum_policy_value or no_quorum_policy_value != "freeze":
+            utils.set_property(no_quorum_policy="freeze")
+            logger.info("  'no-quorum-policy' is changed to \"freeze\"")
+
         if self.use_cluster_lvm2:
             self._config_resource_stack_lvm2()
         else:
             self._config_resource_stack_ocfs2_along()
         logger.info("  OCFS2 device %s mounted on %s", self.target_device, self.mount_point)
 
-        res = utils.get_stdout_or_raise_error("crm configure get_property no-quorum-policy")
-        if res != "freeze":
-            utils.get_stdout_or_raise_error("crm configure property no-quorum-policy=freeze")
-            logger.info("  'no-quorum-policy' is changed to \"freeze\"")
-
     def _find_target_on_join(self, peer):
         """
         Find device name from OCF Filesystem param on peer node
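
The no-quorum-policy handling moves in front of the resource-stack configuration and switches from raw command strings to the new utils.get_property/utils.set_property helpers (added in the crmsh/utils.py hunk below). Both helpers wrap the same crm shell calls the removed lines issued directly:

    crm configure get_property no-quorum-policy
    crm configure property no-quorum-policy=freeze
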
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.1+20211129.a8e22584/crmsh/ui_cluster.py new/crmsh-4.3.1+20211206.894f84bb/crmsh/ui_cluster.py
--- old/crmsh-4.3.1+20211129.a8e22584/crmsh/ui_cluster.py       2021-11-29 04:15:41.000000000 +0100
+++ new/crmsh-4.3.1+20211206.894f84bb/crmsh/ui_cluster.py       2021-12-06 08:41:09.000000000 +0100
@@ -79,8 +79,15 @@
     for node in args:
         if node not in member_list:
             context.fatal_error("Node \"{}\" is not a cluster node".format(node))
-    # return node list
-    return member_list if options.all else args
+
+    node_list = member_list if options.all else args
+    for node in node_list:
+        try:
+            utils.ping_node(node)
+        except ValueError as err:
+            logger.warning(str(err))
+            node_list.remove(node)
+    return node_list
 
 
 def _remove_completer(args):
@@ -142,15 +149,21 @@
         '''
         Starts the cluster services on all nodes or specific node(s)
         '''
+        service_check_list = ["pacemaker.service"]
+        start_qdevice = False
+        if utils.is_qdevice_configured():
+            start_qdevice = True
+            service_check_list.append("corosync-qdevice.service")
+
         node_list = parse_option_for_nodes(context, *args)
         for node in node_list[:]:
-            if utils.service_is_active("pacemaker.service", remote_addr=node):
+            if all([utils.service_is_active(srv, remote_addr=node) for srv in service_check_list]):
                 logger.info("Cluster services already started on {}".format(node))
                 node_list.remove(node)
         if not node_list:
             return
 
-        if utils.is_qdevice_configured():
+        if start_qdevice:
             utils.start_service("corosync-qdevice", node_list=node_list)
         bootstrap.start_pacemaker(node_list)
         for node in node_list:
@@ -208,7 +221,7 @@
         node_list = parse_option_for_nodes(context, *args)
         action = context.get_command_name()
         utils.cluster_run_cmd("systemctl {} pacemaker.service".format(action), node_list)
-        if utils.is_qdevice_configured():
+        if utils.service_is_available("corosync-qdevice.service") and (utils.is_qdevice_configured() or action == "disable"):
             utils.cluster_run_cmd("systemctl {} corosync-qdevice.service".format(action), node_list)
         for node in node_list:
             logger.info("Cluster services %s on %s", action+'d', node)
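
Two details of the hunks above are worth spelling out: a node now counts as "already started" only when every expected service is active on it, and the expected-service list is computed once before the loop. Condensed from the diff (all helpers shown are existing crmsh utilities):

    service_check_list = ["pacemaker.service"]
    if utils.is_qdevice_configured():
        service_check_list.append("corosync-qdevice.service")

    # iterate over a copy (node_list[:]) so that removing
    # already-started nodes does not skip list entries
    for node in node_list[:]:
        if all(utils.service_is_active(srv, remote_addr=node)
               for srv in service_check_list):
            node_list.remove(node)
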
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.1+20211129.a8e22584/crmsh/ui_node.py new/crmsh-4.3.1+20211206.894f84bb/crmsh/ui_node.py
--- old/crmsh-4.3.1+20211129.a8e22584/crmsh/ui_node.py  2021-11-29 04:15:41.000000000 +0100
+++ new/crmsh-4.3.1+20211206.894f84bb/crmsh/ui_node.py  2021-12-06 08:41:09.000000000 +0100
@@ -3,7 +3,6 @@
 # See COPYING for license information.
 
 import re
-from lxml import etree
 from . import config
 from . import command
 from . import completers as compl
@@ -303,47 +302,45 @@
 
         # Parse node option
         node_list = parse_option_for_nodes(context, *args)
-        for node in node_list[:]:
-            if utils.is_standby(node):
-                logger.info("Node %s already standby", node)
-                node_list.remove(node)
         if not node_list:
             return
 
         # For default "forever" lifetime, under "nodes" section
         xml_path = constants.XML_NODE_PATH
         xml_query_path = constants.XML_NODE_QUERY_STANDBY_PATH
-        standby_template = constants.STANDBY_TEMPLATE
+        xml_query_path_oppsite = constants.XML_STATUS_QUERY_STANDBY_PATH
         # For "reboot" lifetime, under "status" section
         if lifetime_opt == "reboot":
             xml_path = constants.XML_STATUS_PATH
             xml_query_path = constants.XML_STATUS_QUERY_STANDBY_PATH
-            standby_template = constants.STANDBY_TEMPLATE_REBOOT
+            xml_query_path_oppsite = constants.XML_NODE_QUERY_STANDBY_PATH
 
         cib = xmlutil.cibdump2elem()
         xml_item_list = cib.xpath(xml_path)
         for xml_item in xml_item_list:
             if xml_item.get("uname") in node_list:
                 node_id = xml_item.get('id')
-                # If the standby nvpair already exists, continue
-                if cib.xpath(xml_query_path.format(node_id=node_id)):
+                # Remove possible oppsite lifetime standby nvpair
+                item_to_del = cib.xpath(xml_query_path_oppsite.format(node_id=node_id))
+                if item_to_del:
+                    xmlutil.rmnodes(item_to_del)
+                # If the standby nvpair already exists, set and continue
+                item = cib.xpath(xml_query_path.format(node_id=node_id))
+                if item and item[0].get("value") != "on":
+                    item[0].set("value", "on")
                     continue
                 # Create standby nvpair
-                standby_template_str = standby_template.format(node_id=node_id, value="on")
-                xml_item.append(etree.fromstring(standby_template_str))
+                interface_item = xml_item
+                if lifetime_opt == "reboot":
+                    res_item = xmlutil.get_set_nodes(xml_item, "transient_attributes", create=True)
+                    interface_item = res_item[0]
+                res_item = xmlutil.get_set_nodes(interface_item, "instance_attributes", create=True)
+                xmlutil.set_attr(res_item[0], "standby", "on")
 
-        cib_str = xmlutil.xml_tostring(cib)
-        # Consider both "nodes" and "status" section might contain different standby value at the same time
-        # Should replace all possible "off" values here
-        for node in node_list:
-            node_id = utils.get_nodeid_from_name(node)
-            cib_str = re.sub(constants.STANDBY_NV_RE.format(node_id=node_id, value="off"), r'\1value="on"\2', cib_str)
-
-        cmd = constants.CIB_REPLACE.format(xmlstr=cib_str)
+        cmd = constants.CIB_REPLACE.format(xmlstr=xmlutil.xml_tostring(cib))
         utils.get_stdout_or_raise_error(cmd)
         for node in node_list:
-            logger.info("Standby node %s", node)
-
+            logger.info("standby node %s", node)
 
     @command.wait
     @command.completers(compl.nodes)
@@ -354,23 +351,21 @@
         """
         # Parse node option
         node_list = parse_option_for_nodes(context, *args)
-        for node in node_list[:]:
-            if not utils.is_standby(node):
-                logger.info("Node %s already online", node)
-                node_list.remove(node)
         if not node_list:
             return
 
         cib = xmlutil.cibdump2elem()
-        cib_str = xmlutil.xml_tostring(cib)
         for node in node_list:
             node_id = utils.get_nodeid_from_name(node)
-            cib_str = re.sub(constants.STANDBY_NV_RE.format(node_id=node_id, value="on"), r'\1value="off"\2', cib_str)
+            for query_path in [constants.XML_NODE_QUERY_STANDBY_PATH, constants.XML_STATUS_QUERY_STANDBY_PATH]:
+                item = cib.xpath(query_path.format(node_id=node_id))
+                if item and item[0].get("value") != "off":
+                    item[0].set("value", "off")
 
-        cmd = constants.CIB_REPLACE.format(xmlstr=cib_str)
+        cmd = constants.CIB_REPLACE.format(xmlstr=xmlutil.xml_tostring(cib))
         utils.get_stdout_or_raise_error(cmd)
         for node in node_list:
-            logger.info("Online node %s", node)
+            logger.info("online node %s", node)
 
     @command.wait
     @command.completers(compl.nodes)
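
The rewrite drops the string templates and the regex substitution on the serialized CIB in favour of editing the parsed XML tree directly, then pushing the whole document back in one replace. The common shape of both methods, condensed from the hunks above (CIB_REPLACE is the existing crmsh constant for a CIB replace command; query_path stands for one of the two XPath constants from crmsh/constants.py):

    cib = xmlutil.cibdump2elem()                  # parse the live CIB
    item = cib.xpath(query_path.format(node_id=node_id))
    if item and item[0].get("value") != "off":
        item[0].set("value", "off")               # flip the nvpair in the tree
    cmd = constants.CIB_REPLACE.format(xmlstr=xmlutil.xml_tostring(cib))
    utils.get_stdout_or_raise_error(cmd)          # push the edited CIB back
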
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.1+20211129.a8e22584/crmsh/utils.py new/crmsh-4.3.1+20211206.894f84bb/crmsh/utils.py
--- old/crmsh-4.3.1+20211129.a8e22584/crmsh/utils.py    2021-11-29 04:15:41.000000000 +0100
+++ new/crmsh-4.3.1+20211206.894f84bb/crmsh/utils.py    2021-12-06 08:41:09.000000000 +0100
@@ -798,11 +798,11 @@
     return proc.returncode, to_ascii(stdout_data).strip()
 
 
-def get_stdout_stderr(cmd, input_s=None, shell=True, raw=False):
+def get_stdout_stderr(cmd, input_s=None, shell=True, raw=False, no_reg=False):
     '''
     Run a cmd, return (rc, stdout, stderr)
     '''
-    if options.regression_tests:
+    if options.regression_tests and not no_reg:
         print(".EXT", cmd)
     proc = subprocess.Popen(cmd,
                             shell=shell,
@@ -2680,7 +2680,7 @@
     """
     if remote:
         cmd = "ssh {} root@{} \"{}\"".format(SSH_OPTION, remote, cmd)
-    rc, out, err = get_stdout_stderr(cmd)
+    rc, out, err = get_stdout_stderr(cmd, no_reg=True)
     if rc not in success_val_list:
         raise ValueError("Failed to run \"{}\": {}".format(cmd, err))
     return out
@@ -2973,4 +2973,35 @@
         return res.group(1) == "Yes"
     else:
         raise ValueError("Failed to get quorate status from corosync-quorumtool")
+
+
+def get_property(name):
+    """
+    Get cluster properties
+    """
+    cmd = "crm configure get_property " + name
+    rc, stdout, _ = get_stdout_stderr(cmd)
+    return stdout if rc == 0 else None
+
+
+def set_property(**kwargs):
+    """
+    Set cluster properties
+    """
+    set_str = ""
+    for key, value in kwargs.items():
+        set_str += "{}={} ".format(key, value)
+    cmd = "crm configure property " + set_str.strip().replace('_', '-')
+    get_stdout_or_raise_error(cmd)
+
+
+def check_no_quorum_policy_with_dlm():
+    """
+    Give warning when no-quorum-policy not freeze while configured DLM
+    """
+    if not is_dlm_configured():
+        return
+    res = get_property("no-quorum-policy")
+    if not res or res != "freeze":
+        logger.warning("The DLM cluster best practice suggests to set the cluster property \"no-quorum-policy=freeze\"")
 # vim:ts=4:sw=4:et:
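
set_property takes properties as keyword arguments; since Python identifiers cannot contain hyphens, the underscores in the keyword names are translated back to hyphens when the command line is assembled:

    utils.set_property(no_quorum_policy="freeze")
    # runs: crm configure property no-quorum-policy=freeze

    utils.get_property("no-quorum-policy")
    # returns the property value on success, None otherwise
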
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.1+20211129.a8e22584/test/unittests/test_ocfs2.py new/crmsh-4.3.1+20211206.894f84bb/test/unittests/test_ocfs2.py
--- old/crmsh-4.3.1+20211129.a8e22584/test/unittests/test_ocfs2.py      2021-11-29 04:15:41.000000000 +0100
+++ new/crmsh-4.3.1+20211206.894f84bb/test/unittests/test_ocfs2.py      2021-12-06 08:41:09.000000000 +0100
@@ -373,48 +373,47 @@
         mock_mkfs.assert_called_once_with("/dev/sdb2")
         mock_fs.assert_called_once_with()
 
-    @mock.patch('crmsh.utils.get_stdout_or_raise_error')
     @mock.patch('crmsh.ocfs2.OCFS2Manager._config_resource_stack_lvm2')
+    @mock.patch('crmsh.utils.set_property')
+    @mock.patch('crmsh.utils.get_property')
     @mock.patch('crmsh.utils.all_exist_id')
     @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_verify')
     @mock.patch('logging.Logger.info')
-    def test_init_ocfs2_lvm2(self, mock_status, mock_dynamic_verify, mock_all_id, mock_lvm2, mock_run):
+    def test_init_ocfs2_lvm2(self, mock_status, mock_dynamic_verify, mock_all_id, mock_get, mock_set, mock_lvm2):
         mock_all_id.return_value = []
-        mock_run.return_value = "freeze"
+        mock_get.return_value = None
         self.ocfs2_inst7.mount_point = "/data"
         self.ocfs2_inst7.target_device = "/dev/vg1/lv1"
         self.ocfs2_inst7.init_ocfs2()
         mock_status.assert_has_calls([
             mock.call("Configuring OCFS2"),
+            mock.call('  \'no-quorum-policy\' is changed to "freeze"'),
             mock.call('  OCFS2 device %s mounted on %s', '/dev/vg1/lv1', '/data')
             ])
         mock_dynamic_verify.assert_called_once_with()
         mock_all_id.assert_called_once_with()
         mock_lvm2.assert_called_once_with()
 
-    @mock.patch('crmsh.utils.get_stdout_or_raise_error')
     @mock.patch('crmsh.ocfs2.OCFS2Manager._config_resource_stack_ocfs2_along')
+    @mock.patch('crmsh.utils.set_property')
+    @mock.patch('crmsh.utils.get_property')
     @mock.patch('crmsh.utils.all_exist_id')
     @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_verify')
     @mock.patch('logging.Logger.info')
-    def test_init_ocfs2(self, mock_status, mock_dynamic_verify, mock_all_id, mock_ocfs2, mock_run):
+    def test_init_ocfs2(self, mock_status, mock_dynamic_verify, mock_all_id, mock_get, mock_set, mock_ocfs2):
         mock_all_id.return_value = []
-        mock_run.side_effect = ["stop", None]
+        mock_get.return_value = None
         self.ocfs2_inst3.mount_point = "/data"
         self.ocfs2_inst3.target_device = "/dev/sda1"
         self.ocfs2_inst3.init_ocfs2()
         mock_status.assert_has_calls([
             mock.call("Configuring OCFS2"),
-            mock.call('  OCFS2 device %s mounted on %s', '/dev/sda1', '/data'),
-            mock.call('  \'no-quorum-policy\' is changed to "freeze"')
+            mock.call('  \'no-quorum-policy\' is changed to "freeze"'),
+            mock.call('  OCFS2 device %s mounted on %s', '/dev/sda1', '/data')
             ])
         mock_dynamic_verify.assert_called_once_with()
         mock_all_id.assert_called_once_with()
         mock_ocfs2.assert_called_once_with()
-        mock_run.assert_has_calls([
-            mock.call("crm configure get_property no-quorum-policy"),
-            mock.call("crm configure property no-quorum-policy=freeze")
-            ])
 
     @mock.patch('crmsh.utils.get_stdout_or_raise_error')
     def test_find_target_on_join_none(self, mock_run):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.1+20211129.a8e22584/test/unittests/test_qdevice.py new/crmsh-4.3.1+20211206.894f84bb/test/unittests/test_qdevice.py
--- old/crmsh-4.3.1+20211129.a8e22584/test/unittests/test_qdevice.py    2021-11-29 04:15:41.000000000 +0100
+++ new/crmsh-4.3.1+20211206.894f84bb/test/unittests/test_qdevice.py    2021-12-06 08:41:09.000000000 +0100
@@ -443,7 +443,7 @@
         mock_conf.assert_called_once_with()
 
     @mock.patch("crmsh.log.LoggerUtils.log_only_to_file")
-    @mock.patch("crmsh.utils.get_stdout_stderr")
+    @mock.patch("crmsh.utils.get_stdout_or_raise_error")
     @mock.patch("crmsh.corosync.conf")
     @mock.patch("crmsh.corosync.get_value")
     def test_create_ca_request(self, mock_get_value, mock_conf, mock_stdout_stderr, mock_log):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.1+20211129.a8e22584/test/unittests/test_ui_cluster.py new/crmsh-4.3.1+20211206.894f84bb/test/unittests/test_ui_cluster.py
--- old/crmsh-4.3.1+20211129.a8e22584/test/unittests/test_ui_cluster.py 2021-11-29 04:15:41.000000000 +0100
+++ new/crmsh-4.3.1+20211206.894f84bb/test/unittests/test_ui_cluster.py 2021-12-06 08:41:09.000000000 +0100
@@ -39,7 +39,9 @@
     @mock.patch('logging.Logger.info')
     @mock.patch('crmsh.utils.service_is_active')
     @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
-    def test_do_start_already_started(self, mock_parse_nodes, mock_active, mock_info):
+    @mock.patch('crmsh.utils.is_qdevice_configured')
+    def test_do_start_already_started(self, mock_qdevice_configured, mock_parse_nodes, mock_active, mock_info):
+        mock_qdevice_configured.return_value = False
         context_inst = mock.Mock()
         mock_parse_nodes.return_value = ["node1", "node2"]
         mock_active.side_effect = [True, True]
@@ -63,12 +65,15 @@
     def test_do_start(self, mock_parse_nodes, mock_active, mock_start, mock_qdevice_configured, mock_info, mock_start_pacemaker):
         context_inst = mock.Mock()
         mock_parse_nodes.return_value = ["node1"]
-        mock_active.return_value = False
+        mock_active.side_effect = [False, False]
         mock_qdevice_configured.return_value = True
 
         self.ui_cluster_inst.do_start(context_inst, "node1")
 
-        mock_active.assert_called_once_with("pacemaker.service", remote_addr="node1")
+        mock_active.assert_has_calls([
+            mock.call("pacemaker.service", remote_addr="node1"),
+            mock.call("corosync-qdevice.service", remote_addr="node1")
+            ])
         mock_start.assert_called_once_with("corosync-qdevice", node_list=["node1"])
         mock_qdevice_configured.assert_called_once_with()
         mock_info.assert_called_once_with("Cluster services started on node1")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.3.1+20211129.a8e22584/test/unittests/test_utils.py new/crmsh-4.3.1+20211206.894f84bb/test/unittests/test_utils.py
--- old/crmsh-4.3.1+20211129.a8e22584/test/unittests/test_utils.py      2021-11-29 04:15:41.000000000 +0100
+++ new/crmsh-4.3.1+20211206.894f84bb/test/unittests/test_utils.py      2021-12-06 08:41:09.000000000 +0100
@@ -1196,7 +1196,7 @@
     with pytest.raises(ValueError) as err:
         utils.get_stdout_or_raise_error("cmd")
     assert str(err.value) == 'Failed to run "cmd": error data'
-    mock_run.assert_called_once_with("cmd")
+    mock_run.assert_called_once_with("cmd", no_reg=True)
 
 
 @mock.patch("crmsh.utils.get_stdout_stderr")
@@ -1204,7 +1204,7 @@
     mock_run.return_value = (0, "output data", None)
     res = utils.get_stdout_or_raise_error("cmd", remote="node1")
     assert res == "output data"
-    mock_run.assert_called_once_with("ssh -o StrictHostKeyChecking=no root@node1 \"cmd\"")
+    mock_run.assert_called_once_with("ssh -o StrictHostKeyChecking=no root@node1 \"cmd\"", no_reg=True)
 
 
 @mock.patch("crmsh.utils.get_stdout_or_raise_error")
@@ -1603,3 +1603,35 @@
         mock.call(constants.XML_NODE_PATH),
         mock.call("//primitive[@id='node1']/instance_attributes/nvpair[@name='server']")
         ])
+
+
[email protected]('crmsh.utils.get_stdout_stderr')
+def test_get_property(mock_run):
+    mock_run.return_value = (0, "data", None)
+    assert utils.get_property("no-quorum-policy") == "data"
+    mock_run.assert_called_once_with("crm configure get_property no-quorum-policy")
+
+
[email protected]('crmsh.utils.get_stdout_or_raise_error')
+def test_set_property(mock_run):
+    utils.set_property(no_quorum_policy="stop")
+    mock_run.assert_called_once_with("crm configure property no-quorum-policy=stop")
+
+
[email protected]('crmsh.utils.is_dlm_configured')
+def test_check_no_quorum_policy_with_dlm_return(mock_dlm):
+    mock_dlm.return_value = False
+    utils.check_no_quorum_policy_with_dlm()
+    mock_dlm.assert_called_once_with()
+
+
[email protected]('logging.Logger.warning')
[email protected]('crmsh.utils.get_property')
[email protected]('crmsh.utils.is_dlm_configured')
+def test_check_no_quorum_policy_with_dlm(mock_dlm, mock_get_property, mock_warn):
+    mock_dlm.return_value = True
+    mock_get_property.return_value = "stop"
+    utils.check_no_quorum_policy_with_dlm()
+    mock_dlm.assert_called_once_with()
+    mock_get_property.assert_called_once_with("no-quorum-policy")
+    mock_warn.assert_called_once_with('The DLM cluster best practice suggests to set the cluster property "no-quorum-policy=freeze"')
