Hello community,

here is the log from the commit of package crmsh for openSUSE:Leap:15.2 checked 
in at 2020-03-29 14:55:58
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Leap:15.2/crmsh (Old)
 and      /work/SRC/openSUSE:Leap:15.2/.crmsh.new.3160 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "crmsh"

Sun Mar 29 14:55:58 2020 rev:71 rq:789167 version:4.2.0+git.1585096577.f3257c89

Changes:
--------
--- /work/SRC/openSUSE:Leap:15.2/crmsh/crmsh.changes    2020-03-15 
07:11:54.304982884 +0100
+++ /work/SRC/openSUSE:Leap:15.2/.crmsh.new.3160/crmsh.changes  2020-03-29 
14:55:59.259171688 +0200
@@ -1,0 +2,8 @@
+Wed Mar 25 01:12:05 UTC 2020 - [email protected]
+
+- Update to version 4.2.0+git.1585096577.f3257c89:
+  * Low: corosync: Improve qdevice configure process
+  * Fix: bootstrap: Change condition to add stonith-sbd resource(bsc#1166967)
+  * Fix: bootstrap: use csync2 '-f' option correctly(bsc#1166684)
+
+-------------------------------------------------------------------

Old:
----
  crmsh-4.2.0+git.1584013187.b45cfcb6.tar.bz2

New:
----
  crmsh-4.2.0+git.1585096577.f3257c89.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.YKwqaB/_old  2020-03-29 14:55:59.603171956 +0200
+++ /var/tmp/diff_new_pack.YKwqaB/_new  2020-03-29 14:55:59.607171959 +0200
@@ -36,7 +36,7 @@
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        4.2.0+git.1584013187.b45cfcb6
+Version:        4.2.0+git.1585096577.f3257c89
 Release:        0
 Url:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2

++++++ _service ++++++
--- /var/tmp/diff_new_pack.YKwqaB/_old  2020-03-29 14:55:59.655171997 +0200
+++ /var/tmp/diff_new_pack.YKwqaB/_new  2020-03-29 14:55:59.655171997 +0200
@@ -4,7 +4,7 @@
     <param name="scm">git</param>
     <param name="filename">crmsh</param>
     <param name="versionformat">4.2.0+git.%ct.%h</param>
-    <param name="revision">b45cfcb6</param>
+    <param name="revision">f3257c89</param>
     <param name="changesgenerate">enable</param>
   </service>
 

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.YKwqaB/_old  2020-03-29 14:55:59.679172015 +0200
+++ /var/tmp/diff_new_pack.YKwqaB/_new  2020-03-29 14:55:59.679172015 +0200
@@ -5,4 +5,4 @@
                 <param 
name="url">https://github.com/liangxin1300/crmsh.git</param>
               <param 
name="changesrevision">d8dc51b4cb34964aa72e918999ebc7f03b48f3c9</param></service><service
 name="tar_scm">
                 <param 
name="url">https://github.com/ClusterLabs/crmsh.git</param>
-              <param 
name="changesrevision">b256772e34cde162c8245581097d13070ae51254</param></service></servicedata>
\ No newline at end of file
+              <param 
name="changesrevision">f3257c89ff67f53ee9bd78c8a91f7553000172ec</param></service></servicedata>
\ No newline at end of file

++++++ crmsh-4.2.0+git.1584013187.b45cfcb6.tar.bz2 -> 
crmsh-4.2.0+git.1585096577.f3257c89.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.2.0+git.1584013187.b45cfcb6/.travis.yml 
new/crmsh-4.2.0+git.1585096577.f3257c89/.travis.yml
--- old/crmsh-4.2.0+git.1584013187.b45cfcb6/.travis.yml 2020-03-12 
12:39:47.000000000 +0100
+++ new/crmsh-4.2.0+git.1585096577.f3257c89/.travis.yml 2020-03-25 
01:36:17.000000000 +0100
@@ -72,6 +72,12 @@
         - $FUNCTIONAL_TEST qdevice before_install
       script:
         - $FUNCTIONAL_TEST qdevice run validate
+    
+    - name: "functional test for qdevice - user case"
+      before_install:
+        - $FUNCTIONAL_TEST qdevice before_install
+      script:
+        - $FUNCTIONAL_TEST qdevice run usercase
 
     - name: "functional test for resource subcommand"
       before_install:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1584013187.b45cfcb6/crmsh/bootstrap.py 
new/crmsh-4.2.0+git.1585096577.f3257c89/crmsh/bootstrap.py
--- old/crmsh-4.2.0+git.1584013187.b45cfcb6/crmsh/bootstrap.py  2020-03-12 
12:39:47.000000000 +0100
+++ new/crmsh-4.2.0+git.1585096577.f3257c89/crmsh/bootstrap.py  2020-03-25 
01:36:17.000000000 +0100
@@ -660,7 +660,7 @@
 
     # for cluster join, diskless_sbd flag is set in join_cluster() if
     # sbd is running on seed host
-    if configured_sbd_device() or _context.diskless_sbd:
+    if (configured_sbd_device() and _context.sbd_device) or 
_context.diskless_sbd:
         invoke("systemctl enable sbd.service")
     else:
         invoke("systemctl disable sbd.service")
@@ -784,9 +784,17 @@
 
 
 def csync2_update(path):
-    invoke("csync2 -rm %s" % (path))
-    invoke("csync2 -rf %s" % (path))
-    invoke("csync2 -rxv %s" % (path))
+    '''
+    Sync path to all peers
+
+    If there was a conflict, use '-f' to force this side to win
+    '''
+    invoke("csync2 -rm {}".format(path))
+    if invoke("csync2 -rxv {}".format(path)):
+        return
+    invoke("csync2 -rf {}".format(path))
+    if not invoke("csync2 -rxv {}".format(path)):
+        warn("{} was not synced".format(path))
 
 
 def init_csync2_remote():
@@ -1488,7 +1496,7 @@
 rsc_defaults rsc-options: resource-stickiness=1 migration-threshold=3
 """)
 
-    if configured_sbd_device():
+    if configured_sbd_device() and _context.sbd_device:
         if not invoke("crm configure primitive stonith-sbd 
stonith:external/sbd pcmk_delay_max=30s"):
             error("Can't create stonith-sbd primitive")
         if not invoke("crm configure property stonith-enabled=true"):
@@ -1694,8 +1702,8 @@
     # authorized_keys file (again, to help with the case where the
     # user has done manual initial setup without the assistance of
     # ha-cluster-init).
-    if not invoke("ssh root@%s crm cluster init ssh_remote" % (seed_host)):
-        error("Can't invoke crm cluster init ssh_remote on %s" % (seed_host))
+    if not invoke("ssh root@{} crm cluster init -i {} 
ssh_remote".format(seed_host, _context.nic)):
+        error("Can't invoke crm cluster init -i {} ssh_remote on 
{}".format(_context.nic, seed_host))
 
 
 def join_csync2(seed_host):
@@ -1716,8 +1724,8 @@
 
     # If we *were* updating /etc/hosts, the next line would have 
"\"$hosts_line\"" as
     # the last arg (but this requires re-enabling this functionality in 
ha-cluster-init)
-    if not invoke("ssh -o StrictHostKeyChecking=no root@{} crm cluster init 
csync2_remote {}".format(seed_host, utils.this_node())):
-        error("Can't invoke crm cluster init init csync2_remote on 
{}".format(seed_host))
+    if not invoke("ssh -o StrictHostKeyChecking=no root@{} crm cluster init -i 
{} csync2_remote {}".format(seed_host, _context.nic, utils.this_node())):
+        error("Can't invoke crm cluster init -i {} init csync2_remote on 
{}".format(_context.nic, seed_host))
 
     # This is necessary if syncing /etc/hosts (to ensure everyone's got the
     # same list of hosts)
@@ -1740,7 +1748,7 @@
     # they haven't gone to all nodes in the cluster, which means a
     # subseqent join of another node can fail its sync of corosync.conf
     # when it updates expected_votes.  Grrr...
-    if not invoke('ssh -o StrictHostKeyChecking=no root@%s "csync2 -mr / ; 
csync2 -fr / ; csync2 -xv"' % (seed_host)):
+    if not invoke('ssh -o StrictHostKeyChecking=no root@{} "csync2 -rm /; 
csync2 -rxv || csync2 -rf / && csync2 -rxv"'.format(seed_host)):
         print("")
         warn("csync2 run failed - some files may not be sync'd")
 
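For reference, the reworked csync2_update() amounts to: register the path, attempt a plain sync, and only force the local copy with '-f' (then sync again) if the first sync reports a conflict. A minimal standalone sketch of that flow, assuming csync2 is available and using subprocess directly instead of crmsh's invoke()/warn() helpers (run_ok below is an illustrative stand-in, not a crmsh function):

    import subprocess

    def run_ok(cmd):
        # True when the command exits with status 0 (stand-in for crmsh's invoke()).
        return subprocess.call(cmd, shell=True) == 0

    def csync2_update(path):
        run_ok("csync2 -rm {}".format(path))           # register the path
        if run_ok("csync2 -rxv {}".format(path)):      # plain sync succeeded
            return
        run_ok("csync2 -rf {}".format(path))           # conflict: force this side to win
        if not run_ok("csync2 -rxv {}".format(path)):
            print("WARNING: {} was not synced".format(path))

The remote one-liner used during join follows the same ordering: "csync2 -rm /; csync2 -rxv || csync2 -rf / && csync2 -rxv".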
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1584013187.b45cfcb6/crmsh/corosync.py 
new/crmsh-4.2.0+git.1585096577.f3257c89/crmsh/corosync.py
--- old/crmsh-4.2.0+git.1584013187.b45cfcb6/crmsh/corosync.py   2020-03-12 
12:39:47.000000000 +0100
+++ new/crmsh-4.2.0+git.1585096577.f3257c89/crmsh/corosync.py   2020-03-25 
01:36:17.000000000 +0100
@@ -89,8 +89,8 @@
     qdevice_path = "/etc/corosync/qdevice/net"
     qdevice_db_path = "/etc/corosync/qdevice/net/nssdb"
 
-    def __init__(self, ip, port=5403, algo="ffsplit",
-                 tie_breaker="lowest", tls="on", cluster_node=None, cmds=None):
+    def __init__(self, ip, port=5403, algo="ffsplit", tie_breaker="lowest",
+            tls="on", cluster_node=None, cmds=None, mode=None):
         self.ip = ip
         self.port = port
         self.algo = algo
@@ -99,6 +99,7 @@
         self.cluster_node = cluster_node
         self.askpass = False
         self.cmds = cmds
+        self.mode = mode
 
     @property
     def qnetd_cacert_on_qnetd(self):
@@ -137,6 +138,8 @@
         return "{}/{}/{}".format(self.qdevice_path, self.cluster_node, 
self.qdevice_p12_filename)
 
     def valid_attr(self):
+        if not bootstrap.package_is_installed("corosync-qdevice"):
+            raise ValueError("Package \"corosync-qdevice\" not installed on 
this node")
         if self.ip == utils.this_node() or self.ip in utils.ip_in_local():
             raise ValueError("host for qnetd must be a remote one")
         if not utils.resolve_hostnames([self.ip])[0]:
@@ -145,24 +148,31 @@
             raise ValueError("ssh service on \"{}\" not 
available".format(self.ip))
         if not utils.valid_port(self.port):
             raise ValueError("invalid qdevice port range(1024 - 65535)")
-        if self.algo not in ["ffsplit", "lms"]:
-            raise ValueError("invalid qdevice algorithm(ffsplit/lms)")
         if self.tie_breaker not in ["lowest", "highest"] and not 
utils.valid_nodeid(self.tie_breaker):
             raise ValueError("invalid qdevice 
tie_breaker(lowest/highest/valid_node_id)")
-        if self.tls not in ["on", "off", "required"]:
-            raise ValueError("invalid qdevice tls(on/off/required)")
         if self.cmds:
             for cmd in self.cmds.strip(';').split(';'):
                 if not cmd.startswith('/'):
                     raise ValueError("commands for heuristics should be 
absolute path")
                 if not os.path.exists(cmd.split()[0]):
-                    raise ValueError("command {} not 
exists".format(cmd.split()[0]))
+                    raise ValueError("command {} not 
exist".format(cmd.split()[0]))
 
     def valid_qnetd(self):
         if self.check_ssh_passwd_need():
             self.askpass = True
+
+        exception_msg = ""
+        suggest = ""
         if self.remote_running_cluster():
-            raise ValueError("host for qnetd must be a non-cluster node")
+            exception_msg = "host for qnetd must be a non-cluster node"
+            suggest = "change to another host or stop cluster service on 
{}".format(self.ip)
+        elif not self.qnetd_installed():
+            exception_msg = "Package \"corosync-qnetd\" not installed on 
{}".format(self.ip)
+            suggest = "install \"corosync-qnetd\" on {}".format(self.ip)
+
+        if exception_msg:
+            exception_msg += "\nCluster service already successfully started 
on this node\nIf you still want to use qdevice, {}\nThen run command \"crm 
cluster init qdevice --qnetd-hostname={}\"\nThis command will setup qdevice 
separately".format(suggest, self.ip)
+            raise ValueError(exception_msg)
 
     def check_ssh_passwd_need(self):
         return utils.check_ssh_passwd_need([self.ip])
@@ -178,6 +188,17 @@
         else:
             return True
 
+    def qnetd_installed(self):
+        cmd = "rpm -q --quiet corosync-qnetd"
+        if self.askpass:
+            print("Checking whether corosync-qnetd installed on 
node({})".format(self.ip))
+        try:
+            parallax.parallax_call([self.ip], cmd, self.askpass)
+        except ValueError:
+            return False
+        else:
+            return True
+
     def manage_qnetd(self, action):
         cmd = "systemctl {} {}".format(action, self.qnetd_service)
         if self.askpass:
@@ -460,9 +481,10 @@
         p.set('quorum.device.net.tie_breaker', self.tie_breaker)
         if self.cmds:
             p.add('quorum.device', make_section('quorum.device.heuristics', 
[]))
-            p.set('quorum.device.heuristics.mode', 'sync')
+            p.set('quorum.device.heuristics.mode', self.mode)
             for i, cmd in enumerate(self.cmds.strip(';').split(';')):
-                exec_name = 
"exec_{}{}".format(os.path.basename(cmd.split()[0]), i)
+                cmd_name = re.sub("[.-]", "_", 
os.path.basename(cmd.split()[0]))
+                exec_name = "exec_{}{}".format(cmd_name, i)
                 p.set('quorum.device.heuristics.{}'.format(exec_name), cmd)
 
         f = open(conf(), 'w')
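The qnetd-side validation added above boils down to running "rpm -q --quiet corosync-qnetd" on the prospective qnetd host and treating a non-zero exit status as "not installed". A rough standalone equivalent over plain ssh, rather than crmsh's parallax wrapper (host and helper names below are illustrative):

    import subprocess

    def remote_package_installed(host, package):
        # 'rpm -q --quiet <pkg>' exits 0 only if the package is installed.
        cmd = ["ssh", "-o", "StrictHostKeyChecking=no", "root@{}".format(host),
               "rpm -q --quiet {}".format(package)]
        return subprocess.call(cmd) == 0

    if not remote_package_installed("qnetd-node", "corosync-qnetd"):
        print('Package "corosync-qnetd" not installed on qnetd-node')

The diff also sanitises heuristics exec names with re.sub("[.-]", "_", ...), presumably because a dot in the command's basename would otherwise be read as a key separator in quorum.device.heuristics.exec_<name>.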
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1584013187.b45cfcb6/crmsh/ui_cluster.py 
new/crmsh-4.2.0+git.1585096577.f3257c89/crmsh/ui_cluster.py
--- old/crmsh-4.2.0+git.1584013187.b45cfcb6/crmsh/ui_cluster.py 2020-03-12 
12:39:47.000000000 +0100
+++ new/crmsh-4.2.0+git.1585096577.f3257c89/crmsh/ui_cluster.py 2020-03-25 
01:36:17.000000000 +0100
@@ -2,6 +2,7 @@
 # Copyright (C) 2013 Kristoffer Gronlund <[email protected]>
 # See COPYING for license information.
 
+import sys
 import re
 from argparse import ArgumentParser, RawDescriptionHelpFormatter
 from . import command
@@ -231,24 +232,22 @@
                                    help="Configure corosync with second 
heartbeat line")
         network_group.add_argument("-I", "--ipv6", action="store_true", 
dest="ipv6",
                                    help="Configure corosync use IPv6")
-        network_group.add_argument("--qnetd-hostname",
-                                   dest="qdevice", metavar="HOST",
+
+        qdevice_group = parser.add_argument_group("QDevice configuration", 
"Options for configuring QDevice and QNetd.")
+        qdevice_group.add_argument("--qnetd-hostname", dest="qdevice", 
metavar="HOST",
                                    help="HOST or IP of the QNetd server to be 
used")
-        network_group.add_argument("--qdevice-port",
-                                   dest="qdevice_port", metavar="PORT", 
type=int, default=5403,
+        qdevice_group.add_argument("--qdevice-port", dest="qdevice_port", 
metavar="PORT", type=int, default=5403,
                                    help="TCP PORT of QNetd 
server(default:5403)")
-        network_group.add_argument("--qdevice-algo",
-                                   dest="qdevice_algo", metavar="ALGORITHM", 
default="ffsplit",
+        qdevice_group.add_argument("--qdevice-algo", dest="qdevice_algo", 
metavar="ALGORITHM", default="ffsplit", choices=['ffsplit', 'lms'],
                                    help="QNetd decision ALGORITHM(ffsplit/lms, 
default:ffsplit)")
-        network_group.add_argument("--qdevice-tie-breaker",
-                                   dest="qdevice_tie_breaker", 
metavar="TIE_BREAKER", default="lowest",
+        qdevice_group.add_argument("--qdevice-tie-breaker", 
dest="qdevice_tie_breaker", metavar="TIE_BREAKER", default="lowest",
                                    help="QNetd 
TIE_BREAKER(lowest/highest/valid_node_id, default:lowest)")
-        network_group.add_argument("--qdevice-tls",
-                                   dest="qdevice_tls", metavar="TLS", 
default="on",
+        qdevice_group.add_argument("--qdevice-tls", dest="qdevice_tls", 
metavar="TLS", default="on", choices=['on', 'off', 'required'],
                                    help="Whether using TLS on 
QDevice/QNetd(on/off/required, default:on)")
-        network_group.add_argument("--qdevice-heuristics",
-                                   dest="qdevice_heuristics", 
metavar="COMMAND",
+        qdevice_group.add_argument("--qdevice-heuristics", 
dest="qdevice_heuristics", metavar="COMMAND",
                                    help="COMMAND to run with absolute path. 
For multiple commands, use \";\" to separate(details about heuristics can see 
man 8 corosync-qdevice)")
+        qdevice_group.add_argument("--qdevice-heuristics-mode", 
dest="qdevice_heuristics_mode", metavar="MODE", choices=['on', 'sync', 'off'],
+                                   help="MODE of operation of 
heuristics(on/sync/off, default:sync)")
 
         storage_group = parser.add_argument_group("Storage configuration", 
"Options for configuring shared storage.")
         storage_group.add_argument("-p", "--partition-device", 
dest="shared_device", metavar="DEVICE",
@@ -276,13 +275,19 @@
 
         qdevice = None
         if options.qdevice:
+            if options.qdevice_heuristics_mode and not 
options.qdevice_heuristics:
+                parser.error("Option --qdevice-heuristics is required if want 
to configure heuristics mode")
+            options.qdevice_heuristics_mode = options.qdevice_heuristics_mode 
or "sync"
             qdevice = corosync.QDevice(
                 options.qdevice,
                 port=options.qdevice_port,
                 algo=options.qdevice_algo,
                 tie_breaker=options.qdevice_tie_breaker,
                 tls=options.qdevice_tls,
-                cmds=options.qdevice_heuristics)
+                cmds=options.qdevice_heuristics,
+                mode=options.qdevice_heuristics_mode)
+        elif re.search("--qdevice-.*", ' '.join(sys.argv)):
+            parser.error("Option --qnetd-hostname is required if want to 
configure qdevice")
 
         bootstrap.bootstrap_init(
             cluster_name=options.name,
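In ui_cluster.py, the per-value checks for --qdevice-algo and --qdevice-tls move into argparse via choices=..., and two cross-option checks are done before building the QDevice object: --qdevice-heuristics-mode requires --qdevice-heuristics, and any --qdevice-* option requires --qnetd-hostname. A condensed sketch of that pattern (option names follow the diff; the rest is illustrative and omits the unrelated init options):

    import sys
    from argparse import ArgumentParser

    parser = ArgumentParser("init")
    qdevice_group = parser.add_argument_group(
        "QDevice configuration", "Options for configuring QDevice and QNetd.")
    qdevice_group.add_argument("--qnetd-hostname", dest="qdevice", metavar="HOST")
    qdevice_group.add_argument("--qdevice-algo", dest="qdevice_algo",
                               default="ffsplit", choices=["ffsplit", "lms"])
    qdevice_group.add_argument("--qdevice-tls", dest="qdevice_tls",
                               default="on", choices=["on", "off", "required"])
    qdevice_group.add_argument("--qdevice-heuristics", dest="qdevice_heuristics")
    qdevice_group.add_argument("--qdevice-heuristics-mode",
                               dest="qdevice_heuristics_mode",
                               choices=["on", "sync", "off"])
    options = parser.parse_args()

    if options.qdevice:
        # Heuristics mode only makes sense together with a heuristics command.
        if options.qdevice_heuristics_mode and not options.qdevice_heuristics:
            parser.error("Option --qdevice-heuristics is required if want to configure heuristics mode")
        options.qdevice_heuristics_mode = options.qdevice_heuristics_mode or "sync"
    elif any(arg.startswith("--qdevice-") for arg in sys.argv):
        # Simplified form of the re.search("--qdevice-.*", ...) check in the diff.
        parser.error("Option --qnetd-hostname is required if want to configure qdevice")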
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.2.0+git.1584013187.b45cfcb6/data-manifest 
new/crmsh-4.2.0+git.1585096577.f3257c89/data-manifest
--- old/crmsh-4.2.0+git.1584013187.b45cfcb6/data-manifest       2020-03-12 
12:39:47.000000000 +0100
+++ new/crmsh-4.2.0+git.1585096577.f3257c89/data-manifest       2020-03-25 
01:36:17.000000000 +0100
@@ -71,6 +71,7 @@
 test/features/environment.py
 test/features/qdevice_options.feature
 test/features/qdevice_setup_remove.feature
+test/features/qdevice_usercase.feature
 test/features/qdevice_validate.feature
 test/features/resource_failcount.feature
 test/features/resource_set.feature
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1584013187.b45cfcb6/test/docker_scripts.sh 
new/crmsh-4.2.0+git.1585096577.f3257c89/test/docker_scripts.sh
--- old/crmsh-4.2.0+git.1584013187.b45cfcb6/test/docker_scripts.sh      
2020-03-12 12:39:47.000000000 +0100
+++ new/crmsh-4.2.0+git.1585096577.f3257c89/test/docker_scripts.sh      
2020-03-25 01:36:17.000000000 +0100
@@ -9,7 +9,7 @@
 
   # deploy first node hanode1
   docker run -d --name=hanode1 --hostname hanode1 \
-             --privileged -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v "$(pwd):/app" 
${Docker_image}
+             --privileged -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v "$(pwd):/app" 
--shm-size="1g" ${Docker_image}
   docker network connect --ip=10.10.10.2 second_net hanode1
   docker exec -t hanode1 /bin/sh -c "echo \"10.10.10.3 hanode2\" >> /etc/hosts"
   if [ x"$1" == x"qdevice" ];then
@@ -21,7 +21,7 @@
 
   # deploy second node hanode2
   docker run -d --name=hanode2 --hostname hanode2 \
-             --privileged -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v "$(pwd):/app" 
${Docker_image}
+             --privileged -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v "$(pwd):/app" 
--shm-size="1g" ${Docker_image}
   docker network connect --ip=10.10.10.3 second_net hanode2
   docker exec -t hanode2 /bin/sh -c "echo \"10.10.10.2 hanode1\" >> /etc/hosts"
   if [ x"$1" == x"qdevice" ];then
@@ -34,9 +34,9 @@
   if [ x"$1" == x"qdevice" ];then
     # deploy node qnetd-node for qnetd service
     docker run -d --name=qnetd-node --hostname qnetd-node \
-              --privileged -v /sys/fs/cgroup:/sys/fs/cgroup:ro ${Docker_image}
+              --privileged -v /sys/fs/cgroup:/sys/fs/cgroup:ro --shm-size="1g" 
${Docker_image}
     docker network connect --ip=10.10.10.9 second_net qnetd-node
-    docker exec -t qnetd-node /bin/sh -c "zypper -n in corosync-qnetd"
+    docker exec -t qnetd-node /bin/sh -c "zypper ref;zypper -n in 
corosync-qnetd"
     docker exec -t qnetd-node /bin/sh -c "systemctl start sshd.service"
 
     # deploy node without ssh.service running for validation
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1584013187.b45cfcb6/test/features/qdevice_setup_remove.feature
 
new/crmsh-4.2.0+git.1585096577.f3257c89/test/features/qdevice_setup_remove.feature
--- 
old/crmsh-4.2.0+git.1584013187.b45cfcb6/test/features/qdevice_setup_remove.feature
  2020-03-12 12:39:47.000000000 +0100
+++ 
new/crmsh-4.2.0+git.1585096577.f3257c89/test/features/qdevice_setup_remove.feature
  2020-03-25 01:36:17.000000000 +0100
@@ -32,6 +32,8 @@
     Then    Cluster service is "started" on "hanode2"
     And     Online nodes are "hanode1 hanode2"
     And     Service "corosync-qdevice" is "stopped" on "hanode2"
+    When    Run "echo "# This is a test for bsc#1166684" >> 
/etc/corosync/corosync.conf" on "hanode1"
+    When    Run "scp /etc/corosync/corosync.conf root@hanode2:/etc/corosync" 
on "hanode1"
     When    Run "crm cluster init qdevice --qnetd-hostname=qnetd-node -y" on 
"hanode1"
     Then    Service "corosync-qdevice" is "started" on "hanode1"
     And     Service "corosync-qdevice" is "started" on "hanode2"
@@ -40,6 +42,24 @@
     And     Show corosync qdevice configuration
 
   @clean
+  Scenario: Setup qdevice with heuristics
+    When    Run "crm cluster init -y --no-overwrite-sshkey 
--qnetd-hostname=qnetd-node --qdevice-heuristics="/usr/bin/test -f 
/tmp/heuristics.txt" --qdevice-heuristics-mode="on"" on "hanode1"
+    Then    Cluster service is "started" on "hanode1"
+    And     Service "corosync-qdevice" is "started" on "hanode1"
+    When    Run "crm cluster join -c hanode1 -y" on "hanode2"
+    Then    Cluster service is "started" on "hanode2"
+    And     Online nodes are "hanode1 hanode2"
+    And     Service "corosync-qdevice" is "started" on "hanode2"
+    And     Service "corosync-qnetd" is "started" on "qnetd-node"
+    And     Show corosync qdevice configuration
+    When    Run "crm corosync status qnetd" on "hanode1"
+    Then    Expected "Heuristics:              Fail" in stdout
+    When    Run "touch /tmp/heuristics.txt" on "hanode1"
+    When    Run "sleep 30" on "hanode1"
+    When    Run "crm corosync status qnetd" on "hanode1"
+    Then    Expected "Heuristics:              Pass" in stdout
+
+  @clean
   Scenario: Remove qdevice from a two nodes cluster
     When    Run "crm cluster init --qnetd-hostname=qnetd-node -y 
--no-overwrite-sshkey" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1584013187.b45cfcb6/test/features/qdevice_usercase.feature 
new/crmsh-4.2.0+git.1585096577.f3257c89/test/features/qdevice_usercase.feature
--- 
old/crmsh-4.2.0+git.1584013187.b45cfcb6/test/features/qdevice_usercase.feature  
    1970-01-01 01:00:00.000000000 +0100
+++ 
new/crmsh-4.2.0+git.1585096577.f3257c89/test/features/qdevice_usercase.feature  
    2020-03-25 01:36:17.000000000 +0100
@@ -0,0 +1,63 @@
+@qdevice
+Feature: Verify usercase master survive when split-brain
+
+  Steps to setup a two-nodes cluster with heuristics qdevice,
+  started with a promotable clone resource, and make sure master side always 
with quorum:
+  1. Setup a two-nodes cluster
+  2. Generate script to check whether this node is master
+  3. Add a promotable clone resource
+  4. Setup qdevice with heuristics
+  5. Use iptables command to simulate split-brain
+  6. Check whether hanode1 has quorum, while hanode2 doesn't
+
+  Tag @clean means need to stop cluster service if the service is available
+
+  Background: Cluster and qdevice service are stopped
+    Given   Cluster service is "stopped" on "hanode1"
+    And     Cluster service is "stopped" on "hanode2"
+    And     Service "corosync-qdevice" is "stopped" on "hanode1"
+    And     Service "corosync-qdevice" is "stopped" on "hanode2"
+
+  @clean
+  Scenario: Master survive when split-brain
+    # Setup a two-nodes cluster
+    When    Run "crm cluster init -y --no-overwrite-sshkey" on "hanode1"
+    Then    Cluster service is "started" on "hanode1"
+    When    Run "crm cluster join -c hanode1 -y" on "hanode2"
+    Then    Cluster service is "started" on "hanode2"
+
+    # Generate script to check whether this node is master
+    When    Write multi lines to file "/etc/corosync/qdevice/check_master.sh"
+      """
+      #!/usr/bin/sh
+      crm_resource --locate -r promotable-1 2>&1 | grep Master | grep 
`crm_node -n` >/dev/null 2>&1
+      """
+    And     Run "chmod +x /etc/corosync/qdevice/check_master.sh" on "hanode1"
+    And     Run "scp -p /etc/corosync/qdevice/check_master.sh 
root@hanode2:/etc/corosync/qdevice" on "hanode1"
+    # Add a promotable clone resource and make sure hanode1 is master
+    And     Run "crm configure primitive stateful-1 ocf:pacemaker:Stateful op 
monitor_Slave interval=10s op monitor_Master interval=5s" on "hanode1"
+    And     Run "crm configure clone promotable-1 stateful-1 meta 
promotable=true" on "hanode1"
+    And     Run "sleep 5" on "hanode1"
+    Then    Show cluster status on "hanode1"
+
+    # Setup qdevice with heuristics
+    When    Run "crm cluster init qdevice --qnetd-hostname=qnetd-node 
--qdevice-heuristics=/etc/corosync/qdevice/check_master.sh -y" on "hanode1"
+    Then    Service "corosync-qdevice" is "started" on "hanode1"
+    And     Service "corosync-qdevice" is "started" on "hanode2"
+    When    Run "sleep 5" on "hanode1"
+    Then    Show status from qnetd
+    When    Run "corosync-quorumtool -s" on "hanode1"
+    Then    Expected "Quorate:          Yes" in stdout
+    When    Run "ssh root@hanode2 corosync-quorumtool -s" on "hanode1"
+    Then    Expected "Quorate:          Yes" in stdout
+    # Use iptables command to simulate split-brain
+    When    Run "iptables -I INPUT -s 172.17.0.3 -j DROP; iptables -I OUTPUT 
-d 172.17.0.3 -j DROP" on "hanode1"
+    And     Run "iptables -I INPUT -s 172.17.0.2 -j DROP; iptables -I OUTPUT 
-d 172.17.0.2 -j DROP" on "hanode2"
+    # Check whether hanode1 has quorum, while hanode2 doesn't
+    And     Run "sleep 20" on "hanode1"
+    When    Run "corosync-quorumtool -s" on "hanode1"
+    Then    Expected "Quorate:          Yes" in stdout
+    When    Run "ssh root@hanode2 corosync-quorumtool -s" on "hanode1"
+    Then    Expected "Quorate:          No" in stdout
+    And     Show cluster status on "hanode1"
+    And     Show cluster status on "hanode2"
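The heuristic at the centre of this scenario is the one-line check_master.sh written above: it exits 0 only on the node that currently hosts the Master instance of promotable-1, and a zero exit status is what corosync-qdevice heuristics count as a pass, so after the split only the master's partition keeps quorum. The same check expressed in Python, purely as an illustration (the feature file itself uses the shell one-liner):

    import subprocess

    def this_node_is_master(resource="promotable-1"):
        # Mirrors: crm_resource --locate -r promotable-1 | grep Master | grep `crm_node -n`
        out = subprocess.run(["crm_resource", "--locate", "-r", resource],
                             capture_output=True, text=True).stdout
        node = subprocess.run(["crm_node", "-n"],
                              capture_output=True, text=True).stdout.strip()
        return any("Master" in line and node in line for line in out.splitlines())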
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1584013187.b45cfcb6/test/features/qdevice_validate.feature 
new/crmsh-4.2.0+git.1585096577.f3257c89/test/features/qdevice_validate.feature
--- 
old/crmsh-4.2.0+git.1584013187.b45cfcb6/test/features/qdevice_validate.feature  
    2020-03-12 12:39:47.000000000 +0100
+++ 
new/crmsh-4.2.0+git.1585096577.f3257c89/test/features/qdevice_validate.feature  
    2020-03-25 01:36:17.000000000 +0100
@@ -29,26 +29,34 @@
     Then    Except "ERROR: cluster.init: invalid qdevice port range(1024 - 
65535)"
 
   @clean
-  Scenario: Option "--qdevice-algo" set wrong value
-    When    Try "crm cluster init --qnetd-hostname=qnetd-node 
--qdevice-algo=wrongalgo"
-    Then    Except "ERROR: cluster.init: invalid qdevice 
algorithm(ffsplit/lms)"
-
-  @clean
   Scenario: Option "--qdevice-tie-breaker" set wrong value
     When    Try "crm cluster init --qnetd-hostname=qnetd-node 
--qdevice-tie-breaker=wrongtiebreaker"
     Then    Except "ERROR: cluster.init: invalid qdevice 
tie_breaker(lowest/highest/valid_node_id)"
 
   @clean
-  Scenario: Option "--qdevice-tls" set wrong value
-    When    Try "crm cluster init --qnetd-hostname=qnetd-node 
--qdevice-tls=wrong"
-    Then    Except "ERROR: cluster.init: invalid qdevice tls(on/off/required)"
-
-  @clean
   Scenario: Option "--qdevice-heuristics" set wrong value
     When    Try "crm cluster init --qnetd-hostname=qnetd-node 
--qdevice-heuristics='ls /opt'"
     Then    Except "ERROR: cluster.init: commands for heuristics should be 
absolute path"
-    When    Try "crm cluster init --qnetd-hostname=qnetd-node 
--qdevice-heuristics='/bin/not_exists_cmd /opt'"
-    Then    Except "ERROR: cluster.init: command /bin/not_exists_cmd not 
exists"
+    When    Try "crm cluster init --qnetd-hostname=qnetd-node 
--qdevice-heuristics='/bin/not_exist_cmd /opt'"
+    Then    Except "ERROR: cluster.init: command /bin/not_exist_cmd not exist"
+
+  @clean
+  Scenario: Option "--qnetd-hostname" is required by other qdevice options
+    When    Try "crm cluster init --qdevice-port=1234"
+    Then    Except multiple lines
+      """
+      usage: init [options] [STAGE]
+      crm: error: Option --qnetd-hostname is required if want to configure 
qdevice
+      """
+
+  @clean
+  Scenario: Option --qdevice-heuristics is required if want to configure 
heuristics mode
+    When    Try "crm cluster init --qnetd-hostname=qnetd-node 
--qdevice-heuristics-mode="on""
+    Then    Except multiple lines
+      """
+      usage: init [options] [STAGE]
+      crm: error: Option --qdevice-heuristics is required if want to configure 
heuristics mode
+      """
 
   @clean
   Scenario: Node for qnetd is a cluster node
@@ -56,7 +64,30 @@
     When    Run "crm cluster init -y --no-overwrite-sshkey" on "hanode2"
     Then    Cluster service is "started" on "hanode2"
     When    Try "crm cluster init --qnetd-hostname=hanode2 -y 
--no-overwrite-sshkey"
-    Then    Except "ERROR: cluster.init: host for qnetd must be a non-cluster 
node"
+    Then    Except multiple lines
+      """"
+      ERROR: cluster.init: host for qnetd must be a non-cluster node
+      Cluster service already successfully started on this node
+      If you still want to use qdevice, change to another host or stop cluster 
service on hanode2
+      Then run command "crm cluster init qdevice --qnetd-hostname=hanode2"
+      This command will setup qdevice separately
+      """
+    And     Cluster service is "started" on "hanode1"
+    When    Run "crm cluster stop" on "hanode2"
+
+  @clean
+  Scenario: Node for qnetd not installed corosync-qnetd
+    Given   Cluster service is "stopped" on "hanode2"
+    When    Try "crm cluster init --qnetd-hostname=hanode2 -y 
--no-overwrite-sshkey"
+    Then    Except multiple lines
+      """"
+      ERROR: cluster.init: Package "corosync-qnetd" not installed on hanode2
+      Cluster service already successfully started on this node
+      If you still want to use qdevice, install "corosync-qnetd" on hanode2
+      Then run command "crm cluster init qdevice --qnetd-hostname=hanode2"
+      This command will setup qdevice separately
+      """
+    And     Cluster service is "started" on "hanode1"
 
   @clean
   Scenario: Run qdevice stage on inactive cluster node
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1584013187.b45cfcb6/test/features/steps/const.py 
new/crmsh-4.2.0+git.1585096577.f3257c89/test/features/steps/const.py
--- old/crmsh-4.2.0+git.1584013187.b45cfcb6/test/features/steps/const.py        
2020-03-12 12:39:47.000000000 +0100
+++ new/crmsh-4.2.0+git.1585096577.f3257c89/test/features/steps/const.py        
2020-03-25 01:36:17.000000000 +0100
@@ -91,6 +91,10 @@
   -M, --multi-heartbeats
                         Configure corosync with second heartbeat line
   -I, --ipv6            Configure corosync use IPv6
+
+QDevice configuration:
+  Options for configuring QDevice and QNetd.
+
   --qnetd-hostname HOST
                         HOST or IP of the QNetd server to be used
   --qdevice-port PORT   TCP PORT of QNetd server(default:5403)
@@ -105,6 +109,9 @@
                         COMMAND to run with absolute path. For multiple
                         commands, use ";" to separate(details about heuristics
                         can see man 8 corosync-qdevice)
+  --qdevice-heuristics-mode MODE
+                        MODE of operation of heuristics(on/sync/off,
+                        default:sync)
 
 Storage configuration:
   Options for configuring shared storage.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1584013187.b45cfcb6/test/features/steps/step_implenment.py 
new/crmsh-4.2.0+git.1585096577.f3257c89/test/features/steps/step_implenment.py
--- 
old/crmsh-4.2.0+git.1584013187.b45cfcb6/test/features/steps/step_implenment.py  
    2020-03-12 12:39:47.000000000 +0100
+++ 
new/crmsh-4.2.0+git.1585096577.f3257c89/test/features/steps/step_implenment.py  
    2020-03-25 01:36:17.000000000 +0100
@@ -6,6 +6,10 @@
                   run_command_local_or_remote
 import const
 
+@when('Write multi lines to file "{f}"')
+def step_impl(context, f):
+    with open(f, 'w') as fd:
+        fd.write(context.text)
 
 @given('Cluster service is "{state}" on "{addr}"')
 def step_impl(context, state, addr):
@@ -74,6 +78,12 @@
     context.command_error_output = None
 
 
+@then('Except multiple lines')
+def step_impl(context):
+    assert context.command_error_output.split('\n') == context.text.split('\n')
+    context.command_error_output = None
+
+
 @then('Except "{msg}" in stderr')
 def step_impl(context, msg):
     assert msg in context.command_error_output
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1584013187.b45cfcb6/test/unittests/test_bootstrap.py 
new/crmsh-4.2.0+git.1585096577.f3257c89/test/unittests/test_bootstrap.py
--- old/crmsh-4.2.0+git.1584013187.b45cfcb6/test/unittests/test_bootstrap.py    
2020-03-12 12:39:47.000000000 +0100
+++ new/crmsh-4.2.0+git.1585096577.f3257c89/test/unittests/test_bootstrap.py    
2020-03-25 01:36:17.000000000 +0100
@@ -282,3 +282,25 @@
         mock_csync2.assert_not_called()
         mock_stop_service.assert_not_called()
         mock_error.assert_not_called()
+
+    @mock.patch('crmsh.bootstrap.invoke')
+    def test_csync2_update_no_conflicts(self, mock_invoke):
+        mock_invoke.side_effect = [True, True]
+        bootstrap.csync2_update("/etc/corosync.conf")
+        mock_invoke.assert_has_calls([
+            mock.call("csync2 -rm /etc/corosync.conf"),
+            mock.call("csync2 -rxv /etc/corosync.conf")
+            ])
+
+    @mock.patch('crmsh.bootstrap.warn')
+    @mock.patch('crmsh.bootstrap.invoke')
+    def test_csync2_update(self, mock_invoke, mock_warn):
+        mock_invoke.side_effect = [True, False, True, False]
+        bootstrap.csync2_update("/etc/corosync.conf")
+        mock_invoke.assert_has_calls([
+            mock.call("csync2 -rm /etc/corosync.conf"),
+            mock.call("csync2 -rxv /etc/corosync.conf"),
+            mock.call("csync2 -rf /etc/corosync.conf"),
+            mock.call("csync2 -rxv /etc/corosync.conf")
+            ])
+        mock_warn.assert_called_once_with("/etc/corosync.conf was not synced")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1584013187.b45cfcb6/test/unittests/test_corosync.py 
new/crmsh-4.2.0+git.1585096577.f3257c89/test/unittests/test_corosync.py
--- old/crmsh-4.2.0+git.1584013187.b45cfcb6/test/unittests/test_corosync.py     
2020-03-12 12:39:47.000000000 +0100
+++ new/crmsh-4.2.0+git.1585096577.f3257c89/test/unittests/test_corosync.py     
2020-03-25 01:36:17.000000000 +0100
@@ -225,10 +225,10 @@
         self.qdevice_with_ip = corosync.QDevice("10.10.10.123")
         self.qdevice_with_hostname = corosync.QDevice("node.qnetd")
         self.qdevice_with_invalid_port = corosync.QDevice("10.10.10.123", 
port=100)
-        self.qdevice_with_invalid_algo = corosync.QDevice("10.10.10.123", 
algo="wrong")
         self.qdevice_with_invalid_tie_breaker = 
corosync.QDevice("10.10.10.123", tie_breaker="wrong")
-        self.qdevice_with_invalid_tls = corosync.QDevice("10.10.10.123", 
tls="wrong")
         self.qdevice_with_ip_cluster_node = corosync.QDevice("10.10.10.123", 
cluster_node="node1.com")
+        self.qdevice_with_invalid_cmds_relative_path = 
corosync.QDevice("10.10.10.123", cmds="ls")
+        self.qdevice_with_invalid_cmds_not_exist = 
corosync.QDevice("10.10.10.123", cmds="/not_exist")
 
     def tearDown(self):
         """
@@ -241,23 +241,36 @@
         Global tearDown.
         """
 
+    @mock.patch("crmsh.bootstrap.package_is_installed")
+    def test_valid_attr_not_installed(self, mock_installed):
+        mock_installed.return_value = False
+        with self.assertRaises(ValueError) as err:
+            self.qdevice_with_ip.valid_attr()
+        self.assertEqual("Package \"corosync-qdevice\" not installed on this 
node", str(err.exception))
+        mock_installed.assert_called_once_with("corosync-qdevice")
+
     @mock.patch("crmsh.utils.this_node")
     @mock.patch("crmsh.utils.ip_in_local")
-    def test_valid_attr_remote_exception(self, mock_ip_in_local, 
mock_this_node):
+    @mock.patch("crmsh.bootstrap.package_is_installed")
+    def test_valid_attr_remote_exception(self, mock_installed, 
mock_ip_in_local, mock_this_node):
+        mock_installed.return_value = True
         mock_ip_in_local.return_value = ["192.168.1.1", "10.10.10.123"]
         mock_this_node.return_value = "node1.com"
 
         with self.assertRaises(ValueError) as err:
             self.qdevice_with_ip.valid_attr()
-
         self.assertEqual("host for qnetd must be a remote one", 
str(err.exception))
+
+        mock_installed.assert_called_once_with("corosync-qdevice")
         mock_ip_in_local.assert_called_once_with()
         mock_this_node.assert_called_once_with()
 
     @mock.patch("crmsh.utils.this_node")
     @mock.patch("crmsh.utils.ip_in_local")
     @mock.patch("crmsh.utils.resolve_hostnames")
-    def test_valid_attr_unreachable_exception(self, mock_resolve, 
mock_ip_in_local, mock_this_node):
+    @mock.patch("crmsh.bootstrap.package_is_installed")
+    def test_valid_attr_unreachable_exception(self, mock_installed, 
mock_resolve, mock_ip_in_local, mock_this_node):
+        mock_installed.return_value = True
         mock_resolve.return_value = (False, "node.qnetd")
         mock_ip_in_local.return_value = ["192.168.1.1", "10.10.10.123"]
         mock_this_node.return_value = "node1.com"
@@ -266,6 +279,7 @@
             self.qdevice_with_hostname.valid_attr()
         self.assertEqual("host \"node.qnetd\" is unreachable", 
str(err.exception))
 
+        mock_installed.assert_called_once_with("corosync-qdevice")
         mock_ip_in_local.assert_called_once_with()
         mock_this_node.assert_called_once_with()
         mock_resolve.assert_called_once_with(["node.qnetd"])
@@ -274,8 +288,10 @@
     @mock.patch("crmsh.utils.ip_in_local")
     @mock.patch("crmsh.utils.resolve_hostnames")
     @mock.patch("crmsh.utils.check_port_open")
-    def test_valid_attr_ssh_service_exception(self, mock_port_open, 
mock_resolve,
-                                              mock_ip_in_local, 
mock_this_node):
+    @mock.patch("crmsh.bootstrap.package_is_installed")
+    def test_valid_attr_ssh_service_exception(self, mock_installed, 
mock_port_open,
+            mock_resolve, mock_ip_in_local, mock_this_node):
+        mock_installed.return_value = True
         mock_resolve.return_value = (True, None)
         mock_ip_in_local.return_value = ["192.168.1.1", "10.10.10.11"]
         mock_this_node.return_value = "node1.com"
@@ -285,6 +301,7 @@
             self.qdevice_with_ip.valid_attr()
         self.assertEqual("ssh service on \"10.10.10.123\" not available", 
str(err.exception))
 
+        mock_installed.assert_called_once_with("corosync-qdevice")
         mock_ip_in_local.assert_called_once_with()
         mock_this_node.assert_called_once_with()
         mock_resolve.assert_called_once_with(["10.10.10.123"])
@@ -295,8 +312,10 @@
     @mock.patch("crmsh.utils.resolve_hostnames")
     @mock.patch("crmsh.utils.check_port_open")
     @mock.patch("crmsh.utils.valid_port")
-    def test_valid_attr_invalid_port_exception(self, mock_valid_port, 
mock_port_open,
-                                               mock_resolve, mock_ip_in_local, 
mock_this_node):
+    @mock.patch("crmsh.bootstrap.package_is_installed")
+    def test_valid_attr_invalid_port_exception(self, mock_installed, 
mock_valid_port,
+            mock_port_open, mock_resolve, mock_ip_in_local, mock_this_node):
+        mock_installed.return_value = True
         mock_resolve.return_value = (True, None)
         mock_ip_in_local.return_value = ["192.168.1.1", "10.10.10.11"]
         mock_this_node.return_value = "node1.com"
@@ -307,6 +326,7 @@
             self.qdevice_with_invalid_port.valid_attr()
         self.assertEqual("invalid qdevice port range(1024 - 65535)", 
str(err.exception))
 
+        mock_installed.assert_called_once_with("corosync-qdevice")
         mock_ip_in_local.assert_called_once_with()
         mock_this_node.assert_called_once_with()
         mock_resolve.assert_called_once_with(["10.10.10.123"])
@@ -318,23 +338,29 @@
     @mock.patch("crmsh.utils.resolve_hostnames")
     @mock.patch("crmsh.utils.check_port_open")
     @mock.patch("crmsh.utils.valid_port")
-    def test_valid_attr_invalid_port_exception(self, mock_valid_port, 
mock_port_open,
-                                               mock_resolve, mock_ip_in_local, 
mock_this_node):
+    @mock.patch("crmsh.utils.valid_nodeid")
+    @mock.patch("crmsh.bootstrap.package_is_installed")
+    def test_valid_attr_invalid_nodeid_exception(self, mock_installed, 
mock_valid_nodeid,
+            mock_valid_port, mock_port_open, mock_resolve, mock_ip_in_local, 
mock_this_node):
+        mock_installed.return_value = True
         mock_resolve.return_value = (True, None)
         mock_ip_in_local.return_value = ["192.168.1.1", "10.10.10.11"]
         mock_this_node.return_value = "node1.com"
         mock_port_open.return_value = True
         mock_valid_port.return_value = True
+        mock_valid_nodeid.return_value = False
 
         with self.assertRaises(ValueError) as err:
-            self.qdevice_with_invalid_algo.valid_attr()
-        self.assertEqual("invalid qdevice algorithm(ffsplit/lms)", 
str(err.exception))
+            self.qdevice_with_invalid_tie_breaker.valid_attr()
+        self.assertEqual("invalid qdevice 
tie_breaker(lowest/highest/valid_node_id)", str(err.exception))
 
+        mock_installed.assert_called_once_with("corosync-qdevice")
         mock_ip_in_local.assert_called_once_with()
         mock_this_node.assert_called_once_with()
         mock_resolve.assert_called_once_with(["10.10.10.123"])
         mock_port_open.assert_called_once_with("10.10.10.123", 22)
         mock_valid_port.assert_called_once_with(5403)
+        mock_valid_nodeid.assert_called_once_with("wrong")
 
     @mock.patch("crmsh.utils.this_node")
     @mock.patch("crmsh.utils.ip_in_local")
@@ -342,25 +368,28 @@
     @mock.patch("crmsh.utils.check_port_open")
     @mock.patch("crmsh.utils.valid_port")
     @mock.patch("crmsh.utils.valid_nodeid")
-    def test_valid_attr_invalid_nodeid_exception(self, mock_valid_nodeid, 
mock_valid_port, mock_port_open,
-                                                 mock_resolve, 
mock_ip_in_local, mock_this_node):
+    @mock.patch("crmsh.bootstrap.package_is_installed")
+    def test_valid_attr_invalid_cmds_relative_path(self, mock_installed, 
mock_valid_nodeid,
+            mock_valid_port, mock_port_open, mock_resolve, mock_ip_in_local, 
mock_this_node):
+        mock_installed.return_value = True
         mock_resolve.return_value = (True, None)
         mock_ip_in_local.return_value = ["192.168.1.1", "10.10.10.11"]
         mock_this_node.return_value = "node1.com"
         mock_port_open.return_value = True
         mock_valid_port.return_value = True
-        mock_valid_nodeid.return_value = False
+        mock_valid_nodeid.return_value = True
 
         with self.assertRaises(ValueError) as err:
-            self.qdevice_with_invalid_tie_breaker.valid_attr()
-        self.assertEqual("invalid qdevice 
tie_breaker(lowest/highest/valid_node_id)", str(err.exception))
+            self.qdevice_with_invalid_cmds_relative_path.valid_attr()
+        self.assertEqual("commands for heuristics should be absolute path", 
str(err.exception))
 
+        mock_installed.assert_called_once_with("corosync-qdevice")
         mock_ip_in_local.assert_called_once_with()
         mock_this_node.assert_called_once_with()
         mock_resolve.assert_called_once_with(["10.10.10.123"])
         mock_port_open.assert_called_once_with("10.10.10.123", 22)
         mock_valid_port.assert_called_once_with(5403)
-        mock_valid_nodeid.assert_called_once_with("wrong")
+        mock_valid_nodeid.assert_not_called()
 
     @mock.patch("crmsh.utils.this_node")
     @mock.patch("crmsh.utils.ip_in_local")
@@ -368,8 +397,10 @@
     @mock.patch("crmsh.utils.check_port_open")
     @mock.patch("crmsh.utils.valid_port")
     @mock.patch("crmsh.utils.valid_nodeid")
-    def test_valid_attr_invalid_tls_exception(self, mock_valid_nodeid, 
mock_valid_port, mock_port_open,
-                                              mock_resolve, mock_ip_in_local, 
mock_this_node):
+    @mock.patch("crmsh.bootstrap.package_is_installed")
+    def test_valid_attr_invalid_cmds_not_exist(self, mock_installed, 
mock_valid_nodeid,
+            mock_valid_port, mock_port_open, mock_resolve, mock_ip_in_local, 
mock_this_node):
+        mock_installed.return_value = True
         mock_resolve.return_value = (True, None)
         mock_ip_in_local.return_value = ["192.168.1.1", "10.10.10.11"]
         mock_this_node.return_value = "node1.com"
@@ -378,9 +409,10 @@
         mock_valid_nodeid.return_value = True
 
         with self.assertRaises(ValueError) as err:
-            self.qdevice_with_invalid_tls.valid_attr()
-        self.assertEqual("invalid qdevice tls(on/off/required)", 
str(err.exception))
+            self.qdevice_with_invalid_cmds_not_exist.valid_attr()
+        self.assertEqual("command /not_exist not exist", str(err.exception))
 
+        mock_installed.assert_called_once_with("corosync-qdevice")
         mock_ip_in_local.assert_called_once_with()
         mock_this_node.assert_called_once_with()
         mock_resolve.assert_called_once_with(["10.10.10.123"])
@@ -388,17 +420,34 @@
         mock_valid_port.assert_called_once_with(5403)
         mock_valid_nodeid.assert_not_called()
 
-    def test_valid_qnetd_exception(self):
+    def test_valid_qnetd_remote_cluster_node(self):
         self.qdevice_with_ip.check_ssh_passwd_need = 
mock.Mock(return_value=True)
         self.qdevice_with_ip.remote_running_cluster = 
mock.Mock(return_value=True)
+        excepted_err_string = 'host for qnetd must be a non-cluster 
node\nCluster service already successfully started on this node\nIf you still 
want to use qdevice, change to another host or stop cluster service on 
10.10.10.123\nThen run command "crm cluster init qdevice 
--qnetd-hostname=10.10.10.123"\nThis command will setup qdevice separately'
+        self.maxDiff = None
 
         with self.assertRaises(ValueError) as err:
             self.qdevice_with_ip.valid_qnetd()
-        self.assertEqual("host for qnetd must be a non-cluster node", 
str(err.exception))
+        self.assertEqual(excepted_err_string, str(err.exception))
 
         self.qdevice_with_ip.check_ssh_passwd_need.assert_called_once_with()
         self.qdevice_with_ip.remote_running_cluster.assert_called_once_with()
 
+    def test_valid_qnetd_not_installed(self):
+        self.qdevice_with_ip.check_ssh_passwd_need = 
mock.Mock(return_value=True)
+        self.qdevice_with_ip.remote_running_cluster = 
mock.Mock(return_value=False)
+        self.qdevice_with_ip.qnetd_installed = mock.Mock(return_value=False)
+        excepted_err_string = 'Package "corosync-qnetd" not installed on 
10.10.10.123\nCluster service already successfully started on this node\nIf you 
still want to use qdevice, install "corosync-qnetd" on 10.10.10.123\nThen run 
command "crm cluster init qdevice --qnetd-hostname=10.10.10.123"\nThis command 
will setup qdevice separately'
+        self.maxDiff = None
+
+        with self.assertRaises(ValueError) as err:
+            self.qdevice_with_ip.valid_qnetd()
+        self.assertEqual(excepted_err_string, str(err.exception))
+
+        self.qdevice_with_ip.check_ssh_passwd_need.assert_called_once_with()
+        self.qdevice_with_ip.remote_running_cluster.assert_called_once_with()
+        self.qdevice_with_ip.qnetd_installed.assert_called_once_with()
+
     @mock.patch("crmsh.utils.check_ssh_passwd_need")
     def test_check_ssh_passwd_need(self, mock_ssh_passwd):
         mock_ssh_passwd.return_value = True
@@ -418,6 +467,18 @@
         mock_call.assert_called_once_with(["10.10.10.123"], "systemctl -q 
is-active pacemaker", False)
 
     @mock.patch("crmsh.parallax.parallax_call")
+    def test_qnetd_installed_false(self, mock_call):
+        mock_call.side_effect = ValueError(mock.Mock(), "Failed on 
10.10.10.123: error happen")
+        self.assertFalse(self.qdevice_with_ip.qnetd_installed())
+        mock_call.assert_called_once_with(["10.10.10.123"], "rpm -q --quiet 
corosync-qnetd", False)
+
+    @mock.patch("crmsh.parallax.parallax_call")
+    def test_qnetd_installed_true(self, mock_call):
+        mock_call.return_value = ["10.10.10.123", (0, None, None)]
+        self.assertTrue(self.qdevice_with_ip.qnetd_installed())
+        mock_call.assert_called_once_with(["10.10.10.123"], "rpm -q --quiet 
corosync-qnetd", False)
+
+    @mock.patch("crmsh.parallax.parallax_call")
     def test_manage_qnetd(self, mock_call):
         mock_call.return_value = ["10.10.10.123", (0, None, None)]
         self.qdevice_with_ip.manage_qnetd("test")