Hello community,

Here is the log from the commit of package crmsh for openSUSE:Factory, checked in at 2020-03-27 21:57:41.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.3160 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "crmsh"

Fri Mar 27 21:57:41 2020 rev:180 rq:788734 version:4.2.0+git.1585276059.882beb65

Changes:
--------
--- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes      2020-03-25 23:47:24.256066504 +0100
+++ /work/SRC/openSUSE:Factory/.crmsh.new.3160/crmsh.changes    2020-03-27 21:58:01.326818044 +0100
@@ -1,0 +2,7 @@
+Fri Mar 27 02:39:28 UTC 2020 - xli...@suse.com
+
+- Update to version 4.2.0+git.1585276059.882beb65:
+  * Dev: behave: add functional test for geo cluster setup
+  * Low: bootstrap: Simplify bootstrap context
+
+-------------------------------------------------------------------

Old:
----
  crmsh-4.2.0+git.1585096577.f3257c89.tar.bz2

New:
----
  crmsh-4.2.0+git.1585276059.882beb65.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.HPW1xA/_old  2020-03-27 21:58:02.778818889 +0100
+++ /var/tmp/diff_new_pack.HPW1xA/_new  2020-03-27 21:58:02.782818891 +0100
@@ -36,7 +36,7 @@
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        4.2.0+git.1585096577.f3257c89
+Version:        4.2.0+git.1585276059.882beb65
 Release:        0
 Url:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.HPW1xA/_old  2020-03-27 21:58:02.838818924 +0100
+++ /var/tmp/diff_new_pack.HPW1xA/_new  2020-03-27 21:58:02.842818926 +0100
@@ -5,4 +5,4 @@
                 <param name="url">https://github.com/liangxin1300/crmsh.git</param>
               <param name="changesrevision">d8dc51b4cb34964aa72e918999ebc7f03b48f3c9</param></service><service name="tar_scm">
                 <param name="url">https://github.com/ClusterLabs/crmsh.git</param>
-              <param name="changesrevision">f3257c89ff67f53ee9bd78c8a91f7553000172ec</param></service></servicedata>
\ No newline at end of file
+              <param name="changesrevision">25123c13b0eb32e24e39d87cce11b5267f95ebe7</param></service></servicedata>
\ No newline at end of file

++++++ crmsh-4.2.0+git.1585096577.f3257c89.tar.bz2 -> crmsh-4.2.0+git.1585276059.882beb65.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.2.0+git.1585096577.f3257c89/.travis.yml new/crmsh-4.2.0+git.1585276059.882beb65/.travis.yml
--- old/crmsh-4.2.0+git.1585096577.f3257c89/.travis.yml 2020-03-25 01:36:17.000000000 +0100
+++ new/crmsh-4.2.0+git.1585276059.882beb65/.travis.yml 2020-03-27 03:27:39.000000000 +0100
@@ -13,15 +13,16 @@
   global:
     - CC_TEST_REPORTER_ID=a2579335b631ec35473874d7bb4fe983025c0287cea89c9dc34c35f98ee3963d
     - FUNCTIONAL_TEST=$TRAVIS_BUILD_DIR/test/docker_scripts.sh
+    - IMAGE=liangxin1300/hatbw
 
 jobs:
   include:
     - stage: test
       name: "unit test"
       before_install:
-        - docker pull liangxin1300/haleap:15.2
+        - docker pull $IMAGE
       script:
-        - docker run -t -v "$(pwd):/app" liangxin1300/haleap:15.2 /bin/sh -c "cd /app; TOXENV=py36-codeclimate; tox"
+        - docker run -t -v "$(pwd):/app" $IMAGE /bin/sh -c "cd /app; TOXENV=py38-codeclimate; tox"
 
     - name: "original regression test"
       before_script:
@@ -29,9 +30,9 @@
         - chmod +x ./cc-test-reporter
         - ./cc-test-reporter before-build
       before_install:
-        - docker pull liangxin1300/haleap:15.2
+        - docker pull $IMAGE
       script:
-        - docker run -t -v "$(pwd):/app" liangxin1300/haleap:15.2 /bin/sh -c "cd /app; ./test/run-in-travis.sh"
+        - docker run -t -v "$(pwd):/app" $IMAGE /bin/sh -c "cd /app; ./test/run-in-travis.sh"
       after_failure:
         - sudo cat $TRAVIS_BUILD_DIR/crmtestout/regression.out $TRAVIS_BUILD_DIR/crmtestout/crm.*
       after_script:
@@ -55,6 +56,12 @@
       script:
         - $FUNCTIONAL_TEST bootstrap run options
 
+    - name: "functional test for geo cluster"
+      before_install:
+        - $FUNCTIONAL_TEST geo before_install
+      script:
+        - $FUNCTIONAL_TEST geo run setup
+
     - name: "functional test for qdevice - setup and remove"
       before_install:
         - $FUNCTIONAL_TEST qdevice before_install
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.2.0+git.1585096577.f3257c89/crmsh/bootstrap.py new/crmsh-4.2.0+git.1585276059.882beb65/crmsh/bootstrap.py
--- old/crmsh-4.2.0+git.1585096577.f3257c89/crmsh/bootstrap.py  2020-03-25 01:36:17.000000000 +0100
+++ new/crmsh-4.2.0+git.1585276059.882beb65/crmsh/bootstrap.py  2020-03-27 03:27:39.000000000 +0100
@@ -50,27 +50,59 @@
     Context object used to avoid having to pass these variables
     to every bootstrap method.
     """
-    def __init__(self, quiet, yes_to_all, nic=None, ip_address=None, ip_network=None):
-        self.quiet = quiet
-        self.yes_to_all = yes_to_all
-        self.nic = nic
-        self.ip_address = ip_address
-        self.ip_network = ip_network
+    def __init__(self):
+        '''
+        Initialize attributes
+        '''
+        self.quiet = None
+        self.yes_to_all = None
+        self.template = None
         self.cluster_name = None
-        self.cluster_node = None
-        self.ocfs2_device = None
-        self.shared_device = None
-        self.sbd_device = None
-        self.diskless_sbd = False  # if True, enable SBD for diskless operation
+        self.diskless_sbd = None
+        self.watchdog = None
+        self.no_overwrite_sshkey = None
+        self.nic = None
         self.unicast = None
-        self.ipv6 = None
         self.admin_ip = None
-        self.watchdog = None
-        self.host_status = None
-        self.connect_name = None
         self.second_hb = None
-        self.ui_context = None
+        self.ipv6 = None
         self.qdevice = None
+        self.qdevice_host = None
+        self.qdevice_port = None
+        self.qdevice_algo = None
+        self.qdevice_tie_breaker = None
+        self.qdevice_tls = None
+        self.qdevice_heuristics = None
+        self.qdevice_heuristics_mode = None
+        self.shared_device = None
+        self.sbd_device = None
+        self.ocfs2_device = None
+        self.cluster_node = None
+        self.force = None
+        self.arbitrator = None
+        self.clusters = None
+        self.tickets = None
+        self.ip_address = None
+        self.ip_network = None
+
+    @classmethod
+    def set_context(cls, options):
+        ctx = cls()
+        for opt in vars(options):
+            setattr(ctx, opt, getattr(options, opt))
+        return ctx
+
+    def init_qdevice(self):
+        if not self.qdevice_host:
+            return
+        self.qdevice = corosync.QDevice(
+                self.qdevice_host,
+                port=self.qdevice_port,
+                algo=self.qdevice_algo,
+                tie_breaker=self.qdevice_tie_breaker,
+                tls=self.qdevice_tls,
+                cmds=self.qdevice_heuristics,
+                mode=self.qdevice_heuristics_mode)
 
 
 _context = None
@@ -1724,8 +1756,9 @@
 
     # If we *were* updating /etc/hosts, the next line would have "\"$hosts_line\"" as
     # the last arg (but this requires re-enabling this functionality in ha-cluster-init)
-    if not invoke("ssh -o StrictHostKeyChecking=no root@{} crm cluster init -i {} csync2_remote {}".format(seed_host, _context.nic, utils.this_node())):
-        error("Can't invoke crm cluster init -i {} init csync2_remote on {}".format(_context.nic, seed_host))
+    cmd = "crm cluster init -i {} csync2_remote {}".format(_context.nic, utils.this_node())
+    if not invoke("ssh -o StrictHostKeyChecking=no root@{} {}".format(seed_host, cmd)):
+        error("Can't invoke \"{}\" on {}".format(cmd, seed_host))
 
     # This is necessary if syncing /etc/hosts (to ensure everyone's got the
     # same list of hosts)
@@ -2215,53 +2248,10 @@
     return nodename == utils.this_node()
 
 
-def bootstrap_init(cluster_name="hacluster", ui_context=None, nic=None, ocfs2_device=None,
-                   shared_device=None, sbd_device=None, diskless_sbd=False, quiet=False,
-                   template=None, admin_ip=None, yes_to_all=False, no_overwrite_sshkey=False,
-                   unicast=False, second_hb=False, ipv6=False, watchdog=None, qdevice=None, stage=None, args=None):
-    """
-    -i <nic>
-    -o <ocfs2-device>
-    -p <shared-device>
-    -s <sbd-device>
-    -S - configure SBD without disk
-    -t <template>
-    -A [<admin-ip>]
-    -q - quiet
-    -y - yes to all
-    -u - unicast
-    <stage>
-
-    stages:
-    ssh
-    ssh_remote
-    csync2
-    csync2_remote
-    corosync
-    storage
-    sbd
-    cluster
-    vgfs
-    admin
-    qdevice
+def bootstrap_init(context):
+    """
+    Init cluster process
     """
-    global _context
-    _context = Context(quiet=quiet, yes_to_all=yes_to_all, nic=nic)
-    _context.cluster_name = cluster_name
-    _context.ocfs2_device = ocfs2_device
-    _context.shared_device = shared_device
-    _context.sbd_device = sbd_device
-    _context.diskless_sbd = diskless_sbd
-    _context.unicast = unicast
-    _context.second_hb = second_hb
-    _context.ipv6 = ipv6
-    _context.admin_ip = admin_ip
-    _context.watchdog = watchdog
-    _context.ui_context = ui_context
-    _context.qdevice = qdevice
-    _context.no_overwrite_sshkey = no_overwrite_sshkey
-    _context.stage = stage
-
     def check_option():
         if _context.admin_ip and not valid_adminIP(_context.admin_ip):
             error("Invalid option: admin_ip")
@@ -2271,6 +2261,11 @@
             except ValueError as err:
                 error(err)
 
+    global _context
+    _context = context
+    _context.init_qdevice()
+
+    stage = _context.stage
     if stage is None:
         stage = ""
 
@@ -2296,6 +2291,7 @@
         if not check_prereqs(stage):
             return
     elif stage == 'csync2_remote':
+        args = _context.args
         log("args: {}".format(args))
         if len(args) != 2:
             error("Expected NODE argument to csync2_remote")
@@ -2306,18 +2302,18 @@
     if stage != "":
         globals()["init_" + stage]()
     else:
-        if watchdog is not None:
+        if _context.watchdog is not None:
             init_watchdog()
         init_ssh()
         init_csync2()
         init_corosync()
         init_remote_auth()
-        if template == 'ocfs2':
-            if sbd_device is None or ocfs2_device is None:
+        if _context.template == 'ocfs2':
+            if _context.sbd_device is None or _context.ocfs2_device is None:
                 init_storage()
         init_sbd()
         init_cluster()
-        if template == 'ocfs2':
+        if _context.template == 'ocfs2':
             init_vgfs()
         init_admin()
         init_qdevice()
@@ -2325,24 +2321,12 @@
     status("Done (log saved to %s)" % (LOG_FILE))
 
 
-def bootstrap_join(cluster_node=None, ui_context=None, nic=None, quiet=False, yes_to_all=False, watchdog=None, stage=None):
+def bootstrap_join(context):
     """
-    -c <cluster-node>
-    -i <nic>
-    -q - quiet
-    -y - yes to all
-    <stage>
-    # stages:
-    ssh
-    csync2
-    ssh_merge
-    cluster
+    Join cluster process
     """
     global _context
-    _context = Context(quiet=quiet, yes_to_all=yes_to_all, nic=nic)
-    _context.cluster_node = cluster_node
-    _context.watchdog = watchdog
-    _context.ui_context = ui_context
+    _context = context
 
     check_tty()
 
@@ -2355,10 +2339,11 @@
 
     init()
 
-    if stage != "":
-        globals()["join_" + stage](cluster_node)
+    cluster_node = _context.cluster_node
+    if _context.stage != "":
+        globals()["join_" + _context.stage](cluster_node)
     else:
-        if not yes_to_all and cluster_node is None:
+        if not _context.yes_to_all and cluster_node is None:
             status("""Join This Node to Cluster:
   You will be asked for the IP address of an existing node, from which
   configuration will be copied.  If you have not already configured
@@ -2385,19 +2370,15 @@
     invoke("touch {}".format(PCMK_REMOTE_AUTH))
 
 
-def bootstrap_remove(cluster_node=None, ui_context=None, quiet=False, yes_to_all=False, force=False,
-                     qdevice=None):
+def bootstrap_remove(context):
     """
-    -c <cluster-node> - node to remove from cluster
-    -q - quiet
-    -y - yes to all
-    -f - force removal of self
+    Remove node from cluster, or remove qdevice configuration
     """
     global _context
-    _context = Context(quiet=quiet, yes_to_all=yes_to_all)
-    _context.cluster_node = cluster_node
-    _context.ui_context = ui_context
-    _context.qdevice = qdevice
+    _context = context
+    yes_to_all = _context.yes_to_all
+    cluster_node = _context.cluster_node
+    force = _context.force
 
     if _context.qdevice:
         if not utils.is_qdevice_configured():
@@ -2527,13 +2508,12 @@
     os.chmod(BOOTH_CFG, 0o644)
 
 
-def bootstrap_init_geo(quiet, yes_to_all, arbitrator, clusters, tickets, ui_context=None):
+def bootstrap_init_geo(context):
     """
     Configure as a geo cluster member.
     """
     global _context
-    _context = Context(quiet=quiet, yes_to_all=yes_to_all)
-    _context.ui_context = ui_context
+    _context = context
 
     if os.path.exists(BOOTH_CFG) and not confirm("This will overwrite {} - continue?".format(BOOTH_CFG)):
         return
@@ -2548,18 +2528,18 @@
     # set common.startup.degr-wfc-timeout 120
 
     create_booth_authkey()
-    create_booth_config(arbitrator, clusters, tickets)
+    create_booth_config(_context.arbitrator, _context.clusters, _context.tickets)
     status("Sync booth configuration across cluster")
     csync2_update("/etc/booth")
     init_csync2_geo()
-    geo_cib_config(clusters)
+    geo_cib_config(_context.clusters)
 
 
 def geo_fetch_config(node):
     # TODO: clean this up
     status("Retrieving configuration - This may prompt for root@%s:" % (node))
     tmpdir = tmpfiles.create_dir()
-    invoke("scp root@%s:'/etc/booth/*' %s/" % (node, tmpdir))
+    invoke("scp -oStrictHostKeyChecking=no root@%s:'/etc/booth/*' %s/" % 
(node, tmpdir))
     try:
         if os.path.isfile("%s/authkey" % (tmpdir)):
             invoke("mv %s/authkey %s" % (tmpdir, BOOTH_AUTH))
@@ -2589,30 +2569,30 @@
     crm_configure_load("update", crm_template.substitute(iprules=" 
".join(iprule.format(k, v) for k, v in clusters.items())))
 
 
-def bootstrap_join_geo(quiet, yes_to_all, node, clusters, ui_context=None):
+def bootstrap_join_geo(context):
     """
     Run on second cluster to add to a geo configuration.
    It fetches its booth configuration from the other node (cluster node or arbitrator).
     """
     global _context
-    _context = Context(quiet=quiet, yes_to_all=yes_to_all)
-    _context.ui_context = ui_context
+    _context = context
     init_common_geo()
     check_tty()
-    geo_fetch_config(node)
+    geo_fetch_config(_context.cluster_node)
     status("Sync booth configuration across cluster")
     csync2_update("/etc/booth")
-    geo_cib_config(clusters)
+    geo_cib_config(_context.clusters)
 
 
-def bootstrap_arbitrator(quiet, yes_to_all, node, ui_context=None):
+def bootstrap_arbitrator(context):
     """
     Configure this machine as an arbitrator.
    It fetches its booth configuration from a cluster node already in the cluster.
     """
     global _context
-    _context = Context(quiet=quiet, yes_to_all=yes_to_all)
-    _context.ui_context = ui_context
+    _context = context
+    node = _context.cluster_node
+
     init_common_geo()
     check_tty()
     geo_fetch_config(node)
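
To summarize the bootstrap.py refactor above: instead of each bootstrap_*
entry point taking a long keyword-argument list and copying it field by field
into the module-level _context, a single Context object now predeclares every
option, is filled generically from the parsed command-line options by
set_context(), and is passed around whole; QDevice construction moves behind
Context.init_qdevice(), which only builds a corosync.QDevice when a QNetd
host was given. A minimal standalone sketch of the pattern (simplified from
the diff; only a few of the real attributes are shown):

    from argparse import Namespace

    class Context(object):
        """Holds all bootstrap options; handed to each bootstrap entry point
        as one object instead of a long argument list."""

        def __init__(self):
            # Every known option starts as None; parsed options overwrite them.
            self.cluster_name = None
            self.qdevice_host = None
            self.stage = None

        @classmethod
        def set_context(cls, options):
            # Copy each attribute of the argparse Namespace onto a fresh
            # context; argparse "dest" names therefore double as attribute names.
            ctx = cls()
            for opt in vars(options):
                setattr(ctx, opt, getattr(options, opt))
            return ctx

    options = Namespace(cluster_name="cluster1", qdevice_host=None, stage="")
    ctx = Context.set_context(options)
    print(ctx.cluster_name)  # -> cluster1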
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.2.0+git.1585096577.f3257c89/crmsh/ui_cluster.py new/crmsh-4.2.0+git.1585276059.882beb65/crmsh/ui_cluster.py
--- old/crmsh-4.2.0+git.1585096577.f3257c89/crmsh/ui_cluster.py 2020-03-25 01:36:17.000000000 +0100
+++ new/crmsh-4.2.0+git.1585276059.882beb65/crmsh/ui_cluster.py 2020-03-27 03:27:39.000000000 +0100
@@ -208,7 +208,7 @@
                             help='Answer "yes" to all prompts (use with 
caution, this is destructive, especially during the "storage" stage. The 
/root/.ssh/id_rsa key will be overwritten unless the option 
"--no-overwrite-sshkey" is used)')
         parser.add_argument("-t", "--template", dest="template",
                             help='Optionally configure cluster with template 
"name" (currently only "ocfs2" is valid here)')
-        parser.add_argument("-n", "--name", metavar="NAME", dest="name", 
default="hacluster",
+        parser.add_argument("-n", "--name", metavar="NAME", 
dest="cluster_name", default="hacluster",
                             help='Set the name of the configured cluster.')
         parser.add_argument("-N", "--nodes", metavar="NODES", dest="nodes",
                             help='Additional nodes to add to the created 
cluster. May include the current node, which will always be the initial cluster 
node.')
@@ -234,7 +234,7 @@
                                    help="Configure corosync use IPv6")
 
         qdevice_group = parser.add_argument_group("QDevice configuration", "Options for configuring QDevice and QNetd.")
-        qdevice_group.add_argument("--qnetd-hostname", dest="qdevice", metavar="HOST",
+        qdevice_group.add_argument("--qnetd-hostname", dest="qdevice_host", metavar="HOST",
                                    help="HOST or IP of the QNetd server to be used")
         qdevice_group.add_argument("--qdevice-port", dest="qdevice_port", metavar="PORT", type=int, default=5403,
                                    help="TCP PORT of QNetd server(default:5403)")
@@ -270,45 +270,21 @@
         if options.template and options.template != "ocfs2":
             parser.error("Invalid template (%s)" % (options.template))
 
-        # if options.geo and options.name == "hacluster":
-        #    parser.error("For a geo cluster, each cluster must have a unique 
name (use --name to set)")
-
-        qdevice = None
-        if options.qdevice:
+        if options.qdevice_host:
             if options.qdevice_heuristics_mode and not options.qdevice_heuristics:
                 parser.error("Option --qdevice-heuristics is required if want to configure heuristics mode")
             options.qdevice_heuristics_mode = options.qdevice_heuristics_mode or "sync"
-            qdevice = corosync.QDevice(
-                options.qdevice,
-                port=options.qdevice_port,
-                algo=options.qdevice_algo,
-                tie_breaker=options.qdevice_tie_breaker,
-                tls=options.qdevice_tls,
-                cmds=options.qdevice_heuristics,
-                mode=options.qdevice_heuristics_mode)
         elif re.search("--qdevice-.*", ' '.join(sys.argv)):
             parser.error("Option --qnetd-hostname is required if want to 
configure qdevice")
 
-        bootstrap.bootstrap_init(
-            cluster_name=options.name,
-            ui_context=context,
-            nic=options.nic,
-            ocfs2_device=options.ocfs2_device,
-            shared_device=options.shared_device,
-            sbd_device=options.sbd_device,
-            diskless_sbd=options.diskless_sbd,
-            quiet=options.quiet,
-            template=options.template,
-            admin_ip=options.admin_ip,
-            yes_to_all=options.yes_to_all,
-            no_overwrite_sshkey=options.no_overwrite_sshkey,
-            unicast=options.unicast,
-            second_hb=options.second_hb,
-            ipv6=options.ipv6,
-            watchdog=options.watchdog,
-            qdevice=qdevice,
-            stage=stage,
-            args=args)
+        # if options.geo and options.name == "hacluster":
+        #    parser.error("For a geo cluster, each cluster must have a unique 
name (use --name to set)")
+        boot_context = bootstrap.Context.set_context(options)
+        boot_context.ui_context = context
+        boot_context.stage = stage
+        boot_context.args = args
+
+        bootstrap.bootstrap_init(boot_context)
 
         # if options.geo:
         #    bootstrap.bootstrap_init_geo()
@@ -358,14 +334,11 @@
         if stage not in ("ssh", "csync2", "ssh_merge", "cluster", ""):
             parser.error("Invalid stage (%s)" % (stage))
 
-        bootstrap.bootstrap_join(
-            cluster_node=options.cluster_node,
-            ui_context=context,
-            nic=options.nic,
-            quiet=options.quiet,
-            yes_to_all=options.yes_to_all,
-            watchdog=options.watchdog,
-            stage=stage)
+        join_context = bootstrap.Context.set_context(options)
+        join_context.ui_context = context
+        join_context.stage = stage
+
+        bootstrap.bootstrap_join(join_context)
 
         return True
 
@@ -417,21 +390,16 @@
 
         if options.cluster_node is not None and options.cluster_node not in args:
             args = list(args) + [options.cluster_node]
+
+        rm_context = bootstrap.Context.set_context(options)
+        rm_context.ui_context = context
+
         if len(args) == 0:
-            bootstrap.bootstrap_remove(
-                cluster_node=None,
-                ui_context=context,
-                quiet=options.quiet,
-                yes_to_all=options.yes_to_all,
-                qdevice=options.qdevice)
+            bootstrap.bootstrap_remove(rm_context)
         else:
             for node in args:
-                bootstrap.bootstrap_remove(
-                    cluster_node=node,
-                    ui_context=context,
-                    quiet=options.quiet,
-                    yes_to_all=options.yes_to_all,
-                    force=options.force)
+                rm_context.cluster_node = node
+                bootstrap.bootstrap_remove(rm_context)
         return True
 
     @command.skill_level('administrator')
@@ -529,7 +497,13 @@
                 ticketlist = [t for t in re.split('[ ,;]+', options.tickets)]
             except ValueError:
                 parser.error("Invalid ticket list")
-        bootstrap.bootstrap_init_geo(options.quiet, options.yes_to_all, options.arbitrator, clustermap, ticketlist, ui_context=context)
+
+        geo_context = bootstrap.Context.set_context(options)
+        geo_context.clusters = clustermap
+        geo_context.tickets = ticketlist
+        geo_context.ui_context = context
+
+        bootstrap.bootstrap_init_geo(geo_context)
         return True
 
     @command.name("geo_join")
@@ -543,13 +517,13 @@
         parser.add_argument("-h", "--help", action="store_true", dest="help", 
help="Show this help message")
         parser.add_argument("-q", "--quiet", help="Be quiet (don't describe 
what's happening, just do it)", action="store_true", dest="quiet")
         parser.add_argument("-y", "--yes", help='Answer "yes" to all prompts 
(use with caution)', action="store_true", dest="yes_to_all")
-        parser.add_argument("-c", "--cluster-node", help="IP address of an 
already-configured geo cluster or arbitrator", dest="node", metavar="IP")
+        parser.add_argument("-c", "--cluster-node", help="IP address of an 
already-configured geo cluster or arbitrator", dest="cluster_node", 
metavar="IP")
         parser.add_argument("-s", "--clusters", help="Geo cluster description 
(see geo-init for details)", dest="clusters", metavar="DESC")
         options, args = parse_options(parser, args)
         if options is None or args is None:
             return
         errs = []
-        if options.node is None:
+        if options.cluster_node is None:
             errs.append("The --cluster-node argument is required.")
         if options.clusters is None:
             errs.append("The --clusters argument is required.")
@@ -558,7 +532,12 @@
         clustermap = self._parse_clustermap(options.clusters)
         if clustermap is None:
             parser.error("Invalid cluster description format")
-        bootstrap.bootstrap_join_geo(options.quiet, options.yes_to_all, options.node, clustermap, ui_context=context)
+
+        geo_context = bootstrap.Context.set_context(options)
+        geo_context.clusters = clustermap
+        geo_context.ui_context = context
+
+        bootstrap.bootstrap_join_geo(geo_context)
         return True
 
     @command.name("geo_init_arbitrator")
@@ -572,11 +551,15 @@
         parser.add_argument("-h", "--help", action="store_true", dest="help", 
help="Show this help message")
         parser.add_argument("-q", "--quiet", help="Be quiet (don't describe 
what's happening, just do it)", action="store_true", dest="quiet")
         parser.add_argument("-y", "--yes", help='Answer "yes" to all prompts 
(use with caution)', action="store_true", dest="yes_to_all")
-        parser.add_argument("-c", "--cluster-node", help="IP address of an 
already-configured geo cluster", dest="other", metavar="IP")
+        parser.add_argument("-c", "--cluster-node", help="IP address of an 
already-configured geo cluster", dest="cluster_node", metavar="IP")
         options, args = parse_options(parser, args)
         if options is None or args is None:
             return
-        bootstrap.bootstrap_arbitrator(options.quiet, options.yes_to_all, options.other, ui_context=context)
+
+        geo_context = bootstrap.Context.set_context(options)
+        geo_context.ui_context = context
+
+        bootstrap.bootstrap_arbitrator(geo_context)
         return True
 
     @command.completers_repeating(compl.call(scripts.param_completion_list, 'health'))
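
The ui_cluster.py changes above pair with that refactor: several argparse
dest values are renamed (name to cluster_name, qdevice to qdevice_host, node
and other to cluster_node) so that the generic vars()/setattr copy in
Context.set_context lands every parsed option on the Context attribute of the
same name. A small standalone illustration of why the dest spelling matters
(hypothetical parser, not the full crmsh one):

    import argparse

    parser = argparse.ArgumentParser()
    # dest="cluster_name" (rather than the implicit "name") makes the parsed
    # Namespace use exactly the attribute name Context expects.
    parser.add_argument("-n", "--name", metavar="NAME", dest="cluster_name",
                        default="hacluster")

    options = parser.parse_args(["-n", "cluster1"])
    print(vars(options))  # {'cluster_name': 'cluster1'}

The renamed dest values are also why the expected error strings in
test/features/bootstrap_bugs.feature change further down: argparse-level
validation reports against the dest name.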
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.2.0+git.1585096577.f3257c89/data-manifest new/crmsh-4.2.0+git.1585276059.882beb65/data-manifest
--- old/crmsh-4.2.0+git.1585096577.f3257c89/data-manifest       2020-03-25 01:36:17.000000000 +0100
+++ new/crmsh-4.2.0+git.1585276059.882beb65/data-manifest       2020-03-27 03:27:39.000000000 +0100
@@ -69,6 +69,7 @@
 test/features/bootstrap_init_join_remove.feature
 test/features/bootstrap_options.feature
 test/features/environment.py
+test/features/geo_setup.feature
 test/features/qdevice_options.feature
 test/features/qdevice_setup_remove.feature
 test/features/qdevice_usercase.feature
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.2.0+git.1585096577.f3257c89/test/docker_scripts.sh new/crmsh-4.2.0+git.1585276059.882beb65/test/docker_scripts.sh
--- old/crmsh-4.2.0+git.1585096577.f3257c89/test/docker_scripts.sh      2020-03-25 01:36:17.000000000 +0100
+++ new/crmsh-4.2.0+git.1585276059.882beb65/test/docker_scripts.sh      2020-03-27 03:27:39.000000000 +0100
@@ -1,7 +1,7 @@
 #!/bin/bash
-Docker_image='liangxin1300/haleap:15.2'
+Docker_image='liangxin1300/hatbw'
 HA_packages='pacemaker corosync corosync-qdevice'
-TEST_TYPE='bootstrap qdevice hb_report'
+TEST_TYPE='bootstrap qdevice hb_report geo'
 
 before() {
   docker pull ${Docker_image}
@@ -16,7 +16,9 @@
     docker exec -t hanode1 /bin/sh -c "echo \"10.10.10.9 qnetd-node\" >> /etc/hosts"
     docker exec -t hanode1 /bin/sh -c "echo \"10.10.10.10 node-without-ssh\" >> /etc/hosts"
   fi
-  docker exec -t hanode1 /bin/sh -c "zypper -n in ${HA_packages}"
+  if [ x"$1" == x"geo" ];then
+    docker exec -t hanode1 /bin/sh -c "echo \"10.10.10.4 hanode3\" >> /etc/hosts"
+  fi
   docker exec -t hanode1 /bin/sh -c "cd /app; ./test/run-in-travis.sh build"
 
   # deploy second node hanode2
@@ -27,7 +29,9 @@
   if [ x"$1" == x"qdevice" ];then
     docker exec -t hanode2 /bin/sh -c "echo \"10.10.10.9 qnetd-node\" >> /etc/hosts"
   fi
-  docker exec -t hanode2 /bin/sh -c "zypper -n in ${HA_packages}"
+  if [ x"$1" == x"geo" ];then
+    docker exec -t hanode2 /bin/sh -c "echo \"10.10.10.4 hanode3\" >> /etc/hosts"
+  fi
   docker exec -t hanode2 /bin/sh -c "systemctl start sshd.service"
   docker exec -t hanode2 /bin/sh -c "cd /app; ./test/run-in-travis.sh build"
 
@@ -45,6 +49,16 @@
     docker network connect --ip=10.10.10.10 second_net node-without-ssh
     docker exec -t node-without-ssh /bin/sh -c "systemctl stop sshd.service"
   fi
+
+  if [ x"$1" == x"geo" ];then
+    docker run -d --name=hanode3 --hostname hanode3 \
+           --privileged -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v "$(pwd):/app" --shm-size="1g" ${Docker_image}
+    docker network connect --ip=10.10.10.4 second_net hanode3
+    docker exec -t hanode3 /bin/sh -c "echo \"10.10.10.2 hanode1\" >> /etc/hosts"
+    docker exec -t hanode3 /bin/sh -c "echo \"10.10.10.3 hanode2\" >> /etc/hosts"
+    docker exec -t hanode3 /bin/sh -c "systemctl start sshd.service"
+    docker exec -t hanode3 /bin/sh -c "cd /app; ./test/run-in-travis.sh build"
+  fi
 }
 
 run() {
@@ -55,6 +69,7 @@
   echo "Usage: ./test/`basename $0` <`echo ${TEST_TYPE// /|}`>"
 }
 
+
 # $1 could be "bootstrap", "hb_report", "qdevice" etc.
 # $2 could be "before_install" or "run"
 # $3 could be suffix of feature file
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.2.0+git.1585096577.f3257c89/test/features/bootstrap_bugs.feature new/crmsh-4.2.0+git.1585276059.882beb65/test/features/bootstrap_bugs.feature
--- old/crmsh-4.2.0+git.1585096577.f3257c89/test/features/bootstrap_bugs.feature        2020-03-25 01:36:17.000000000 +0100
+++ new/crmsh-4.2.0+git.1585276059.882beb65/test/features/bootstrap_bugs.feature        2020-03-27 03:27:39.000000000 +0100
@@ -22,7 +22,7 @@
     When    Try "crm -c ' '"
     Then    Except "ERROR: Space value not allowed for dest "cib""
     When    Try "crm cluster init --name ' '"
-    Then    Except "ERROR: cluster.init: Space value not allowed for dest 
"name""
+    Then    Except "ERROR: cluster.init: Space value not allowed for dest 
"cluster_name""
     When    Try "crm cluster join -c ' '"
     Then    Except "ERROR: cluster.join: Space value not allowed for dest 
"cluster_node""
     When    Try "crm cluster remove -c ' '"
@@ -30,9 +30,9 @@
     When    Try "crm cluster geo-init -a ' '"
     Then    Except "ERROR: cluster.geo_init: Space value not allowed for dest 
"arbitrator""
     When    Try "crm cluster geo-join -c ' '"
-    Then    Except "ERROR: cluster.geo_join: Space value not allowed for dest 
"node""
+    Then    Except "ERROR: cluster.geo_join: Space value not allowed for dest 
"cluster_node""
     When    Try "crm cluster geo-init-arbitrator -c ' '"
-    Then    Except "ERROR: cluster.geo_init_arbitrator: Space value not 
allowed for dest "other""
+    Then    Except "ERROR: cluster.geo_init_arbitrator: Space value not 
allowed for dest "cluster_node""
 
   @clean
   Scenario: Setup cluster with crossed network(udpu only)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.2.0+git.1585096577.f3257c89/test/features/geo_setup.feature new/crmsh-4.2.0+git.1585276059.882beb65/test/features/geo_setup.feature
--- old/crmsh-4.2.0+git.1585096577.f3257c89/test/features/geo_setup.feature     1970-01-01 01:00:00.000000000 +0100
+++ new/crmsh-4.2.0+git.1585276059.882beb65/test/features/geo_setup.feature     2020-03-27 03:27:39.000000000 +0100
@@ -0,0 +1,28 @@
+@geo
+Feature: geo cluster
+
+  Test geo cluster setup using bootstrap
+  Tag @clean means need to stop cluster service if the service is available
+
+  @clean
+  Scenario: GEO cluster setup
+    Given   Cluster service is "stopped" on "hanode1"
+    And     Cluster service is "stopped" on "hanode2"
+    When    Run "crm cluster init -y --no-overwrite-sshkey -n cluster1" on 
"hanode1"
+    Then    Cluster service is "started" on "hanode1"
+    When    Run "crm configure primitive vip IPaddr2 params ip=10.10.10.123" 
on "hanode1"
+
+    When    Run "crm cluster init -y --no-overwrite-sshkey -n cluster2" on 
"hanode2"
+    Then    Cluster service is "started" on "hanode2"
+    When    Run "crm configure primitive vip IPaddr2 params ip=10.10.10.124" 
on "hanode2"
+
+    When    Run "crm cluster geo_init -y --clusters "cluster1=10.10.10.123 
cluster2=10.10.10.124" --tickets tickets-geo --arbitrator hanode3" on "hanode1"
+    When    Run "crm cluster geo_join -y --cluster-node hanode1 --clusters 
"cluster1=10.10.10.123 cluster2=10.10.10.124"" on "hanode2"
+
+    Given   Service "booth@booth" is "stopped" on "hanode3"
+    When    Run "crm cluster geo_init_arbitrator -y --cluster-node hanode1" on 
"hanode3"
+    Then    Service "booth@booth" is "started" on "hanode3"
+    When    Run "crm resource start g-booth" on "hanode1"
+    Then    Show cluster status on "hanode1"
+    When    Run "crm resource start g-booth" on "hanode2"
+    Then    Show cluster status on "hanode2"
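
The scenario above drives two one-node clusters (cluster1 on hanode1,
cluster2 on hanode2) plus hanode3 as booth arbitrator, matching the hanode3
container added in test/docker_scripts.sh. For orientation, here is a hedged
sketch of what a behave step such as Run "..." on "..." could map to; the
real step definitions live under test/features and are not part of this
diff, so the docker-based transport below is an assumption:

    import subprocess

    from behave import when

    @when('Run "{cmd}" on "{node}"')
    def step_run(context, cmd, node):
        # Assumed transport: execute the command inside the named container,
        # mirroring the "docker exec" calls in test/docker_scripts.sh.
        result = subprocess.run(["docker", "exec", node, "/bin/sh", "-c", cmd],
                                capture_output=True, text=True)
        assert result.returncode == 0, result.stderr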
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.2.0+git.1585096577.f3257c89/test/run-in-travis.sh new/crmsh-4.2.0+git.1585276059.882beb65/test/run-in-travis.sh
--- old/crmsh-4.2.0+git.1585096577.f3257c89/test/run-in-travis.sh       2020-03-25 01:36:17.000000000 +0100
+++ new/crmsh-4.2.0+git.1585276059.882beb65/test/run-in-travis.sh       2020-03-27 03:27:39.000000000 +0100
@@ -26,7 +26,7 @@
                configure
                make_install
                exit $?;;
-       bootstrap|qdevice|hb_report|resource)
+       bootstrap|qdevice|hb_report|resource|geo)
                functional_tests $1 $2
                exit $?;;
        *)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.2.0+git.1585096577.f3257c89/test/testcases/scripts.exp new/crmsh-4.2.0+git.1585276059.882beb65/test/testcases/scripts.exp
--- old/crmsh-4.2.0+git.1585096577.f3257c89/test/testcases/scripts.exp  2020-03-25 01:36:17.000000000 +0100
+++ new/crmsh-4.2.0+git.1585276059.882beb65/test/testcases/scripts.exp  2020-03-27 03:27:39.000000000 +0100
@@ -287,7 +287,7 @@
 ** localhost - crm --wait --no configure load update <<temporary file>>
 OK: 10: Configure cluster resources
 .INP: json '["show", "mailto"]'
-{"category": "basic", "longdesc": "Notifies recipient by e-mail in the event 
of a resource takeover.", "name": "mailto", "shortdesc": "E-Mail", "steps": 
[{"longdesc": " This is a resource agent for MailTo. It sends email to a 
sysadmin\nwhenever  a takeover occurs.", "parameters": [{"advanced": false, 
"longdesc": "", "name": "id", "required": true, "shortdesc": "Identifier for 
the cluster resource", "type": "resource", "unique": true}, {"advanced": false, 
"example": "0", "longdesc": " The email address of sysadmin.", "name": "email", 
"required": true, "shortdesc": "Email address", "type": "email", "unique": 
false}, {"advanced": false, "example": "Resource Group", "longdesc": " The 
subject of the email.", "name": "subject", "required": false, "shortdesc": 
"Subject", "type": "string", "unique": false}], "required": true, "shortdesc": 
"Notifies recipients by email in the event of resource takeover"}]}
+{"category": "basic", "longdesc": "Notifies recipient by e-mail in the event 
of a resource takeover.", "name": "mailto", "shortdesc": "E-Mail", "steps": 
[{"longdesc": " This is a resource agent for MailTo. It sends email to a 
sysadmin\nwhenever  a takeover occurs.", "parameters": [{"advanced": false, 
"longdesc": "", "name": "id", "required": true, "shortdesc": "Identifier for 
the cluster resource", "type": "resource", "unique": true}, {"advanced": false, 
"example": "", "longdesc": " The email address of sysadmin.", "name": "email", 
"required": true, "shortdesc": "Email address", "type": "email", "unique": 
false}, {"advanced": false, "example": "Resource Group", "longdesc": " The 
subject of the email.", "name": "subject", "required": false, "shortdesc": 
"Subject", "type": "string", "unique": false}], "required": true, "shortdesc": 
"Notifies recipients by email in the event of resource takeover"}]}
 .INP: json '["verify", "mailto", {"id":"foo", "email":"t...@example.com", 
"subject":"hello"}]'
 {"longdesc": "", "name": "install", "nodes": "", "shortdesc": "Ensure mail 
package is installed", "text": "mailx"}
 {"longdesc": "", "name": "cib", "nodes": "", "shortdesc": "Configure cluster 
resources", "text": "primitive foo 
ocf:heartbeat:MailTo\n\temail=\"t...@example.com\"\n\tsubject=\"hello\"\n\top 
start timeout=\"10\"\n\top stop timeout=\"10\"\n\top monitor interval=\"10\" 
timeout=\"10\"\n\nclone c-foo foo"}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.2.0+git.1585096577.f3257c89/tox.ini new/crmsh-4.2.0+git.1585276059.882beb65/tox.ini
--- old/crmsh-4.2.0+git.1585096577.f3257c89/tox.ini     2020-03-25 01:36:17.000000000 +0100
+++ new/crmsh-4.2.0+git.1585276059.882beb65/tox.ini     2020-03-27 03:27:39.000000000 +0100
@@ -1,6 +1,6 @@
 # content of: tox.ini , put in same dir as setup.py
 [tox]
-envlist = py36
+envlist = py38
 
 [base]
 deps =
@@ -16,7 +16,7 @@
 commands =
     py.test -vv --cov=crmsh --cov-config .coveragerc --cov-report term --cov-report html {posargs}
 
-[testenv:py36-codeclimate]
+[testenv:py38-codeclimate]
 passenv = TRAVIS TRAVIS_*
 changedir=test/unittests
 deps =

