Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked 
in at 2020-07-08 19:18:21
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.3060 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "crmsh"

Wed Jul  8 19:18:21 2020 rev:186 rq:819388 version:4.2.0+git.1594199184.309141ea

Changes:
--------
--- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes      2020-07-01 
14:26:55.258809622 +0200
+++ /work/SRC/openSUSE:Factory/.crmsh.new.3060/crmsh.changes    2020-07-08 
19:18:55.556167918 +0200
@@ -1,0 +2,8 @@
+Wed Jul 08 09:18:41 UTC 2020 - xli...@suse.com
+
+- Update to version 4.2.0+git.1594199184.309141ea:
+  * Dev: unittest: unit test for ssh key configuration improvement
+  * Dev: behave: adjust based on ssh key configuration improvement
+  * High: bootstrap: ssh key configuration improvement(bsc#1169581)
+
+-------------------------------------------------------------------

Old:
----
  crmsh-4.2.0+git.1592790745.eaa14889.tar.bz2

New:
----
  crmsh-4.2.0+git.1594199184.309141ea.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.o63zte/_old  2020-07-08 19:18:57.880176164 +0200
+++ /var/tmp/diff_new_pack.o63zte/_new  2020-07-08 19:18:57.884176179 +0200
@@ -36,7 +36,7 @@
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        4.2.0+git.1592790745.eaa14889
+Version:        4.2.0+git.1594199184.309141ea
 Release:        0
 Url:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.o63zte/_old  2020-07-08 19:18:57.924176320 +0200
+++ /var/tmp/diff_new_pack.o63zte/_new  2020-07-08 19:18:57.924176320 +0200
@@ -5,4 +5,4 @@
                 <param 
name="url">https://github.com/liangxin1300/crmsh.git</param>
               <param 
name="changesrevision">d8dc51b4cb34964aa72e918999ebc7f03b48f3c9</param></service><service
 name="tar_scm">
                 <param 
name="url">https://github.com/ClusterLabs/crmsh.git</param>
-              <param 
name="changesrevision">be7cc3b187c4d63b47944e70ac7e6d79776c4668</param></service></servicedata>
\ No newline at end of file
+              <param 
name="changesrevision">7a1d008b1ffef8f266afc845f361f5e47e367e40</param></service></servicedata>
\ No newline at end of file

++++++ crmsh-4.2.0+git.1592790745.eaa14889.tar.bz2 -> 
crmsh-4.2.0+git.1594199184.309141ea.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1592790745.eaa14889/crmsh/bootstrap.py 
new/crmsh-4.2.0+git.1594199184.309141ea/crmsh/bootstrap.py
--- old/crmsh-4.2.0+git.1592790745.eaa14889/crmsh/bootstrap.py  2020-06-22 
03:52:25.000000000 +0200
+++ new/crmsh-4.2.0+git.1594199184.309141ea/crmsh/bootstrap.py  2020-07-08 
11:06:24.000000000 +0200
@@ -41,6 +41,11 @@
 SYSCONFIG_FW_CLUSTER = "/etc/sysconfig/SuSEfirewall2.d/services/cluster"
 PCMK_REMOTE_AUTH = "/etc/pacemaker/authkey"
 COROSYNC_CONF_ORIG = tmpfiles.create()[1]
+RSA_PRIVATE_KEY = "/root/.ssh/id_rsa"
+RSA_PUBLIC_KEY = "/root/.ssh/id_rsa.pub"
+AUTHORIZED_KEYS_FILE = "/root/.ssh/authorized_keys"
+
+
 INIT_STAGES = ("ssh", "ssh_remote", "csync2", "csync2_remote", "corosync", 
"storage", "sbd", "cluster", "vgfs", "admin", "qdevice")
 
 
@@ -129,6 +134,8 @@
                 error("Maximum number of interface is 2")
             if len(self.nic_list) != len(set(self.nic_list)):
                 error("Duplicated input")
+        if self.no_overwrite_sshkey:
+            warn("--no-overwrite-sshkey option is deprecated since crmsh does 
not overwrite ssh keys by default anymore and will be removed in future 
versions")
 
     def init_sbd_manager(self):
         self.sbd_manager = SBDManager(self.sbd_devices, self.diskless_sbd)
@@ -956,6 +963,14 @@
             tf.write(ff.read())
 
 
+def append_unique(fromfile, tofile):
+    """
+    Append unique content from fromfile to tofile
+    """
+    if not utils.check_file_content_included(fromfile, tofile):
+        append(fromfile, tofile)
+
+
 def rmfile(path, ignore_errors=False):
     """
     Try to remove the given file, and
@@ -987,15 +1002,22 @@
     Configure passwordless SSH.
     """
     start_service("sshd.service")
-    invoke("mkdir -m 700 -p /root/.ssh")
-    if os.path.exists("/root/.ssh/id_rsa"):
-        if _context.yes_to_all and _context.no_overwrite_sshkey or \
-                not confirm("/root/.ssh/id_rsa already exists - overwrite?"):
-            return
-        rmfile("/root/.ssh/id_rsa")
-    status("Generating SSH key")
-    invoke("ssh-keygen -q -f /root/.ssh/id_rsa -C 'Cluster Internal' -N ''")
-    append("/root/.ssh/id_rsa.pub", "/root/.ssh/authorized_keys")
+    configure_local_ssh_key()
+
+
+def configure_local_ssh_key():
+    """
+    Configure ssh rsa key locally
+
+    If /root/.ssh/id_rsa not exist, generate a new one
+    Add /root/.ssh/id_rsa.pub to /root/.ssh/authorized_keys anyway, make sure 
itself authorized
+    """
+    if not os.path.exists(RSA_PRIVATE_KEY):
+        status("Generating SSH key")
+        invoke("ssh-keygen -q -f {} -C 'Cluster Internal on {}' -N 
''".format(RSA_PRIVATE_KEY, utils.this_node()))
+    if not os.path.exists(AUTHORIZED_KEYS_FILE):
+        open(AUTHORIZED_KEYS_FILE, 'w').close()
+    append_unique(RSA_PUBLIC_KEY, AUTHORIZED_KEYS_FILE)
 
 
 def init_ssh_remote():
@@ -1016,6 +1038,15 @@
             append(fn + ".pub", authorized_keys_file)
 
 
+def append_to_remote_file(fromfile, remote_node, tofile):
+    """
+    Append content of fromfile to tofile on remote_node
+    """
+    cmd = "cat {} | ssh -oStrictHostKeyChecking=no root@{} 'cat >> 
{}'".format(fromfile, remote_node, tofile)
+    if not invoke(cmd):
+        error("Failed to run \"{}\"".format(cmd))
+
+
 def init_csync2():
     status("Configuring csync2")
     if os.path.exists(CSYNC2_KEY):
@@ -1739,35 +1770,8 @@
         error("No existing IP/hostname specified (use -c option)")
 
     start_service("sshd.service")
-    invoke("mkdir -m 700 -p /root/.ssh")
-
-    tmpdir = tmpfiles.create_dir()
-    status("Retrieving SSH keys - This may prompt for root@%s:" % (seed_host))
-    if not invoke("scp -oStrictHostKeyChecking=no  root@%s:'/root/.ssh/id_*' 
%s/" % (seed_host, tmpdir)):
-        error("Failed to retrieve ssh keys")
-
-    # This supports all SSH key types, for the case where ha-cluster-init
-    # wasn't used to set up the seed node, and the user has manually
-    # created, for example, DSA keys (bnc#878080)
-    got_keys = 0
-    for key in ("id_rsa", "id_dsa", "id_ecdsa", "id_ed25519"):
-        if not os.path.exists(os.path.join(tmpdir, key)):
-            continue
-
-        if os.path.exists(os.path.join("/root/.ssh", key)):
-            if not confirm("/root/.ssh/%s already exists - overwrite?" % 
(key)):
-                continue
-        invoke("mv %s* /root/.ssh/" % (os.path.join(tmpdir, key)))
-        if not grep_file("/root/.ssh/authorized_keys", 
open("/root/.ssh/%s.pub" % (key)).read()):
-            append("/root/.ssh/%s.pub" % (key), "/root/.ssh/authorized_keys")
-        got_keys += 1
-
-    if got_keys == 0:
-        status("No new SSH keys installed")
-    elif got_keys == 1:
-        status("One new SSH key installed")
-    else:
-        status("%s new SSH keys installed" % (got_keys))
+    configure_local_ssh_key()
+    swap_public_ssh_key(seed_host)
 
     # This makes sure the seed host has its own SSH keys in its own
     # authorized_keys file (again, to help with the case where the
@@ -1777,6 +1781,51 @@
         error("Can't invoke crm cluster init -i {} ssh_remote on 
{}".format(_context.default_nic_list[0], seed_host))
 
 
+def swap_public_ssh_key(remote_node):
+    """
+    Swap public ssh key between remote_node and local
+    """
+    # Detect whether need password to login to remote_node
+    if utils.check_ssh_passwd_need(remote_node):
+        # If no passwordless configured, paste /root/.ssh/id_rsa.pub to 
remote_node's /root/.ssh/authorized_keys
+        status("Configuring SSH passwordless with root@{}".format(remote_node))
+        # After this, login to remote_node is passwordless
+        append_to_remote_file(RSA_PUBLIC_KEY, remote_node, 
AUTHORIZED_KEYS_FILE)
+
+    try:
+        # Fetch public key file from remote_node
+        public_key_file_remote = fetch_public_key_from_remote_node(remote_node)
+    except ValueError as err:
+        warn(err)
+        return
+    # Append public key file from remote_node to local's 
/root/.ssh/authorized_keys
+    # After this, login from remote_node is passwordless
+    # Should do this step even passwordless is True, to make sure we got 
two-way passwordless
+    append_unique(public_key_file_remote, AUTHORIZED_KEYS_FILE)
+
+
+def fetch_public_key_from_remote_node(node):
+    """
+    Fetch public key file from remote node
+    Return a temp file contains public key
+    Return None if no key exist
+    """
+
+    # For dsa, might need to add PubkeyAcceptedKeyTypes=+ssh-dss to config 
file, see
+    # 
https://superuser.com/questions/1016989/ssh-dsa-keys-no-longer-work-for-password-less-authentication
+    for key in ("id_rsa", "id_ecdsa", "id_ed25519", "id_dsa"):
+        public_key_file = "/root/.ssh/{}.pub".format(key)
+        cmd = "ssh -oStrictHostKeyChecking=no root@{} 'test -f 
{}'".format(node, public_key_file)
+        if not invoke(cmd):
+            continue
+        _, temp_public_key_file = tmpfiles.create()
+        cmd = "scp -oStrictHostKeyChecking=no root@{}:{} {}".format(node, 
public_key_file, temp_public_key_file)
+        if not invoke(cmd):
+            error("Failed to run \"{}\"".format(cmd))
+        return temp_public_key_file
+    raise ValueError("No ssh key exist on {}".format(node))
+
+
 def join_csync2(seed_host):
     """
     Csync2 configuration for joining node.
@@ -1827,47 +1876,6 @@
     status_done()
 
 
-def join_ssh_merge(_cluster_node):
-    status("Merging known_hosts")
-
-    me = utils.this_node()
-    hosts = [m.group(1)
-             for m in re.finditer(r"^\s*host\s*([^ ;]+)\s*;", 
open(CSYNC2_CFG).read(), re.M)
-             if m.group(1) != me]
-    if not hosts:
-        hosts = [_cluster_node]
-        warn("Unable to extract host list from %s" % (CSYNC2_CFG))
-
-    try:
-        import parallax
-    except ImportError:
-        error("parallax python library is missing")
-
-    opts = parallax.Options()
-    opts.ssh_options = ['StrictHostKeyChecking=no']
-
-    # The act of using pssh to connect to every host (without strict host key
-    # checking) ensures that at least *this* host has every other host in its
-    # known_hosts
-    known_hosts_new = set()
-    cat_cmd = "[ -e /root/.ssh/known_hosts ] && cat /root/.ssh/known_hosts || 
true"
-    log("parallax.call {} : {}".format(hosts, cat_cmd))
-    results = parallax.call(hosts, cat_cmd, opts)
-    for host, result in results.items():
-        if isinstance(result, parallax.Error):
-            warn("Failed to get known_hosts from {}: {}".format(host, 
str(result)))
-        else:
-            if result[1]:
-                known_hosts_new.update((utils.to_ascii(result[1]) or 
"").splitlines())
-    if known_hosts_new:
-        hoststxt = "\n".join(sorted(known_hosts_new))
-        tmpf = utils.str2tmp(hoststxt)
-        log("parallax.copy {} : {}".format(hosts, hoststxt))
-        results = parallax.copy(hosts, tmpf, "/root/.ssh/known_hosts")
-        for host, result in results.items():
-            if isinstance(result, parallax.Error):
-                warn("scp to {} failed ({}), known_hosts update may be 
incomplete".format(host, str(result)))
-
 def update_expected_votes():
     # get a list of nodes, excluding remote nodes
     nodelist = None
@@ -1947,6 +1955,42 @@
     csync2_update(corosync.conf())
 
 
+def setup_passwordless_with_other_nodes(init_node):
+    """
+    Setup passwordless with other cluster nodes
+
+    Should fetch the node list from init node, then swap the key
+    """
+    # Check whether pacemaker.service is active on init node
+    cmd = "ssh -o StrictHostKeyChecking=no root@{} systemctl -q is-active 
{}".format(init_node, "pacemaker.service")
+    rc, _, _ = utils.get_stdout_stderr(cmd)
+    if rc != 0:
+        error("Cluster is inactive on {}".format(init_node))
+
+    # Fetch cluster nodes list
+    cmd = "ssh -o StrictHostKeyChecking=no root@{} crm_node 
-l".format(init_node)
+    rc, out, err = utils.get_stdout_stderr(cmd)
+    if rc != 0:
+        error("Can't fetch cluster nodes list from {}: {}".format(init_node, 
err))
+    cluster_nodes_list = []
+    for line in out.splitlines():
+        _, node, stat = line.split()
+        if stat == "member":
+            cluster_nodes_list.append(node)
+
+    # Filter out init node from cluster_nodes_list
+    cmd = "ssh -o StrictHostKeyChecking=no root@{} hostname".format(init_node)
+    rc, out, err = utils.get_stdout_stderr(cmd)
+    if rc != 0:
+        error("Can't fetch hostname of {}: {}".format(init_node, err))
+    if out in cluster_nodes_list:
+        cluster_nodes_list.remove(out)
+
+    # Swap ssh public key between join node and other cluster nodes
+    for node in cluster_nodes_list:
+        swap_public_ssh_key(node)
+
+
 def join_cluster(seed_host):
     """
     Cluster configuration for joining node.
@@ -1963,6 +2007,8 @@
         else:
             corosync.set_value("totem.nodeid", nodeid)
 
+    setup_passwordless_with_other_nodes(seed_host)
+
     shutil.copy(corosync.conf(), COROSYNC_CONF_ORIG)
 
     # check if use IPv6
@@ -2375,7 +2421,6 @@
         join_ssh(cluster_node)
         join_remote_auth(cluster_node)
         join_csync2(cluster_node)
-        join_ssh_merge(cluster_node)
         join_cluster(cluster_node)
 
     status("Done (log saved to %s)" % (LOG_FILE))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1592790745.eaa14889/crmsh/corosync.py 
new/crmsh-4.2.0+git.1594199184.309141ea/crmsh/corosync.py
--- old/crmsh-4.2.0+git.1592790745.eaa14889/crmsh/corosync.py   2020-06-22 
03:52:25.000000000 +0200
+++ new/crmsh-4.2.0+git.1594199184.309141ea/crmsh/corosync.py   2020-07-08 
11:06:24.000000000 +0200
@@ -175,7 +175,7 @@
             raise ValueError(exception_msg)
 
     def check_ssh_passwd_need(self):
-        return utils.check_ssh_passwd_need([self.ip])
+        return utils.check_ssh_passwd_need(self.ip)
 
     def remote_running_cluster(self):
         cmd = "systemctl -q is-active pacemaker"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1592790745.eaa14889/crmsh/ui_cluster.py 
new/crmsh-4.2.0+git.1594199184.309141ea/crmsh/ui_cluster.py
--- old/crmsh-4.2.0+git.1592790745.eaa14889/crmsh/ui_cluster.py 2020-06-22 
03:52:25.000000000 +0200
+++ new/crmsh-4.2.0+git.1594199184.309141ea/crmsh/ui_cluster.py 2020-07-08 
11:06:24.000000000 +0200
@@ -639,7 +639,11 @@
 
         opts = parallax.Options()
         opts.ssh_options = ['StrictHostKeyChecking=no']
-        opts.askpass = utils.check_ssh_passwd_need(hosts)
+        for host in hosts:
+            res = utils.check_ssh_passwd_need(host)
+            if res:
+                opts.askpass = True
+                break
         for host, result in parallax.call(hosts, cmd, opts).items():
             if isinstance(result, parallax.Error):
                 err_buf.error("[%s]: %s" % (host, result))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/crmsh-4.2.0+git.1592790745.eaa14889/crmsh/utils.py 
new/crmsh-4.2.0+git.1594199184.309141ea/crmsh/utils.py
--- old/crmsh-4.2.0+git.1592790745.eaa14889/crmsh/utils.py      2020-06-22 
03:52:25.000000000 +0200
+++ new/crmsh-4.2.0+git.1594199184.309141ea/crmsh/utils.py      2020-07-08 
11:06:24.000000000 +0200
@@ -2053,14 +2053,14 @@
     return ip_list
 
 
-def check_ssh_passwd_need(hosts):
+def check_ssh_passwd_need(host):
+    """
+    Check whether access to host need password
+    """
     ssh_options = "-o StrictHostKeyChecking=no -o EscapeChar=none -o 
ConnectTimeout=15"
-    for host in hosts:
-        ssh_cmd = "ssh {} -T -o Batchmode=yes {} true".format(ssh_options, 
host)
-        rc, _, _ = get_stdout_stderr(ssh_cmd)
-        if rc != 0:
-            return True
-    return False
+    ssh_cmd = "ssh {} -T -o Batchmode=yes {} true".format(ssh_options, host)
+    rc, _, _ = get_stdout_stderr(ssh_cmd)
+    return rc != 0
 
 
 def check_port_open(host, port):
@@ -2404,4 +2404,20 @@
             if interface_inst.ip_in_network(addr):
                 return True
         return False
+
+
+def check_file_content_included(source_file, target_file):
+    """
+    Check whether target_file includes contents of source_file
+    """
+    if not os.path.exists(source_file):
+        raise ValueError("File {} not exist".format(source_file))
+    if not os.path.exists(target_file):
+        return False
+
+    with open(target_file, 'r') as target_fd:
+        target_data = target_fd.read()
+    with open(source_file, 'r') as source_fd:
+        source_data = source_fd.read()
+    return source_data in target_data
 # vim:ts=4:sw=4:et:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/bootstrap_bugs.feature 
new/crmsh-4.2.0+git.1594199184.309141ea/test/features/bootstrap_bugs.feature
--- 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/bootstrap_bugs.feature    
    2020-06-22 03:52:25.000000000 +0200
+++ 
new/crmsh-4.2.0+git.1594199184.309141ea/test/features/bootstrap_bugs.feature    
    2020-07-08 11:06:24.000000000 +0200
@@ -7,7 +7,7 @@
   Scenario: Set placement-strategy value as "default"(bsc#1129462)
     Given   Cluster service is "stopped" on "hanode1"
     And     Cluster service is "stopped" on "hanode2"
-    When    Run "crm cluster init -y --no-overwrite-sshkey" on "hanode1"
+    When    Run "crm cluster init -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     Show cluster status on "hanode1"
     When    Run "crm cluster join -c hanode1 -y" on "hanode2"
@@ -38,7 +38,7 @@
   Scenario: Setup cluster with crossed network(udpu only)
     Given   Cluster service is "stopped" on "hanode1"
     Given   Cluster service is "stopped" on "hanode2"
-    When    Run "crm cluster init -u -i eth0 -y --no-overwrite-sshkey" on 
"hanode1"
+    When    Run "crm cluster init -u -i eth0 -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     When    Try "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
     Then    Cluster service is "stopped" on "hanode2"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/bootstrap_init_join_remove.feature
 
new/crmsh-4.2.0+git.1594199184.309141ea/test/features/bootstrap_init_join_remove.feature
--- 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/bootstrap_init_join_remove.feature
    2020-06-22 03:52:25.000000000 +0200
+++ 
new/crmsh-4.2.0+git.1594199184.309141ea/test/features/bootstrap_init_join_remove.feature
    2020-07-08 11:06:24.000000000 +0200
@@ -7,7 +7,7 @@
   Background: Setup a two nodes cluster
     Given   Cluster service is "stopped" on "hanode1"
     And     Cluster service is "stopped" on "hanode2"
-    When    Run "crm cluster init -y --no-overwrite-sshkey" on "hanode1"
+    When    Run "crm cluster init -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     Show cluster status on "hanode1"
     When    Run "crm cluster join -c hanode1 -y" on "hanode2"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/bootstrap_options.feature 
new/crmsh-4.2.0+git.1594199184.309141ea/test/features/bootstrap_options.feature
--- 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/bootstrap_options.feature 
    2020-06-22 03:52:25.000000000 +0200
+++ 
new/crmsh-4.2.0+git.1594199184.309141ea/test/features/bootstrap_options.feature 
    2020-07-08 11:06:24.000000000 +0200
@@ -33,7 +33,7 @@
   Scenario: Init whole cluster service on node "hanode1" using "--nodes" option
     Given   Cluster service is "stopped" on "hanode1"
     And     Cluster service is "stopped" on "hanode2"
-    When    Run "crm cluster init -y --no-overwrite-sshkey --nodes \"hanode1 
hanode2\"" on "hanode1"
+    When    Run "crm cluster init -y --nodes \"hanode1 hanode2\"" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     Cluster service is "started" on "hanode2"
     And     Online nodes are "hanode1 hanode2"
@@ -43,7 +43,7 @@
   Scenario: Bind specific network interface using "-i" option
     Given   Cluster service is "stopped" on "hanode1"
     And     IP "10.10.10.2" is belong to "eth1"
-    When    Run "crm cluster init -i eth1 -y --no-overwrite-sshkey" on 
"hanode1"
+    When    Run "crm cluster init -i eth1 -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     IP "10.10.10.2" is used by corosync on "hanode1"
     And     Show corosync ring status
@@ -53,7 +53,7 @@
     Given   Cluster service is "stopped" on "hanode1"
     And     IP "172.17.0.2" is belong to "eth0"
     And     IP "10.10.10.2" is belong to "eth1"
-    When    Run "crm cluster init -M -y --no-overwrite-sshkey" on "hanode1"
+    When    Run "crm cluster init -M -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     IP "172.17.0.2" is used by corosync on "hanode1"
     And     IP "10.10.10.2" is used by corosync on "hanode1"
@@ -64,7 +64,7 @@
     Given   Cluster service is "stopped" on "hanode1"
     And     IP "172.17.0.2" is belong to "eth0"
     And     IP "10.10.10.2" is belong to "eth1"
-    When    Run "crm cluster init -i eth0 -i eth1 -y --no-overwrite-sshkey" on 
"hanode1"
+    When    Run "crm cluster init -i eth0 -i eth1 -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     IP "172.17.0.2" is used by corosync on "hanode1"
     And     IP "10.10.10.2" is used by corosync on "hanode1"
@@ -79,7 +79,7 @@
     Then    Except "ERROR: cluster.init: Address already in use: 10.10.10.2"
     When    Try "crm cluster init -A 10.20.10.2 -y"
     Then    Except "ERROR: cluster.init: Address '10.20.10.2' not in any local 
network"
-    When    Run "crm cluster init -n hatest -A 10.10.10.123 -y 
--no-overwrite-sshkey" on "hanode1"
+    When    Run "crm cluster init -n hatest -A 10.10.10.123 -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     Cluster name is "hatest"
     And     Cluster virtual IP is "10.10.10.123"
@@ -88,7 +88,7 @@
   @clean
   Scenario: Init cluster service with udpu using "-u" option
     Given   Cluster service is "stopped" on "hanode1"
-    When    Run "crm cluster init -u -y -i eth0 --no-overwrite-sshkey" on 
"hanode1"
+    When    Run "crm cluster init -u -y -i eth0" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     Cluster is using udpu transport mode
     And     IP "172.17.0.2" is used by corosync on "hanode1"
@@ -99,7 +99,7 @@
   Scenario: Init cluster service with ipv6 using "-I" option
     Given   Cluster service is "stopped" on "hanode1"
     Given   Cluster service is "stopped" on "hanode2"
-    When    Run "crm cluster init -I -i eth1 -y --no-overwrite-sshkey" on 
"hanode1"
+    When    Run "crm cluster init -I -i eth1 -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     IP "2001:db8:10::2" is used by corosync on "hanode1"
     When    Run "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
@@ -111,7 +111,7 @@
   Scenario: Init cluster service with ipv6 unicast using "-I" and "-u" option
     Given   Cluster service is "stopped" on "hanode1"
     Given   Cluster service is "stopped" on "hanode2"
-    When    Run "crm cluster init -I -i eth1 -u -y --no-overwrite-sshkey" on 
"hanode1"
+    When    Run "crm cluster init -I -i eth1 -u -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     IP "2001:db8:10::2" is used by corosync on "hanode1"
     When    Run "crm cluster join -c hanode1 -i eth1 -y" on "hanode2"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/geo_setup.feature 
new/crmsh-4.2.0+git.1594199184.309141ea/test/features/geo_setup.feature
--- old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/geo_setup.feature     
2020-06-22 03:52:25.000000000 +0200
+++ new/crmsh-4.2.0+git.1594199184.309141ea/test/features/geo_setup.feature     
2020-07-08 11:06:24.000000000 +0200
@@ -8,11 +8,11 @@
   Scenario: GEO cluster setup
     Given   Cluster service is "stopped" on "hanode1"
     And     Cluster service is "stopped" on "hanode2"
-    When    Run "crm cluster init -y --no-overwrite-sshkey -n cluster1" on 
"hanode1"
+    When    Run "crm cluster init -y -n cluster1" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     When    Run "crm configure primitive vip IPaddr2 params ip=10.10.10.123" 
on "hanode1"
 
-    When    Run "crm cluster init -y --no-overwrite-sshkey -n cluster2" on 
"hanode2"
+    When    Run "crm cluster init -y -n cluster2" on "hanode2"
     Then    Cluster service is "started" on "hanode2"
     When    Run "crm configure primitive vip IPaddr2 params ip=10.10.10.124" 
on "hanode2"
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/qdevice_options.feature 
new/crmsh-4.2.0+git.1594199184.309141ea/test/features/qdevice_options.feature
--- 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/qdevice_options.feature   
    2020-06-22 03:52:25.000000000 +0200
+++ 
new/crmsh-4.2.0+git.1594199184.309141ea/test/features/qdevice_options.feature   
    2020-07-08 11:06:24.000000000 +0200
@@ -12,7 +12,7 @@
   Scenario: Use "--qdevice-algo" to change qnetd decision algorithm to "lms"
     Given   Cluster service is "stopped" on "hanode1"
     And     Service "corosync-qdevice" is "stopped" on "hanode1"
-    When    Run "crm cluster init --qnetd-hostname=qnetd-node 
--qdevice-algo=lms -y --no-overwrite-sshkey" on "hanode1"
+    When    Run "crm cluster init --qnetd-hostname=qnetd-node 
--qdevice-algo=lms -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     Service "corosync-qdevice" is "started" on "hanode1"
     When    Run "crm cluster join -c hanode1 -y" on "hanode2"
@@ -25,7 +25,7 @@
   Scenario: Use "--qdevice-tie-breaker" to change qnetd tie_breaker to 
"highest"
     Given   Cluster service is "stopped" on "hanode1"
     And     Service "corosync-qdevice" is "stopped" on "hanode1"
-    When    Run "crm cluster init --qnetd-hostname=qnetd-node 
--qdevice-tie-breaker=highest -y --no-overwrite-sshkey" on "hanode1"
+    When    Run "crm cluster init --qnetd-hostname=qnetd-node 
--qdevice-tie-breaker=highest -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     Service "corosync-qdevice" is "started" on "hanode1"
     And     Show corosync qdevice configuration
@@ -34,7 +34,7 @@
   Scenario: Use "--qdevice-tls" to turn off TLS certification
     Given   Cluster service is "stopped" on "hanode1"
     And     Service "corosync-qdevice" is "stopped" on "hanode1"
-    When    Run "crm cluster init --qnetd-hostname=qnetd-node 
--qdevice-tls=off -y --no-overwrite-sshkey" on "hanode1"
+    When    Run "crm cluster init --qnetd-hostname=qnetd-node 
--qdevice-tls=off -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     Service "corosync-qdevice" is "started" on "hanode1"
     And     Show corosync qdevice configuration
@@ -43,7 +43,7 @@
   Scenario: Use "--qdevice-heuristics" to configure heuristics
     Given   Cluster service is "stopped" on "hanode1"
     And     Service "corosync-qdevice" is "stopped" on "hanode1"
-    When    Run "crm cluster init --qnetd-hostname=qnetd-node 
--qdevice-heuristics='/usr/bin/test -f /tmp/file_exists;/usr/bin/which 
pacemaker' -y --no-overwrite-sshkey" on "hanode1"
+    When    Run "crm cluster init --qnetd-hostname=qnetd-node 
--qdevice-heuristics='/usr/bin/test -f /tmp/file_exists;/usr/bin/which 
pacemaker' -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     Service "corosync-qdevice" is "started" on "hanode1"
     And     Show corosync qdevice configuration
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/qdevice_setup_remove.feature
 
new/crmsh-4.2.0+git.1594199184.309141ea/test/features/qdevice_setup_remove.feature
--- 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/qdevice_setup_remove.feature
  2020-06-22 03:52:25.000000000 +0200
+++ 
new/crmsh-4.2.0+git.1594199184.309141ea/test/features/qdevice_setup_remove.feature
  2020-07-08 11:06:24.000000000 +0200
@@ -12,7 +12,7 @@
 
   @clean
   Scenario: Setup qdevice/qnetd during init/join process
-    When    Run "crm cluster init --qnetd-hostname=qnetd-node -y 
--no-overwrite-sshkey" on "hanode1"
+    When    Run "crm cluster init --qnetd-hostname=qnetd-node -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     Service "corosync-qdevice" is "started" on "hanode1"
     When    Run "crm cluster join -c hanode1 -y" on "hanode2"
@@ -25,7 +25,7 @@
 
   @clean
   Scenario: Setup qdevice/qnetd on running cluster
-    When    Run "crm cluster init -y --no-overwrite-sshkey" on "hanode1"
+    When    Run "crm cluster init -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     Service "corosync-qdevice" is "stopped" on "hanode1"
     When    Run "crm cluster join -c hanode1 -y" on "hanode2"
@@ -43,7 +43,7 @@
 
   @clean
   Scenario: Setup qdevice with heuristics
-    When    Run "crm cluster init -y --no-overwrite-sshkey 
--qnetd-hostname=qnetd-node --qdevice-heuristics="/usr/bin/test -f 
/tmp/heuristics.txt" --qdevice-heuristics-mode="on"" on "hanode1"
+    When    Run "crm cluster init -y --qnetd-hostname=qnetd-node 
--qdevice-heuristics="/usr/bin/test -f /tmp/heuristics.txt" 
--qdevice-heuristics-mode="on"" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     Service "corosync-qdevice" is "started" on "hanode1"
     When    Run "crm cluster join -c hanode1 -y" on "hanode2"
@@ -61,7 +61,7 @@
 
   @clean
   Scenario: Remove qdevice from a two nodes cluster
-    When    Run "crm cluster init --qnetd-hostname=qnetd-node -y 
--no-overwrite-sshkey" on "hanode1"
+    When    Run "crm cluster init --qnetd-hostname=qnetd-node -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     And     Service "corosync-qdevice" is "started" on "hanode1"
     When    Run "crm cluster join -c hanode1 -y" on "hanode2"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/qdevice_usercase.feature 
new/crmsh-4.2.0+git.1594199184.309141ea/test/features/qdevice_usercase.feature
--- 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/qdevice_usercase.feature  
    2020-06-22 03:52:25.000000000 +0200
+++ 
new/crmsh-4.2.0+git.1594199184.309141ea/test/features/qdevice_usercase.feature  
    2020-07-08 11:06:24.000000000 +0200
@@ -21,7 +21,7 @@
   @clean
   Scenario: Master survive when split-brain
     # Setup a two-nodes cluster
-    When    Run "crm cluster init -y -i eth0 --no-overwrite-sshkey" on 
"hanode1"
+    When    Run "crm cluster init -y -i eth0" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     When    Run "crm cluster join -c hanode1 -y" on "hanode2"
     Then    Cluster service is "started" on "hanode2"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/qdevice_validate.feature 
new/crmsh-4.2.0+git.1594199184.309141ea/test/features/qdevice_validate.feature
--- 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/qdevice_validate.feature  
    2020-06-22 03:52:25.000000000 +0200
+++ 
new/crmsh-4.2.0+git.1594199184.309141ea/test/features/qdevice_validate.feature  
    2020-07-08 11:06:24.000000000 +0200
@@ -61,9 +61,9 @@
   @clean
   Scenario: Node for qnetd is a cluster node
     Given   Cluster service is "stopped" on "hanode2"
-    When    Run "crm cluster init -y --no-overwrite-sshkey" on "hanode2"
+    When    Run "crm cluster init -y" on "hanode2"
     Then    Cluster service is "started" on "hanode2"
-    When    Try "crm cluster init --qnetd-hostname=hanode2 -y 
--no-overwrite-sshkey"
+    When    Try "crm cluster init --qnetd-hostname=hanode2 -y"
     Then    Except multiple lines
       """"
       ERROR: cluster.init: host for qnetd must be a non-cluster node
@@ -78,7 +78,7 @@
   @clean
   Scenario: Node for qnetd not installed corosync-qnetd
     Given   Cluster service is "stopped" on "hanode2"
-    When    Try "crm cluster init --qnetd-hostname=hanode2 -y 
--no-overwrite-sshkey"
+    When    Try "crm cluster init --qnetd-hostname=hanode2 -y"
     Then    Except multiple lines
       """"
       ERROR: cluster.init: Package "corosync-qnetd" not installed on hanode2
@@ -98,7 +98,7 @@
   @clean
   Scenario: Run qdevice stage but miss "--qnetd-hostname" option
     Given   Cluster service is "stopped" on "hanode1"
-    When    Run "crm cluster init -y --no-overwrite-sshkey" on "hanode1"
+    When    Run "crm cluster init -y" on "hanode1"
     Then    Cluster service is "started" on "hanode1"
     When    Try "crm cluster init qdevice"
     Then    Except "ERROR: cluster.init: qdevice related options are missing 
(--qnetd-hostname option is mandatory, find for more information using --help)"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/resource_failcount.feature
 
new/crmsh-4.2.0+git.1594199184.309141ea/test/features/resource_failcount.feature
--- 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/resource_failcount.feature
    2020-06-22 03:52:25.000000000 +0200
+++ 
new/crmsh-4.2.0+git.1594199184.309141ea/test/features/resource_failcount.feature
    2020-07-08 11:06:24.000000000 +0200
@@ -5,7 +5,7 @@
 
   Background: Setup one node cluster and configure a Dummy resource
     Given     Cluster service is "stopped" on "hanode1"
-    When      Run "crm cluster init -y --no-overwrite-sshkey" on "hanode1"
+    When      Run "crm cluster init -y" on "hanode1"
     Then      Cluster service is "started" on "hanode1"
     When      Run "crm configure primitive d Dummy op monitor interval=3s" on 
"hanode1"
     Then      Resource "d" type "Dummy" is "Started"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/resource_set.feature 
new/crmsh-4.2.0+git.1594199184.309141ea/test/features/resource_set.feature
--- old/crmsh-4.2.0+git.1592790745.eaa14889/test/features/resource_set.feature  
2020-06-22 03:52:25.000000000 +0200
+++ new/crmsh-4.2.0+git.1594199184.309141ea/test/features/resource_set.feature  
2020-07-08 11:06:24.000000000 +0200
@@ -5,7 +5,7 @@
 
   Background: Setup one node cluster and configure some resources
     Given     Cluster service is "stopped" on "hanode1"
-    When      Run "crm cluster init -y --no-overwrite-sshkey" on "hanode1"
+    When      Run "crm cluster init -y" on "hanode1"
     Then      Cluster service is "started" on "hanode1"
     When      Run "crm configure primitive d Dummy op monitor interval=3s" on 
"hanode1"
     Then      Resource "d" type "Dummy" is "Started"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/unittests/test_bootstrap.py 
new/crmsh-4.2.0+git.1594199184.309141ea/test/unittests/test_bootstrap.py
--- old/crmsh-4.2.0+git.1592790745.eaa14889/test/unittests/test_bootstrap.py    
2020-06-22 03:52:25.000000000 +0200
+++ new/crmsh-4.2.0+git.1594199184.309141ea/test/unittests/test_bootstrap.py    
2020-07-08 11:06:24.000000000 +0200
@@ -340,69 +340,205 @@
         Global tearDown.
         """
 
-    @mock.patch('crmsh.bootstrap.append')
+    @mock.patch('crmsh.bootstrap.configure_local_ssh_key')
+    @mock.patch('crmsh.bootstrap.start_service')
+    def test_init_ssh(self, mock_start_service, mock_config_ssh):
+        bootstrap.init_ssh()
+        mock_start_service.assert_called_once_with("sshd.service")
+        mock_config_ssh.assert_called_once_with()
+
+    @mock.patch('crmsh.bootstrap.append_unique')
+    @mock.patch('builtins.open', create=True)
+    @mock.patch('crmsh.utils.this_node')
+    @mock.patch('crmsh.bootstrap.invoke')
     @mock.patch('crmsh.bootstrap.status')
     @mock.patch('os.path.exists')
-    @mock.patch('crmsh.bootstrap.start_service')
+    def test_configure_local_ssh_key(self, mock_exists, mock_status, 
mock_invoke,
+            mock_this_node, mock_open_file, mock_append):
+        mock_exists.side_effect = [False, False]
+        mock_this_node.return_value = "node1"
+
+        bootstrap.configure_local_ssh_key()
+
+        mock_exists.assert_has_calls([
+            mock.call(bootstrap.RSA_PRIVATE_KEY),
+            mock.call(bootstrap.AUTHORIZED_KEYS_FILE)
+            ])
+        mock_status.assert_called_once_with("Generating SSH key")
+        mock_invoke.assert_called_once_with("ssh-keygen -q -f {} -C 'Cluster 
Internal on {}' -N ''".format(bootstrap.RSA_PRIVATE_KEY, 
mock_this_node.return_value))
+        mock_this_node.assert_called_once_with()
+        mock_open_file.assert_called_once_with(bootstrap.AUTHORIZED_KEYS_FILE, 
'w')
+        mock_append.assert_called_once_with(bootstrap.RSA_PUBLIC_KEY, 
bootstrap.AUTHORIZED_KEYS_FILE)
+
+    @mock.patch('crmsh.bootstrap.append')
+    @mock.patch('crmsh.utils.check_file_content_included')
+    def test_append_unique(self, mock_check, mock_append):
+        mock_check.return_value = False
+        bootstrap.append_unique("fromfile", "tofile")
+        mock_check.assert_called_once_with("fromfile", "tofile")
+        mock_append.assert_called_once_with("fromfile", "tofile")
+
+    @mock.patch('crmsh.bootstrap.error')
     @mock.patch('crmsh.bootstrap.invoke')
-    def test_init_ssh_no_exist_keys(self, mock_invoke, mock_start_service,
-                                    mock_exists, mock_status, mock_append):
-        mock_exists.return_value = False
+    def test_append_to_remote_file(self, mock_invoke, mock_error):
+        mock_invoke.return_value = False
+        bootstrap.append_to_remote_file("fromfile", "node1", "tofile")
+        cmd = "cat fromfile | ssh -oStrictHostKeyChecking=no root@node1 'cat 
>> tofile'"
+        mock_invoke.assert_called_once_with(cmd)
+        mock_error.assert_called_once_with("Failed to run \"{}\"".format(cmd))
 
-        bootstrap.init_ssh()
+    @mock.patch('crmsh.bootstrap.invoke')
+    def test_fetch_public_key_from_remote_node_exception(self, mock_invoke):
+        mock_invoke.side_effect = [False, False, False, False]
+
+        with self.assertRaises(ValueError) as err:
+            bootstrap.fetch_public_key_from_remote_node("node1")
+        self.assertEqual("No ssh key exist on node1", str(err.exception))
 
-        mock_start_service.assert_called_once_with("sshd.service")
         mock_invoke.assert_has_calls([
-            mock.call("mkdir -m 700 -p /root/.ssh"),
-            mock.call("ssh-keygen -q -f /root/.ssh/id_rsa -C 'Cluster 
Internal' -N ''")
-        ])
-        mock_exists.assert_called_once_with("/root/.ssh/id_rsa")
-        mock_status.assert_called_once_with("Generating SSH key")
-        mock_append.assert_called_once_with("/root/.ssh/id_rsa.pub", 
"/root/.ssh/authorized_keys")
+            mock.call("ssh -oStrictHostKeyChecking=no root@node1 'test -f 
/root/.ssh/id_rsa.pub'"),
+            mock.call("ssh -oStrictHostKeyChecking=no root@node1 'test -f 
/root/.ssh/id_ecdsa.pub'"),
+            mock.call("ssh -oStrictHostKeyChecking=no root@node1 'test -f 
/root/.ssh/id_ed25519.pub'"),
+            mock.call("ssh -oStrictHostKeyChecking=no root@node1 'test -f 
/root/.ssh/id_dsa.pub'")
+            ])
 
-    @mock.patch('crmsh.bootstrap.append')
-    @mock.patch('crmsh.bootstrap.status')
-    @mock.patch('crmsh.bootstrap.rmfile')
-    @mock.patch('crmsh.bootstrap.confirm')
-    @mock.patch('os.path.exists')
-    @mock.patch('crmsh.bootstrap.start_service')
+    @mock.patch('crmsh.tmpfiles.create')
     @mock.patch('crmsh.bootstrap.invoke')
-    def test_init_ssh_exits_keys_yes_to_all_confirm(self, mock_invoke, 
mock_start_service,
-                         mock_exists, mock_confirm, mock_rmfile, mock_status, 
mock_append):
-        mock_exists.return_value = True
-        bootstrap._context = mock.Mock(yes_to_all=True, 
no_overwrite_sshkey=False)
-        mock_confirm.return_value = True
+    def test_fetch_public_key_from_remote_node(self, mock_invoke, 
mock_tmpfile):
+        mock_invoke.side_effect = [True, True]
+        mock_tmpfile.return_value = (0, "temp_file_name")
 
-        bootstrap.init_ssh()
+        res = bootstrap.fetch_public_key_from_remote_node("node1")
+        self.assertEqual(res, "temp_file_name")
 
-        mock_start_service.assert_called_once_with("sshd.service")
         mock_invoke.assert_has_calls([
-            mock.call("mkdir -m 700 -p /root/.ssh"),
-            mock.call("ssh-keygen -q -f /root/.ssh/id_rsa -C 'Cluster 
Internal' -N ''")
-        ])
-        mock_exists.assert_called_once_with("/root/.ssh/id_rsa")
-        mock_confirm.assert_called_once_with("/root/.ssh/id_rsa already exists 
- overwrite?")
-        mock_rmfile.assert_called_once_with("/root/.ssh/id_rsa")
-        mock_status.assert_called_once_with("Generating SSH key")
-        mock_append.assert_called_once_with("/root/.ssh/id_rsa.pub", 
"/root/.ssh/authorized_keys")
+            mock.call("ssh -oStrictHostKeyChecking=no root@node1 'test -f 
/root/.ssh/id_rsa.pub'"),
+            mock.call("scp -oStrictHostKeyChecking=no 
root@node1:/root/.ssh/id_rsa.pub temp_file_name")
+            ])
+        mock_tmpfile.assert_called_once_with()
+
+    @mock.patch('crmsh.bootstrap.error')
+    def test_join_ssh_no_seed_host(self, mock_error):
+        mock_error.side_effect = ValueError
+        with self.assertRaises(ValueError):
+            bootstrap.join_ssh(None)
+        mock_error.assert_called_once_with("No existing IP/hostname specified 
(use -c option)")
 
-    @mock.patch('crmsh.bootstrap.rmfile')
-    @mock.patch('crmsh.bootstrap.confirm')
-    @mock.patch('os.path.exists')
-    @mock.patch('crmsh.bootstrap.start_service')
+    @mock.patch('crmsh.bootstrap.error')
     @mock.patch('crmsh.bootstrap.invoke')
-    def test_init_ssh_exits_keys_no_overwrite(self, mock_invoke, 
mock_start_service,
-                                              mock_exists, mock_confirm, 
mock_rmfile):
-        mock_exists.return_value = True
-        bootstrap._context = mock.Mock(yes_to_all=True, 
no_overwrite_sshkey=True)
+    @mock.patch('crmsh.bootstrap.swap_public_ssh_key')
+    @mock.patch('crmsh.bootstrap.configure_local_ssh_key')
+    @mock.patch('crmsh.bootstrap.start_service')
+    def test_join_ssh(self, mock_start_service, mock_config_ssh, mock_swap, 
mock_invoke, mock_error):
+        bootstrap._context = mock.Mock(default_nic_list=["eth1"])
+        mock_invoke.return_value = False
 
-        bootstrap.init_ssh()
+        bootstrap.join_ssh("node1")
 
         mock_start_service.assert_called_once_with("sshd.service")
-        mock_invoke.assert_called_once_with("mkdir -m 700 -p /root/.ssh")
-        mock_exists.assert_called_once_with("/root/.ssh/id_rsa")
-        mock_confirm.assert_not_called()
-        mock_rmfile.assert_not_called()
+        mock_config_ssh.assert_called_once_with()
+        mock_swap.assert_called_once_with("node1")
+        mock_invoke.assert_called_once_with("ssh root@node1 crm cluster init 
-i eth1 ssh_remote")
+        mock_error.assert_called_once_with("Can't invoke crm cluster init -i 
eth1 ssh_remote on node1")
+
+    @mock.patch('crmsh.bootstrap.warn')
+    @mock.patch('crmsh.bootstrap.fetch_public_key_from_remote_node')
+    @mock.patch('crmsh.utils.check_ssh_passwd_need')
+    def test_swap_public_ssh_key_exception(self, mock_check_passwd, 
mock_fetch, mock_warn):
+        mock_check_passwd.return_value = False
+        mock_fetch.side_effect = ValueError("No key exist")
+
+        bootstrap.swap_public_ssh_key("node1")
+
+        mock_warn.assert_called_once_with(mock_fetch.side_effect)
+        mock_check_passwd.assert_called_once_with("node1")
+        mock_fetch.assert_called_once_with("node1")
+
+    @mock.patch('crmsh.bootstrap.append_unique')
+    @mock.patch('crmsh.bootstrap.fetch_public_key_from_remote_node')
+    @mock.patch('crmsh.bootstrap.append_to_remote_file')
+    @mock.patch('crmsh.bootstrap.status')
+    @mock.patch('crmsh.utils.check_ssh_passwd_need')
+    def test_swap_public_ssh_key(self, mock_check_passwd, mock_status, 
mock_append_remote, mock_fetch, mock_append_unique):
+        mock_check_passwd.return_value = True
+        mock_fetch.return_value = "file1"
+
+        bootstrap.swap_public_ssh_key("node1")
+
+        mock_check_passwd.assert_called_once_with("node1")
+        mock_status.assert_called_once_with("Configuring SSH passwordless with 
root@node1")
+        mock_append_remote.assert_called_once_with(bootstrap.RSA_PUBLIC_KEY, 
"node1", bootstrap.AUTHORIZED_KEYS_FILE)
+        mock_fetch.assert_called_once_with("node1")
+        mock_append_unique.assert_called_once_with("file1", 
bootstrap.AUTHORIZED_KEYS_FILE)
+
+    @mock.patch('crmsh.bootstrap.error')
+    @mock.patch('crmsh.utils.get_stdout_stderr')
+    def test_setup_passwordless_with_other_nodes_cluster_inactive(self, 
mock_run, mock_error):
+        mock_run.return_value = (1, None, None)
+        mock_error.side_effect = SystemExit
+
+        with self.assertRaises(SystemExit):
+            bootstrap.setup_passwordless_with_other_nodes("node1")
+
+        mock_run.assert_called_once_with("ssh -o StrictHostKeyChecking=no 
root@node1 systemctl -q is-active pacemaker.service")
+        mock_error.assert_called_once_with("Cluster is inactive on node1")
+
+    @mock.patch('crmsh.bootstrap.error')
+    @mock.patch('crmsh.utils.get_stdout_stderr')
+    def test_setup_passwordless_with_other_nodes_failed_fetch_nodelist(self, 
mock_run, mock_error):
+        mock_run.side_effect = [(0, None, None), (1, None, None)]
+        mock_error.side_effect = SystemExit
+
+        with self.assertRaises(SystemExit):
+            bootstrap.setup_passwordless_with_other_nodes("node1")
+
+        mock_run.assert_has_calls([
+            mock.call("ssh -o StrictHostKeyChecking=no root@node1 systemctl -q 
is-active pacemaker.service"),
+            mock.call("ssh -o StrictHostKeyChecking=no root@node1 crm_node -l")
+            ])
+        mock_error.assert_called_once_with("Can't fetch cluster nodes list 
from node1: None")
+
+    @mock.patch('crmsh.bootstrap.error')
+    @mock.patch('crmsh.utils.get_stdout_stderr')
+    def test_setup_passwordless_with_other_nodes_failed_fetch_hostname(self, 
mock_run, mock_error):
+        out_node_list = """1 node1 member
+        2 node2 member"""
+        mock_run.side_effect = [
+                (0, None, None),
+                (0, out_node_list, None),
+                (1, None, None)
+                ]
+        mock_error.side_effect = SystemExit
+
+        with self.assertRaises(SystemExit):
+            bootstrap.setup_passwordless_with_other_nodes("node1")
+
+        mock_run.assert_has_calls([
+            mock.call("ssh -o StrictHostKeyChecking=no root@node1 systemctl -q 
is-active pacemaker.service"),
+            mock.call("ssh -o StrictHostKeyChecking=no root@node1 crm_node 
-l"),
+            mock.call("ssh -o StrictHostKeyChecking=no root@node1 hostname")
+            ])
+        mock_error.assert_called_once_with("Can't fetch hostname of node1: 
None")
+
+    @mock.patch('crmsh.bootstrap.swap_public_ssh_key')
+    @mock.patch('crmsh.utils.get_stdout_stderr')
+    def test_setup_passwordless_with_other_nodes(self, mock_run, mock_swap):
+        out_node_list = """1 node1 member
+        2 node2 member"""
+        mock_run.side_effect = [
+                (0, None, None),
+                (0, out_node_list, None),
+                (0, "node1", None)
+                ]
+
+        bootstrap.setup_passwordless_with_other_nodes("node1")
+
+        mock_run.assert_has_calls([
+            mock.call("ssh -o StrictHostKeyChecking=no root@node1 systemctl -q 
is-active pacemaker.service"),
+            mock.call("ssh -o StrictHostKeyChecking=no root@node1 crm_node 
-l"),
+            mock.call("ssh -o StrictHostKeyChecking=no root@node1 hostname")
+            ])
+        mock_swap.assert_called_once_with("node2")
 
     @mock.patch('builtins.open')
     @mock.patch('crmsh.bootstrap.append')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/unittests/test_corosync.py 
new/crmsh-4.2.0+git.1594199184.309141ea/test/unittests/test_corosync.py
--- old/crmsh-4.2.0+git.1592790745.eaa14889/test/unittests/test_corosync.py     
2020-06-22 03:52:25.000000000 +0200
+++ new/crmsh-4.2.0+git.1594199184.309141ea/test/unittests/test_corosync.py     
2020-07-08 11:06:24.000000000 +0200
@@ -495,7 +495,7 @@
     def test_check_ssh_passwd_need(self, mock_ssh_passwd):
         mock_ssh_passwd.return_value = True
         self.assertTrue(self.qdevice_with_ip.check_ssh_passwd_need())
-        mock_ssh_passwd.assert_called_once_with(["10.10.10.123"])
+        mock_ssh_passwd.assert_called_once_with("10.10.10.123")
 
     @mock.patch("crmsh.parallax.parallax_call")
     def test_remote_running_cluster_false(self, mock_call):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/crmsh-4.2.0+git.1592790745.eaa14889/test/unittests/test_utils.py 
new/crmsh-4.2.0+git.1594199184.309141ea/test/unittests/test_utils.py
--- old/crmsh-4.2.0+git.1592790745.eaa14889/test/unittests/test_utils.py        
2020-06-22 03:52:25.000000000 +0200
+++ new/crmsh-4.2.0+git.1594199184.309141ea/test/unittests/test_utils.py        
2020-07-08 11:06:24.000000000 +0200
@@ -22,6 +22,30 @@
     imp.reload(utils)
 
 
+@mock.patch('os.path.exists')
+def test_check_file_content_included_target_not_exist(mock_exists):
+    mock_exists.side_effect = [True, False]
+    res = utils.check_file_content_included("file1", "file2")
+    assert res is False
+    mock_exists.assert_has_calls([mock.call("file1"), mock.call("file2")])
+
+
+@mock.patch("builtins.open")
+@mock.patch('os.path.exists')
+def test_check_file_content_included(mock_exists, mock_open_file):
+    mock_exists.side_effect = [True, True]
+    mock_open_file.side_effect = [
+            mock.mock_open(read_data="data1").return_value,
+            mock.mock_open(read_data="data2").return_value
+        ]
+
+    res = utils.check_file_content_included("file1", "file2")
+    assert res is False
+
+    mock_exists.assert_has_calls([mock.call("file1"), mock.call("file2")])
+    mock_open_file.assert_has_calls([mock.call("file2", 'r'), 
mock.call("file1", 'r')])
+
+
 @mock.patch('re.search')
 @mock.patch('crmsh.utils.get_stdout')
 def test_get_nodeid_from_name_run_None1(mock_get_stdout, mock_re_search):
@@ -59,24 +83,12 @@
     mock_re_search_inst.group.assert_called_once_with(1)
 
 
-def test_check_ssh_passwd_need_True():
-    with mock.patch('crmsh.utils.get_stdout_stderr') as mock_get_stdout_stderr:
-        mock_get_stdout_stderr.side_effect = [(0, None, None), (1, None, None)]
-        assert utils.check_ssh_passwd_need(["node1", "node2"]) == True
-    mock_get_stdout_stderr.assert_has_calls([
-            mock.call('ssh -o StrictHostKeyChecking=no -o EscapeChar=none -o 
ConnectTimeout=15 -T -o Batchmode=yes node1 true'),
-            mock.call('ssh -o StrictHostKeyChecking=no -o EscapeChar=none -o 
ConnectTimeout=15 -T -o Batchmode=yes node2 true')
-        ])
-
-
-def test_check_ssh_passwd_need_Flase():
-    with mock.patch('crmsh.utils.get_stdout_stderr') as mock_get_stdout_stderr:
-        mock_get_stdout_stderr.side_effect = [(0, None, None), (0, None, None)]
-        assert utils.check_ssh_passwd_need(["node1", "node2"]) == False
-    mock_get_stdout_stderr.assert_has_calls([
-            mock.call('ssh -o StrictHostKeyChecking=no -o EscapeChar=none -o 
ConnectTimeout=15 -T -o Batchmode=yes node1 true'),
-            mock.call('ssh -o StrictHostKeyChecking=no -o EscapeChar=none -o 
ConnectTimeout=15 -T -o Batchmode=yes node2 true')
-        ])
+@mock.patch('crmsh.utils.get_stdout_stderr')
+def test_check_ssh_passwd_need(mock_run):
+    mock_run.return_value = (1, None, None)
+    res = utils.check_ssh_passwd_need("node1")
+    assert res is True
+    mock_run.assert_called_once_with("ssh -o StrictHostKeyChecking=no -o 
EscapeChar=none -o ConnectTimeout=15 -T -o Batchmode=yes node1 true")
 
 
 @mock.patch('crmsh.utils.common_debug')


Reply via email to