Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked in at 2023-03-27 18:18:02
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.31432 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "crmsh" Mon Mar 27 18:18:02 2023 rev:288 rq:1074728 version:4.5.0+20230327.c76ad5d5 Changes: -------- --- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes 2023-03-22 22:32:36.478786759 +0100 +++ /work/SRC/openSUSE:Factory/.crmsh.new.31432/crmsh.changes 2023-03-27 18:18:10.683707462 +0200 @@ -1,0 +2,34 @@ +Mon Mar 27 14:44:52 UTC 2023 - xli...@suse.com + +- Update to version 4.5.0+20230327.c76ad5d5: + * Fix: unittest: Adjust unit tests for previous changes + * Fix: utils: qdevice initialization should user_pair_for_ssh() to get appreciated users (crmsh#1157) + +------------------------------------------------------------------- +Mon Mar 27 09:44:27 UTC 2023 - xli...@suse.com + +- Update to version 4.5.0+20230327.3cbdd737: + * Dev: behave: Change docker image as liangxin1300/haleap:15.5 + * Fix: crm report: sustain if there are offline nodes (bsc#1209480) + +------------------------------------------------------------------- +Mon Mar 27 03:50:25 UTC 2023 - xli...@suse.com + +- Update to version 4.5.0+20230327.1d3873bd: + * Dev: behave: Add Scenario: Do upgrade job without root passwordless + * Dev: unittest: Add unit test case for previous changes + * Dev: upgradeutil: Change 'upgrade' terminology to 'configuration fix' + * Dev: behave: Reuse crmsh.utils.check_passwordless_between_nodes function + * Dev: utils: Check passwordless between cluster nodes + +------------------------------------------------------------------- +Thu Mar 23 14:46:37 UTC 2023 - xli...@suse.com + +- Update to version 4.5.0+20230323.9f3f56c1: + * Dev: behave: Adjust sbd functional test based on previous changes + * Dev: testcases: Adjust testcases after pacemaker upgrade + * Dev: workflows: split functional_test_resource_subcommand as two jobs + * Revert "Dev: workflows: Disable resource_failcount.feature temporarily" + * Dev: Dockerfile: Update pacemaker and libqb version + +------------------------------------------------------------------- Old: ---- crmsh-4.5.0+20230321.97bd51bb.tar.bz2 New: ---- crmsh-4.5.0+20230327.c76ad5d5.tar.bz2 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Other differences: ------------------ ++++++ crmsh.spec ++++++ --- /var/tmp/diff_new_pack.b4q7vi/_old 2023-03-27 18:18:11.179710079 +0200 +++ /var/tmp/diff_new_pack.b4q7vi/_new 2023-03-27 18:18:11.183710101 +0200 @@ -36,7 +36,7 @@ Summary: High Availability cluster command-line interface License: GPL-2.0-or-later Group: %{pkg_group} -Version: 4.5.0+20230321.97bd51bb +Version: 4.5.0+20230327.c76ad5d5 Release: 0 URL: http://crmsh.github.io Source0: %{name}-%{version}.tar.bz2 ++++++ _servicedata ++++++ --- /var/tmp/diff_new_pack.b4q7vi/_old 2023-03-27 18:18:11.227710332 +0200 +++ /var/tmp/diff_new_pack.b4q7vi/_new 2023-03-27 18:18:11.231710354 +0200 @@ -9,7 +9,7 @@ </service> <service name="tar_scm"> <param name="url">https://github.com/ClusterLabs/crmsh.git</param> - <param name="changesrevision">e751cc863b8189ee541fd84a72bf1584d163ffcf</param> + <param name="changesrevision">a108bd873f627434cea01de0805f0ebdc4b63b54</param> </service> </servicedata> (No newline at EOF) ++++++ crmsh-4.5.0+20230321.97bd51bb.tar.bz2 -> crmsh-4.5.0+20230327.c76ad5d5.tar.bz2 ++++++ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/.github/workflows/crmsh-ci.yml new/crmsh-4.5.0+20230327.c76ad5d5/.github/workflows/crmsh-ci.yml --- old/crmsh-4.5.0+20230321.97bd51bb/.github/workflows/crmsh-ci.yml 2023-03-21 16:02:58.000000000 +0100 +++ 
new/crmsh-4.5.0+20230327.c76ad5d5/.github/workflows/crmsh-ci.yml 2023-03-27 16:00:29.000000000 +0200 @@ -122,7 +122,19 @@ echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json sudo systemctl restart docker.service index=`$GET_INDEX_OF bootstrap_options` - $DOCKER_SCRIPT $index && $DOCKER_SCRIPT -d && $DOCKER_SCRIPT $index -u + $DOCKER_SCRIPT $index + + functional_test_bootstrap_options_non_root: + runs-on: ubuntu-20.04 + timeout-minutes: 40 + steps: + - uses: actions/checkout@v3 + - name: functional test for bootstrap options, under non root user + run: | + echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json + sudo systemctl restart docker.service + index=`$GET_INDEX_OF bootstrap_options` + $DOCKER_SCRIPT $index -u functional_test_qdevice_setup_remove: runs-on: ubuntu-20.04 @@ -170,7 +182,19 @@ echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json sudo systemctl restart docker.service index=`$GET_INDEX_OF qdevice_validate` - $DOCKER_SCRIPT $index && $DOCKER_SCRIPT -d && $DOCKER_SCRIPT $index -u + $DOCKER_SCRIPT $index + + functional_test_qdevice_validate_non_root: + runs-on: ubuntu-20.04 + timeout-minutes: 40 + steps: + - uses: actions/checkout@v3 + - name: functional test for qdevice validate, under non root user + run: | + echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json + sudo systemctl restart docker.service + index=`$GET_INDEX_OF qdevice_validate` + $DOCKER_SCRIPT $index -u functional_test_qdevice_user_case: runs-on: ubuntu-20.04 @@ -184,18 +208,42 @@ index=`$GET_INDEX_OF qdevice_usercase` $DOCKER_SCRIPT $index && $DOCKER_SCRIPT -d && $DOCKER_SCRIPT $index -u - functional_test_resource_subcommand: + functional_test_resource_failcount: runs-on: ubuntu-20.04 timeout-minutes: 40 steps: - uses: actions/checkout@v3 - - name: functional test for resource subcommand + - name: functional test for resource failcount run: | echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json sudo systemctl restart docker.service - index=`$GET_INDEX_OF resource_set` + index=`$GET_INDEX_OF resource_failcount` $DOCKER_SCRIPT $index && $DOCKER_SCRIPT -d && $DOCKER_SCRIPT $index -u + functional_test_resource_set: + runs-on: ubuntu-20.04 + timeout-minutes: 40 + steps: + - uses: actions/checkout@v3 + - name: functional test for resource set + run: | + echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json + sudo systemctl restart docker.service + index=`$GET_INDEX_OF resource_set` + $DOCKER_SCRIPT $index + + functional_test_resource_set_non_root: + runs-on: ubuntu-20.04 + timeout-minutes: 40 + steps: + - uses: actions/checkout@v3 + - name: functional test for resource set, under non root user + run: | + echo '{ "exec-opts": ["native.cgroupdriver=systemd"] }' | sudo tee /etc/docker/daemon.json + sudo systemctl restart docker.service + index=`$GET_INDEX_OF resource_set` + $DOCKER_SCRIPT $index -u + functional_test_configure_sublevel: runs-on: ubuntu-20.04 timeout-minutes: 40 @@ -284,12 +332,16 @@ functional_test_bootstrap_common, functional_test_bootstrap_common_non_root, functional_test_bootstrap_options, + functional_test_bootstrap_options_non_root, functional_test_qdevice_setup_remove, functional_test_qdevice_setup_remove_non_root, functional_test_qdevice_options, functional_test_qdevice_validate, + functional_test_qdevice_validate_non_root, functional_test_qdevice_user_case, - 
functional_test_resource_subcommand, + functional_test_resource_failcount, + functional_test_resource_set, + functional_test_resource_set_non_root, functional_test_configure_sublevel, functional_test_constraints_bugs, functional_test_geo_cluster, diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/Dockerfile new/crmsh-4.5.0+20230327.c76ad5d5/Dockerfile --- old/crmsh-4.5.0+20230321.97bd51bb/Dockerfile 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/Dockerfile 2023-03-27 16:00:29.000000000 +0200 @@ -5,8 +5,8 @@ ARG ssh_pub_key # docker build -t haleap --build-arg ssh_prv_key="$(cat /root/.ssh/id_rsa)" --build-arg ssh_pub_key="$(cat /root/.ssh/id_rsa.pub)" . # docker login -# docker tag haleap liangxin1300/haleap:15.4 -# docker push liangxin1300/haleap:15.4 +# docker tag haleap liangxin1300/haleap:15.5 +# docker push liangxin1300/haleap:15.5 RUN zypper ref RUN zypper -n install systemd @@ -16,7 +16,7 @@ RUN zypper --non-interactive up zypper RUN zypper ar -f -G https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/SLE_15_SP4 repo_nhf RUN zypper --non-interactive refresh -RUN zypper --non-interactive up --allow-vendor-change -y python3-parallax resource-agents +RUN zypper --non-interactive up --allow-vendor-change -y python3-parallax resource-agents libqb100 pacemaker RUN mkdir -p /var/log/crmsh RUN mkdir -p /root/.ssh && chmod 0700 /root/.ssh diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/crmsh/bootstrap.py new/crmsh-4.5.0+20230327.c76ad5d5/crmsh/bootstrap.py --- old/crmsh-4.5.0+20230321.97bd51bb/crmsh/bootstrap.py 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/crmsh/bootstrap.py 2023-03-27 16:00:29.000000000 +0200 @@ -1630,10 +1630,26 @@ utils.fatal("corosync-qdevice.service is not available on {}".format(node)) qdevice_inst = _context.qdevice_inst qnetd_addr = qdevice_inst.qnetd_addr - ssh_user = qdevice_inst.ssh_user if qdevice_inst.ssh_user is not None else _context.current_user + local_user = None + ssh_user = None + if qdevice_inst.ssh_user is not None: + # if the remote user is specified explicitly, use it + ssh_user = qdevice_inst.ssh_user + else: + try: + # if ssh session has ready been available, use that + local_user, ssh_user = utils.UserOfHost.instance().user_pair_for_ssh(qnetd_addr) + except utils.UserOfHost.UserNotFoundError: + pass + if local_user is None: + local_user = userdir.get_sudoer() + if local_user is None: + local_user = userdir.getuser() + if ssh_user is None: + ssh_user = local_user # Configure ssh passwordless to qnetd if detect password is needed - local_user = utils.user_of(utils.this_node()) if utils.check_ssh_passwd_need(local_user, ssh_user, qnetd_addr): + configure_ssh_key(local_user) utils.ssh_copy_id(local_user, ssh_user, qnetd_addr) user_by_host = utils.HostUserConfig() user_by_host.add(ssh_user, qnetd_addr) @@ -2463,7 +2479,7 @@ else: utils.fatal("Cluster is inactive on {}".format(cluster_node)) - lock_inst = lock.RemoteLock(remote_user, cluster_node) + lock_inst = lock.RemoteLock(cluster_node) try: with lock_inst.lock(): _context.node_list_in_cluster = utils.fetch_cluster_node_list_from_node(cluster_node) diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/crmsh/corosync.py new/crmsh-4.5.0+20230327.c76ad5d5/crmsh/corosync.py --- 
old/crmsh-4.5.0+20230321.97bd51bb/crmsh/corosync.py 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/crmsh/corosync.py 2023-03-27 16:00:29.000000000 +0200 @@ -8,6 +8,7 @@ import os import re import socket + from . import utils from . import tmpfiles from . import parallax @@ -97,6 +98,7 @@ """ Query qnetd status """ + import crmsh.bootstrap # workaround for circular dependencies if not utils.is_qdevice_configured(): raise ValueError("QDevice/QNetd not configured!") cluster_name = get_value('totem.cluster_name') @@ -109,6 +111,7 @@ # Configure ssh passwordless to qnetd if detect password is needed local_user, remote_user = utils.user_pair_for_ssh(qnetd_addr) if utils.check_ssh_passwd_need(local_user, remote_user, qnetd_addr): + crmsh.bootstrap.configure_ssh_key(local_user) utils.ssh_copy_id(local_user, remote_user, qnetd_addr) cmd = "corosync-qnetd-tool -lv -c {}".format(cluster_name) diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/crmsh/healthcheck.py new/crmsh-4.5.0+20230327.c76ad5d5/crmsh/healthcheck.py --- old/crmsh-4.5.0+20230321.97bd51bb/crmsh/healthcheck.py 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/crmsh/healthcheck.py 2023-03-27 16:00:29.000000000 +0200 @@ -115,6 +115,9 @@ SSH_DIR = os.path.expanduser('~hacluster/.ssh') KEY_TYPES = ['ed25519', 'ecdsa', 'rsa'] + def __str__(self): + return "Configure Passwordless for hacluster" + def check_quick(self) -> bool: for key_type in self.KEY_TYPES: try: diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/crmsh/lock.py new/crmsh-4.5.0+20230327.c76ad5d5/crmsh/lock.py --- old/crmsh-4.5.0+20230321.97bd51bb/crmsh/lock.py 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/crmsh/lock.py 2023-03-27 16:00:29.000000000 +0200 @@ -100,11 +100,10 @@ MIN_LOCK_TIMEOUT = 120 WAIT_INTERVAL = 10 - def __init__(self, remote_user, remote_node, for_join=True, lock_dir=None, wait=True, no_warn=False): + def __init__(self, remote_node, for_join=True, lock_dir=None, wait=True, no_warn=False): """ Init function """ - self.remote_user = remote_user self.remote_node = remote_node self.for_join = for_join self.wait = wait diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/crmsh/qdevice.py new/crmsh-4.5.0+20230327.c76ad5d5/crmsh/qdevice.py --- old/crmsh-4.5.0+20230321.97bd51bb/crmsh/qdevice.py 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/crmsh/qdevice.py 2023-03-27 16:00:29.000000000 +0200 @@ -69,8 +69,7 @@ def wrapper(*args, **kwargs): cluster_name = args[0].cluster_name lock_dir = "/run/.crmsh_qdevice_lock_for_{}".format(cluster_name) - lock_inst = lock.RemoteLock(utils.user_of(args[0].qnetd_addr), args[0].qnetd_addr - , for_join=False, lock_dir=lock_dir, wait=False) + lock_inst = lock.RemoteLock(args[0].qnetd_addr, for_join=False, lock_dir=lock_dir, wait=False) try: with lock_inst.lock(): func(*args, **kwargs) @@ -87,8 +86,7 @@ """ @functools.wraps(func) def wrapper(*args, **kwargs): - lock_inst = lock.RemoteLock(utils.user_of(args[0].qnetd_addr) - , args[0].qnetd_addr, for_join=False, no_warn=True) + lock_inst = lock.RemoteLock(args[0].qnetd_addr, for_join=False, no_warn=True) try: with lock_inst.lock(): func(*args, **kwargs) diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' 
old/crmsh-4.5.0+20230321.97bd51bb/crmsh/report/utillib.py new/crmsh-4.5.0+20230327.c76ad5d5/crmsh/report/utillib.py --- old/crmsh-4.5.0+20230321.97bd51bb/crmsh/report/utillib.py 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/crmsh/report/utillib.py 2023-03-27 16:00:29.000000000 +0200 @@ -1375,6 +1375,9 @@ if err: print(err) + if out == '': # if we couldn't get anything + return + compress_data = "" for data in out.split('\n'): if data.startswith(constants.COMPRESS_DATA_FLAG): diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/crmsh/upgradeutil.py new/crmsh-4.5.0+20230327.c76ad5d5/crmsh/upgradeutil.py --- old/crmsh-4.5.0+20230321.97bd51bb/crmsh/upgradeutil.py 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/crmsh/upgradeutil.py 2023-03-27 16:00:29.000000000 +0200 @@ -109,11 +109,11 @@ for feature_class in VERSION_FEATURES[key]: feature = feature_class() if crmsh.healthcheck.feature_full_check(feature, nodes): - logger.debug("upgradeutil: feature %s is already functional.") + logger.debug("upgradeutil: feature '%s' is already functional.", str(feature)) else: - logger.debug("upgradeutil: fixing feature %s...") + logger.debug("upgradeutil: fixing feature '%s'...", str(feature)) crmsh.healthcheck.feature_fix(feature, nodes, ask) - logger.debug("upgradeutil: upgrade succeeded.") + logger.debug("upgradeutil: configuration fix succeeded.") except crmsh.healthcheck.AskDeniedByUser: raise _SkipUpgrade() from None @@ -124,11 +124,11 @@ if not crmsh.utils.can_ask(background_wait=False): return nodes = crmsh.utils.list_cluster_nodes(no_reg=True) - if nodes and _is_upgrade_needed(nodes): - logger.debug("upgradeutil: configuration upgrade needed") + if nodes and _is_upgrade_needed(nodes) and not crmsh.utils.check_passwordless_between_nodes(nodes): + logger.debug("upgradeutil: configuration fix needed") try: if not _is_cluster_target_seq_consistent(nodes): - logger.warning("crmsh version is inconsistent in cluster.") + logger.warning("crmsh configuration is inconsistent in cluster.") raise _SkipUpgrade() seq = _get_minimal_seq_in_cluster(nodes) logger.debug( @@ -137,7 +137,7 @@ ) _upgrade(nodes, seq) except _SkipUpgrade: - logger.debug("upgradeutil: upgrade skipped") + logger.debug("upgradeutil: configuration fix skipped") return # TODO: replace with parallax_copy when it is ready for node in nodes: @@ -150,7 +150,7 @@ node, ) crmsh.parallax.parallax_call(nodes, 'rm -f {}'.format(FORCE_UPGRADE_FILE_PATH)) - logger.debug("upgrade finished") + logger.debug("configuration fix finished") def force_set_local_upgrade_seq(): diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/crmsh/utils.py new/crmsh-4.5.0+20230327.c76ad5d5/crmsh/utils.py --- old/crmsh-4.5.0+20230321.97bd51bb/crmsh/utils.py 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/crmsh/utils.py 2023-03-27 16:00:29.000000000 +0200 @@ -112,6 +112,10 @@ class UserNotFoundError(Exception): pass + @staticmethod + def instance(): + return _user_of_host_instance + def __init__(self): self._user_cache = dict() self._user_pair_cache = dict() @@ -130,6 +134,8 @@ def user_pair_for_ssh(self, host: str) -> typing.Tuple[str, str]: """Return (local_user, remote_user) pair for ssh connection""" + local_user = None + remote_user = None try: local_user = self.user_of(this_node()) remote_user = self.user_of(host) @@ -137,12 +143,17 @@ except 
self.UserNotFoundError: cached = self._user_pair_cache.get(host) if cached is None: - ret = self._guess_user_for_ssh(host) - if ret is None: - raise ValueError('Can not create ssh session from {} to {}.'.format(this_node(), host)) - else: + if local_user is not None: + ret = local_user, local_user self._user_pair_cache[host] = ret return ret + else: + ret = self._guess_user_for_ssh(host) + if ret is None: + raise self.UserNotFoundError + else: + self._user_pair_cache[host] = ret + return ret else: return cached @@ -174,7 +185,7 @@ def _guess_user_for_ssh(host: str) -> typing.Tuple[str, str]: args = ['ssh'] args.extend(constants.SSH_OPTION_ARGS) - args.extend(['-o', 'BatchMode=yes', host, 'true']) + args.extend(['-o', 'BatchMode=yes', host, 'sudo', 'true']) rc = subprocess.call( args, stdin=subprocess.DEVNULL, @@ -189,7 +200,7 @@ return None result = su_subprocess_run( sudoer, - 'ssh {} -o BatchMode=yes {}@{} sudo -u {} true'.format(constants.SSH_OPTION, sudoer, host, sudoer), + 'ssh {} -o BatchMode=yes {}@{} sudo true'.format(constants.SSH_OPTION, sudoer, host), stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, @@ -208,7 +219,10 @@ def user_pair_for_ssh(host): - return _user_of_host_instance.user_pair_for_ssh(host) + try: + return _user_of_host_instance.user_pair_for_ssh(host) + except UserOfHost.UserNotFoundError: + raise ValueError('Can not create ssh session from {} to {}.'.format(this_node(), host)) def ssh_copy_id(local_user, remote_user, remote_node): @@ -2368,6 +2382,39 @@ return rc != 0 +def check_passwordless_between_nodes(node_list, user='root'): + """ + Check passwordless between cluster nodes + Suppose each node has the same user + Return a list of hosts that require passwords between + """ + need_pw_pair_list = [] + me = this_node() + # check local node between remote node + for node in node_list: + if node == me: + continue + if check_ssh_passwd_need(user, user, node): + need_pw_pair_list.append((me, node)) + + if not need_pw_pair_list: + for node in node_list: + for n in node_list: + if node == n or node == me: + continue + if user == 'root': + cmd = f"ssh {node} \"ssh {SSH_OPTION} -T -o Batchmode=yes {user}@{n} true\"" + else: + cmd = f"ssh {node} \"su - {user} -c 'ssh {SSH_OPTION} -T -o Batchmode=yes {user}@{n} true'\"" + rc, _, _ = get_stdout_stderr(cmd) + if rc != 0: + need_pw_pair_list.append((node, n)) + + for m, n in need_pw_pair_list: + logger.debug("There is no passwordless configured from %s to %s under '%s'", m, n, user) + return need_pw_pair_list + + def check_port_open(ip, port): import socket diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/test/features/bootstrap_bugs.feature new/crmsh-4.5.0+20230327.c76ad5d5/test/features/bootstrap_bugs.feature --- old/crmsh-4.5.0+20230321.97bd51bb/test/features/bootstrap_bugs.feature 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/test/features/bootstrap_bugs.feature 2023-03-27 16:00:29.000000000 +0200 @@ -155,3 +155,17 @@ When Run "su xin -c "sudo crm cluster run 'touch /tmp/1209193'"" on "hanode1" And Run "test -f /tmp/1209193" on "hanode1" And Run "test -f /tmp/1209193" on "hanode2" + + @clean + @skip_non_root + Scenario: Do upgrade job without root passwordless + Given Cluster service is "stopped" on "hanode1" + And Cluster service is "stopped" on "hanode2" + When Run "crm cluster init -y" on "hanode1" + Then Cluster service is "started" on "hanode1" + When Run "crm cluster join -c hanode1 -y" 
on "hanode2" + Then Cluster service is "started" on "hanode2" + When Run "rm -f /var/lib/crmsh/upgrade_seq" on "hanode1" + And Run "rm -f /root/.config/crm/crm.conf" on "hanode1" + And Run "rm -rf /root/.ssh" on "hanode1" + And Run "crm status" on "hanode1" diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/test/features/bootstrap_sbd_normal.feature new/crmsh-4.5.0+20230327.c76ad5d5/test/features/bootstrap_sbd_normal.feature --- old/crmsh-4.5.0+20230321.97bd51bb/test/features/bootstrap_sbd_normal.feature 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/test/features/bootstrap_sbd_normal.feature 2023-03-27 16:00:29.000000000 +0200 @@ -41,15 +41,15 @@ Given Cluster service is "stopped" on "hanode1" Given Cluster service is "stopped" on "hanode2" When Run "crm cluster init -S -y" on "hanode1" - Then Expected "Diskless SBD requires cluster with three or more nodes." in stdout + Then Expected "Diskless SBD requires cluster with three or more nodes." in stderr Then Cluster service is "started" on "hanode1" And Service "sbd" is "started" on "hanode1" When Run "crm cluster join -c hanode1 -y" on "hanode2" - Then Expected "Diskless SBD requires cluster with three or more nodes." in stdout + Then Expected "Diskless SBD requires cluster with three or more nodes." in stderr Then Cluster service is "started" on "hanode2" And Service "sbd" is "started" on "hanode2" When Run "crm cluster join -c hanode1 -y" on "hanode3" - Then Expected "Diskless SBD requires cluster with three or more nodes." not in stdout + Then Expected "Diskless SBD requires cluster with three or more nodes." not in stderr Then Cluster service is "started" on "hanode3" And Service "sbd" is "started" on "hanode3" And Resource "stonith:external/sbd" not configured @@ -133,7 +133,7 @@ And Online nodes are "hanode1 hanode2" When Run "crm configure primitive d Dummy op monitor interval=3s" on "hanode1" When Run "crm cluster init sbd -s /dev/sda1 -y" on "hanode1" - Then Expected "WARNING: To start sbd.service, need to restart cluster service manually on each node" in stdout + Then Expected "WARNING: To start sbd.service, need to restart cluster service manually on each node" in stderr Then Service "sbd" is "stopped" on "hanode1" And Service "sbd" is "stopped" on "hanode2" When Run "crm cluster restart" on "hanode1" @@ -173,7 +173,27 @@ Then Cluster service is "started" on "hanode2" And Service "sbd" is "started" on "hanode2" When Run "stonith_admin -H hanode2 -c" on "hanode1" - When Run "su hacluster -c 'crm -F node fence hanode2'" on "hanode1" + When Run "crm -F node fence hanode2" on "hanode1" + Then Expected return code is "0" + Then Node "hanode2" is UNCLEAN + Then Wait "60" seconds for "hanode2" successfully fenced + + @clean + @skip_non_root + Scenario: Setup sbd and test fence node, use hacluster to fence + Given Has disk "/dev/sda1" on "hanode1" + Given Cluster service is "stopped" on "hanode1" + Given Has disk "/dev/sda1" on "hanode2" + Given Cluster service is "stopped" on "hanode2" + When Run "crm cluster init -s /dev/sda1 -y" on "hanode1" + Then Cluster service is "started" on "hanode1" + And Service "sbd" is "started" on "hanode1" + And Resource "stonith-sbd" type "external/sbd" is "Started" + When Run "crm cluster join -c hanode1 -y" on "hanode2" + Then Cluster service is "started" on "hanode2" + And Service "sbd" is "started" on "hanode2" + When Run "stonith_admin -H hanode2 -c" on "hanode1" + When Run "su hacluster -c 
'/usr/sbin/crm -F node fence hanode2'" on "hanode1" Then Expected return code is "0" Then Node "hanode2" is UNCLEAN Then Wait "60" seconds for "hanode2" successfully fenced diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/test/features/steps/step_implementation.py new/crmsh-4.5.0+20230327.c76ad5d5/test/features/steps/step_implementation.py --- old/crmsh-4.5.0+20230321.97bd51bb/test/features/steps/step_implementation.py 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/test/features/steps/step_implementation.py 2023-03-27 16:00:29.000000000 +0200 @@ -483,17 +483,11 @@ def step_impl(context, nodelist): if userdir.getuser() != 'root': return True - ssh_option = "-o StrictHostKeyChecking=no -T -o Batchmode=yes" - node_list = nodelist.split() - for node in node_list: - for n in node_list: - if node == n: - continue - cmd = f"su - hacluster -c 'ssh {ssh_option} hacluster@{n} true'" - if node != me(): - cmd = f"ssh {node} \"{cmd}\"" - context.logger.info(f"\nRun cmd: {cmd}") - run_command(context, cmd) + need_pw_list = crmutils.check_passwordless_between_nodes(nodelist.split(), 'hacluster') + for m, n in need_pw_list: + context.logger.error(f"There is no passwordless configured from {m} to {n} under 'hacluster'") + assert need_pw_list == [] + @then('Check user shell for hacluster between "{nodelist}"') def step_impl(context, nodelist): diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/test/run-functional-tests new/crmsh-4.5.0+20230327.c76ad5d5/test/run-functional-tests --- old/crmsh-4.5.0+20230321.97bd51bb/test/run-functional-tests 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/test/run-functional-tests 2023-03-27 16:00:29.000000000 +0200 @@ -1,5 +1,5 @@ #!/bin/bash -DOCKER_IMAGE=${DOCKER_IMAGE:-"liangxin1300/haleap:15.4"} +DOCKER_IMAGE=${DOCKER_IMAGE:-"liangxin1300/haleap:15.5"} PROJECT_PATH=$(dirname $(dirname `realpath $0`)) PROJECT_INSIDE="/opt/crmsh" DOCKER_SERVICE="docker.service" diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/test/testcases/node.exp new/crmsh-4.5.0+20230327.c76ad5d5/test/testcases/node.exp --- old/crmsh-4.5.0+20230321.97bd51bb/test/testcases/node.exp 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/test/testcases/node.exp 2023-03-27 16:00:29.000000000 +0200 @@ -163,7 +163,6 @@ .TRY node attribute node1 delete a1 .EXT crm_attribute -D -t nodes -N 'node1' -n 'a1' Deleted nodes attribute: id=nodes-node1-a1 name=a1 - .INP: configure .INP: _regtest on .INP: show xml node1 diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/test/testcases/ra.exp new/crmsh-4.5.0+20230327.c76ad5d5/test/testcases/ra.exp --- old/crmsh-4.5.0+20230321.97bd51bb/test/testcases/ra.exp 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/test/testcases/ra.exp 2023-03-27 16:00:29.000000000 +0200 @@ -68,96 +68,76 @@ Viva la Vida Loca! pcmk_host_argument (string, [port]): Advanced use only: An alternate parameter to supply instead of 'port' - Some devices do not support the standard 'port' parameter or may provide additional ones. - Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced. 
- A value of 'none' can be used to tell the cluster not to supply any additional parameters. + some devices do not support the standard 'port' parameter or may provide additional ones. Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced. A value of none can be used to tell the cluster not to supply any additional parameters. pcmk_host_map (string): A mapping of host names to ports numbers for devices that do not support host names. Eg. node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2 -pcmk_host_list (string): A list of machines controlled by this device (Optional unless pcmk_host_check=static-list). - Eg. node1,node2,node3 +pcmk_host_list (string): Eg. node1,node2,node3 + A list of machines controlled by this device (Optional unless pcmk_host_list=static-list) pcmk_host_check (string, [dynamic-list]): How to determine which machines are controlled by the device. Allowed values: dynamic-list (query the device via the 'list' command), static-list (check the pcmk_host_list attribute), status (query the device via the 'status' command), none (assume every device can fence every machine) -pcmk_delay_max (time, [0s]): Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum. - This prevents double fencing when using slow devices such as sbd. - Use this to enable a random delay for fencing actions. - The overall delay is derived from this random delay value adding a static delay so that the sum is kept below the maximum delay. - -pcmk_delay_base (time, [0s]): Enable a base delay for fencing actions and specify base delay value. - This prevents double fencing when different delays are configured on the nodes. - Use this to enable a static delay for fencing actions. - The overall delay is derived from a random delay value adding this static delay so that the sum is kept below the maximum delay. - Set to eg. node1:1s;node2:5 to set different value per node. +pcmk_delay_max (time, [0s]): Enable a base delay for fencing actions and specify base delay value. + Enable a delay of no more than the time specified before executing fencing actions. Pacemaker derives the overall delay by taking the value of pcmk_delay_base and adding a random delay value such that the sum is kept below this maximum. + +pcmk_delay_base (string, [0s]): Enable a base delay for fencing actions and specify base delay value. + This enables a static delay for fencing actions, which can help avoid "death matches" where two nodes try to fence each other at the same time. If pcmk_delay_max is also used, a random delay will be added such that the total delay is kept below that value.This can be set to a single time value to apply to any node targeted by this device (useful if a separate device is configured for each target), or to a node map (for example, "node1:1s;node2:5") to set a different value per target. pcmk_action_limit (integer, [1]): The maximum number of actions can be performed in parallel on this device - Cluster property concurrent-fencing=true needs to be configured first. - Then use this to specify the maximum number of actions can be performed in parallel on this device. -1 is unlimited. 
+ Cluster property concurrent-fencing=true needs to be configured first.Then use this to specify the maximum number of actions can be performed in parallel on this device. -1 is unlimited. pcmk_reboot_action (string, [reboot]): Advanced use only: An alternate command to run instead of 'reboot' - Some devices do not support the standard commands or may provide additional ones. - Use this to specify an alternate, device-specific, command that implements the 'reboot' action. + Some devices do not support the standard commands or may provide additional ones.\nUse this to specify an alternate, device-specific, command that implements the 'reboot' action. pcmk_reboot_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for reboot actions instead of stonith-timeout - Some devices need much more/less time to complete than normal. - Use this to specify an alternate, device-specific, timeout for 'reboot' actions. + Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'reboot' actions. pcmk_reboot_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'reboot' command within the timeout period Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'reboot' actions before giving up. pcmk_off_action (string, [off]): Advanced use only: An alternate command to run instead of 'off' - Some devices do not support the standard commands or may provide additional ones. - Use this to specify an alternate, device-specific, command that implements the 'off' action. + Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'off' action. pcmk_off_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for off actions instead of stonith-timeout - Some devices need much more/less time to complete than normal. - Use this to specify an alternate, device-specific, timeout for 'off' actions. + Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'off' actions. pcmk_off_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'off' command within the timeout period Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'off' actions before giving up. pcmk_on_action (string, [on]): Advanced use only: An alternate command to run instead of 'on' - Some devices do not support the standard commands or may provide additional ones. - Use this to specify an alternate, device-specific, command that implements the 'on' action. + Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'on' action. pcmk_on_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for on actions instead of stonith-timeout - Some devices need much more/less time to complete than normal. - Use this to specify an alternate, device-specific, timeout for 'on' actions. 
+ Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'on' actions. pcmk_on_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'on' command within the timeout period Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'on' actions before giving up. pcmk_list_action (string, [list]): Advanced use only: An alternate command to run instead of 'list' - Some devices do not support the standard commands or may provide additional ones. - Use this to specify an alternate, device-specific, command that implements the 'list' action. + Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'list' action. pcmk_list_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for list actions instead of stonith-timeout - Some devices need much more/less time to complete than normal. - Use this to specify an alternate, device-specific, timeout for 'list' actions. + Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'list' actions. pcmk_list_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'list' command within the timeout period Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'list' actions before giving up. pcmk_monitor_action (string, [monitor]): Advanced use only: An alternate command to run instead of 'monitor' - Some devices do not support the standard commands or may provide additional ones. - Use this to specify an alternate, device-specific, command that implements the 'monitor' action. + Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'monitor' action. pcmk_monitor_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for monitor actions instead of stonith-timeout - Some devices need much more/less time to complete than normal. - Use this to specify an alternate, device-specific, timeout for 'monitor' actions. + Some devices need much more/less time to complete than normal.\nUse this to specify an alternate, device-specific, timeout for 'monitor' actions. pcmk_monitor_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'monitor' command within the timeout period Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'monitor' actions before giving up. pcmk_status_action (string, [status]): Advanced use only: An alternate command to run instead of 'status' - Some devices do not support the standard commands or may provide additional ones. - Use this to specify an alternate, device-specific, command that implements the 'status' action. 
+ Some devices do not support the standard commands or may provide additional ones.Use this to specify an alternate, device-specific, command that implements the 'status' action. pcmk_status_timeout (time, [60s]): Advanced use only: Specify an alternate timeout to use for status actions instead of stonith-timeout - Some devices need much more/less time to complete than normal. - Use this to specify an alternate, device-specific, timeout for 'status' actions. + Some devices need much more/less time to complete than normal.Use this to specify an alternate, device-specific, timeout for 'status' actions. pcmk_status_retries (integer, [2]): Advanced use only: The maximum number of times to retry the 'status' command within the timeout period Some devices do not support multiple connections. Operations may 'fail' if the device is busy with another task so Pacemaker will automatically retry the operation, if there is time remaining. Use this option to alter the number of times Pacemaker retries 'status' actions before giving up. diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/test/testcases/resource.exp new/crmsh-4.5.0+20230327.c76ad5d5/test/testcases/resource.exp --- old/crmsh-4.5.0+20230321.97bd51bb/test/testcases/resource.exp 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/test/testcases/resource.exp 2023-03-27 16:00:29.000000000 +0200 @@ -1176,24 +1176,24 @@ Stopped: [ node1 ] Original: node1 capacity: -pcmk__native_allocate: st allocation score on node1: 0 +pcmk__primitive_assign: st allocation score on node1: 0 pcmk__clone_allocate: c1 allocation score on node1: 0 pcmk__clone_allocate: p1:0 allocation score on node1: 0 -pcmk__native_allocate: p1:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: p1:0 allocation score on node1: -INFINITY pcmk__clone_allocate: m1 allocation score on node1: 0 pcmk__clone_allocate: p2:0 allocation score on node1: 0 -pcmk__native_allocate: p2:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: p2:0 allocation score on node1: -INFINITY p2:0 promotion score on none: 0 -pcmk__native_allocate: p3 allocation score on node1: -INFINITY +pcmk__primitive_assign: p3 allocation score on node1: -INFINITY pcmk__clone_allocate: msg allocation score on node1: 0 pcmk__clone_allocate: g:0 allocation score on node1: 0 pcmk__clone_allocate: p0:0 allocation score on node1: 0 pcmk__clone_allocate: p4:0 allocation score on node1: 0 -pcmk__group_allocate: g:0 allocation score on node1: -INFINITY -pcmk__group_allocate: p0:0 allocation score on node1: -INFINITY -pcmk__group_allocate: p4:0 allocation score on node1: -INFINITY -pcmk__native_allocate: p0:0 allocation score on node1: -INFINITY -pcmk__native_allocate: p4:0 allocation score on node1: -INFINITY +pcmk__group_assign: g:0 allocation score on node1: -INFINITY +pcmk__group_assign: p0:0 allocation score on node1: -INFINITY +pcmk__group_assign: p4:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: p0:0 allocation score on node1: -INFINITY +pcmk__primitive_assign: p4:0 allocation score on node1: -INFINITY g:0 promotion score on none: 0 Remaining: node1 capacity: diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/test/unittests/test_bootstrap.py new/crmsh-4.5.0+20230327.c76ad5d5/test/unittests/test_bootstrap.py --- old/crmsh-4.5.0+20230321.97bd51bb/test/unittests/test_bootstrap.py 2023-03-21 16:02:58.000000000 +0100 +++ 
new/crmsh-4.5.0+20230327.c76ad5d5/test/unittests/test_bootstrap.py 2023-03-27 16:00:29.000000000 +0200 @@ -798,22 +798,24 @@ mock_disable.assert_called_once_with("corosync-qdevice.service") @mock.patch('crmsh.utils.HostUserConfig') - @mock.patch('crmsh.utils.user_of') + @mock.patch('crmsh.utils.UserOfHost.instance') @mock.patch('crmsh.utils.list_cluster_nodes') @mock.patch('crmsh.utils.ssh_copy_id') + @mock.patch('crmsh.bootstrap.configure_ssh_key') @mock.patch('crmsh.utils.check_ssh_passwd_need') @mock.patch('logging.Logger.info') def test_init_qdevice_copy_ssh_key_failed( self, mock_status, mock_check_ssh_passwd_need, - mock_ssh_copy_id, mock_list_nodes, mock_userof, + mock_configure_ssh_key, mock_ssh_copy_id, mock_list_nodes, mock_user_of_host, mock_host_user_config_class, ): mock_list_nodes.return_value = [] bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, current_user="bob", user_list=["alice"]) mock_check_ssh_passwd_need.return_value = True mock_ssh_copy_id.side_effect = ValueError('foo') - mock_userof.return_value = "bob" + mock_user_of_host.return_value = mock.MagicMock(crmsh.utils.UserOfHost) + mock_user_of_host.return_value.user_pair_for_ssh.return_value = "bob", "bob" with self.assertRaises(ValueError): bootstrap.init_qdevice() @@ -822,25 +824,28 @@ mock.call("Configure Qdevice/Qnetd:"), ]) mock_check_ssh_passwd_need.assert_called_once_with("bob", "bob", "10.10.10.123") + mock_configure_ssh_key.assert_called_once_with('bob') mock_ssh_copy_id.assert_called_once_with('bob', 'bob', '10.10.10.123') @mock.patch('crmsh.utils.HostUserConfig') - @mock.patch('crmsh.utils.user_of') + @mock.patch('crmsh.utils.UserOfHost.instance') @mock.patch('crmsh.utils.list_cluster_nodes') @mock.patch('crmsh.bootstrap.confirm') @mock.patch('crmsh.utils.is_qdevice_configured') + @mock.patch('crmsh.bootstrap.configure_ssh_key') @mock.patch('crmsh.utils.check_ssh_passwd_need') @mock.patch('logging.Logger.info') def test_init_qdevice_already_configured( self, - mock_status, mock_ssh, - mock_qdevice_configured, mock_confirm, mock_list_nodes, mock_userof, + mock_status, mock_ssh, mock_configure_ssh_key, + mock_qdevice_configured, mock_confirm, mock_list_nodes, mock_user_of_host, mock_host_user_config_class, ): mock_list_nodes.return_value = [] bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, current_user="bob", user_list=["alice"]) mock_ssh.return_value = False - mock_userof.return_value = "bob" + mock_user_of_host.return_value = mock.MagicMock(crmsh.utils.UserOfHost) + mock_user_of_host.return_value.user_pair_for_ssh.return_value = "bob", "bob" mock_qdevice_configured.return_value = True mock_confirm.return_value = False self.qdevice_with_ip.start_qdevice_service = mock.Mock() @@ -849,25 +854,28 @@ mock_status.assert_called_once_with("Configure Qdevice/Qnetd:") mock_ssh.assert_called_once_with("bob", "bob", "10.10.10.123") + mock_configure_ssh_key.assert_not_called() mock_host_user_config_class.return_value.save_remote.assert_called_once_with(mock_list_nodes.return_value) mock_qdevice_configured.assert_called_once_with() mock_confirm.assert_called_once_with("Qdevice is already configured - overwrite?") self.qdevice_with_ip.start_qdevice_service.assert_called_once_with() @mock.patch('crmsh.utils.HostUserConfig') - @mock.patch('crmsh.utils.user_of') + @mock.patch('crmsh.utils.UserOfHost.instance') @mock.patch('crmsh.bootstrap.adjust_priority_fencing_delay') @mock.patch('crmsh.bootstrap.adjust_priority_in_rsc_defaults') @mock.patch('crmsh.utils.list_cluster_nodes') 
@mock.patch('crmsh.utils.is_qdevice_configured') + @mock.patch('crmsh.bootstrap.configure_ssh_key') @mock.patch('crmsh.utils.check_ssh_passwd_need') @mock.patch('logging.Logger.info') - def test_init_qdevice(self, mock_info, mock_ssh, mock_qdevice_configured, mock_list_nodes, - mock_adjust_priority, mock_adjust_fence_delay, mock_userof, mock_host_user_config_class): + def test_init_qdevice(self, mock_info, mock_ssh, mock_configure_ssh_key, mock_qdevice_configured, mock_list_nodes, + mock_adjust_priority, mock_adjust_fence_delay, mock_user_of_host, mock_host_user_config_class): bootstrap._context = mock.Mock(qdevice_inst=self.qdevice_with_ip, current_user="bob", user_list=["alice"]) mock_list_nodes.return_value = [] mock_ssh.return_value = False - mock_userof.return_value = "bob" + mock_user_of_host.return_value = mock.MagicMock(crmsh.utils.UserOfHost) + mock_user_of_host.return_value.user_pair_for_ssh.return_value = "bob", "bob" mock_qdevice_configured.return_value = False self.qdevice_with_ip.set_cluster_name = mock.Mock() self.qdevice_with_ip.valid_qnetd = mock.Mock() diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/test/unittests/test_corosync.py new/crmsh-4.5.0+20230327.c76ad5d5/test/unittests/test_corosync.py --- old/crmsh-4.5.0+20230321.97bd51bb/test/unittests/test_corosync.py 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/test/unittests/test_corosync.py 2023-03-27 16:00:29.000000000 +0200 @@ -180,11 +180,12 @@ @mock.patch('crmsh.utils.user_pair_for_ssh') @mock.patch("crmsh.parallax.parallax_call") @mock.patch("crmsh.utils.ssh_copy_id") +@mock.patch('crmsh.bootstrap.configure_ssh_key') @mock.patch("crmsh.utils.check_ssh_passwd_need") @mock.patch("crmsh.corosync.get_value") @mock.patch("crmsh.utils.is_qdevice_configured") def test_query_qnetd_status_copy_id_failed(mock_qdevice_configured, - mock_get_value, mock_check_passwd, mock_ssh_copy_id, mock_parallax_call, mock_user_pair_for_ssh): + mock_get_value, mock_check_passwd, mock_config_ssh_key, mock_ssh_copy_id, mock_parallax_call, mock_user_pair_for_ssh): mock_user_pair_for_ssh.return_value = "alice", "root" mock_parallax_call.side_effect = ValueError("Failed on 10.10.10.123: foo") mock_qdevice_configured.return_value = True @@ -199,6 +200,7 @@ mock.call("quorum.device.net.host") ]) mock_check_passwd.assert_called_once_with("alice", "root", "10.10.10.123") + mock_config_ssh_key.assert_called_once_with('alice') mock_ssh_copy_id.assert_called_once_with('alice', 'root', '10.10.10.123') @@ -206,11 +208,12 @@ @mock.patch("crmsh.utils.print_cluster_nodes") @mock.patch("crmsh.parallax.parallax_call") @mock.patch("crmsh.utils.ssh_copy_id") +@mock.patch('crmsh.bootstrap.configure_ssh_key') @mock.patch("crmsh.utils.check_ssh_passwd_need") @mock.patch("crmsh.corosync.get_value") @mock.patch("crmsh.utils.is_qdevice_configured") def test_query_qnetd_status_copy(mock_qdevice_configured, mock_get_value, - mock_check_passwd, mock_ssh_copy_id, mock_parallax_call, mock_print_nodes, + mock_check_passwd, mock_config_ssh_key, mock_ssh_copy_id, mock_parallax_call, mock_print_nodes, mock_user_pair_for_ssh): mock_user_pair_for_ssh.return_value = "alice", "root" mock_qdevice_configured.return_value = True @@ -226,6 +229,7 @@ mock.call("quorum.device.net.host") ]) mock_check_passwd.assert_called_once_with("alice", "root", "10.10.10.123") + mock_config_ssh_key.assert_called_once_with('alice') mock_ssh_copy_id.assert_called_once_with('alice', 'root', 
'10.10.10.123') mock_parallax_call.assert_called_once_with(["10.10.10.123"], "corosync-qnetd-tool -lv -c hacluster") mock_print_nodes.assert_called_once_with() diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/test/unittests/test_lock.py new/crmsh-4.5.0+20230327.c76ad5d5/test/unittests/test_lock.py --- old/crmsh-4.5.0+20230321.97bd51bb/test/unittests/test_lock.py 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/test/unittests/test_lock.py 2023-03-27 16:00:29.000000000 +0200 @@ -118,8 +118,8 @@ """ Test setUp. """ - self.lock_inst = lock.RemoteLock("alice", "node1") - self.lock_inst_no_wait = lock.RemoteLock("alice", "node1", wait=False) + self.lock_inst = lock.RemoteLock("node1") + self.lock_inst_no_wait = lock.RemoteLock("node1", wait=False) def tearDown(self): """ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/test/unittests/test_qdevice.py new/crmsh-4.5.0+20230327.c76ad5d5/test/unittests/test_qdevice.py --- old/crmsh-4.5.0+20230321.97bd51bb/test/unittests/test_qdevice.py 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/test/unittests/test_qdevice.py 2023-03-27 16:00:29.000000000 +0200 @@ -67,66 +67,55 @@ @mock.patch('crmsh.lock.RemoteLock') -@mock.patch('crmsh.utils.user_of') -def test_qnetd_lock_for_same_cluster_name(mock_userof, mock_remote_lock): +def test_qnetd_lock_for_same_cluster_name(mock_remote_lock): _context = mock.Mock(qnetd_addr="qnetd-node", cluster_name="cluster1") remote_lock_inst = mock.Mock() mock_remote_lock.return_value = remote_lock_inst - mock_userof.return_value = "alice" remote_lock_inst.lock.return_value.__enter__ = mock.Mock() remote_lock_inst.lock.return_value.__exit__ = mock.Mock() @qdevice.qnetd_lock_for_same_cluster_name def decorated(ctx): return decorated(_context) - mock_userof.assert_called_once_with("qnetd-node") - mock_remote_lock.assert_called_once_with("alice", "qnetd-node", for_join=False, + mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False, lock_dir="/run/.crmsh_qdevice_lock_for_cluster1", wait=False) @mock.patch('crmsh.utils.fatal') @mock.patch('crmsh.lock.RemoteLock') -@mock.patch('crmsh.utils.user_of') -def test_qnetd_lock_for_same_cluster_name_claim_error(mock_userof, mock_remote_lock, mock_fatal): +def test_qnetd_lock_for_same_cluster_name_claim_error(mock_remote_lock, mock_fatal): _context = mock.Mock(qnetd_addr="qnetd-node", cluster_name="cluster1") remote_lock_inst = mock.Mock() mock_remote_lock.return_value = remote_lock_inst - mock_userof.return_value = "alice" remote_lock_inst.lock.side_effect = lock.ClaimLockError @qdevice.qnetd_lock_for_same_cluster_name def decorated(ctx): return decorated(_context) - mock_userof.assert_called_once_with("qnetd-node") mock_fatal.assert_called_once_with("Duplicated cluster name \"cluster1\"!") - mock_remote_lock.assert_called_once_with("alice", "qnetd-node", for_join=False, + mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False, lock_dir="/run/.crmsh_qdevice_lock_for_cluster1", wait=False) @mock.patch('crmsh.utils.fatal') @mock.patch('crmsh.lock.RemoteLock') -@mock.patch('crmsh.utils.user_of') -def test_qnetd_lock_for_same_cluster_name_ssh_error(mock_userof, mock_remote_lock, mock_fatal): +def test_qnetd_lock_for_same_cluster_name_ssh_error(mock_remote_lock, mock_fatal): _context = mock.Mock(qnetd_addr="qnetd-node", cluster_name="cluster1") remote_lock_inst = mock.Mock() 
mock_remote_lock.return_value = remote_lock_inst - mock_userof.return_value = "alice" remote_lock_inst.lock.side_effect = lock.SSHError("ssh error!") @qdevice.qnetd_lock_for_same_cluster_name def decorated(ctx): return decorated(_context) - mock_userof.assert_called_once_with("qnetd-node") - mock_remote_lock.assert_called_once_with("alice", "qnetd-node", for_join=False, + mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False, lock_dir="/run/.crmsh_qdevice_lock_for_cluster1", wait=False) @mock.patch('crmsh.lock.RemoteLock') -@mock.patch('crmsh.utils.user_of') -def test_qnetd_lock_for_multi_cluster(mock_userof, mock_remote_lock): +def test_qnetd_lock_for_multi_cluster(mock_remote_lock): _context = mock.Mock(qnetd_addr="qnetd-node") remote_lock_inst = mock.Mock() - mock_userof.return_value = "alice" mock_remote_lock.return_value = remote_lock_inst remote_lock_inst.lock.return_value.__enter__ = mock.Mock() remote_lock_inst.lock.return_value.__exit__ = mock.Mock() @@ -134,24 +123,21 @@ def decorated(ctx): return decorated(_context) - mock_remote_lock.assert_called_once_with("alice", "qnetd-node", for_join=False, no_warn=True) + mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False, no_warn=True) @mock.patch('crmsh.utils.fatal') @mock.patch('crmsh.lock.RemoteLock') -@mock.patch('crmsh.utils.user_of') -def test_qnetd_lock_for_multi_cluster_error(mock_userof, mock_remote_lock, mock_fatal): +def test_qnetd_lock_for_multi_cluster_error(mock_remote_lock, mock_fatal): _context = mock.Mock(qnetd_addr="qnetd-node") remote_lock_inst = mock.Mock() - mock_userof.return_value = "alice" mock_remote_lock.return_value = remote_lock_inst remote_lock_inst.lock.side_effect = lock.SSHError("ssh error!") @qdevice.qnetd_lock_for_multi_cluster def decorated(ctx): return decorated(_context) - mock_userof.assert_called_once_with("qnetd-node") - mock_remote_lock.assert_called_once_with("alice", "qnetd-node", for_join=False, no_warn=True) + mock_remote_lock.assert_called_once_with("qnetd-node", for_join=False, no_warn=True) class TestQDevice(unittest.TestCase): diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.5.0+20230321.97bd51bb/test/unittests/test_utils.py new/crmsh-4.5.0+20230327.c76ad5d5/test/unittests/test_utils.py --- old/crmsh-4.5.0+20230321.97bd51bb/test/unittests/test_utils.py 2023-03-21 16:02:58.000000000 +0100 +++ new/crmsh-4.5.0+20230327.c76ad5d5/test/unittests/test_utils.py 2023-03-27 16:00:29.000000000 +0200 @@ -1906,3 +1906,20 @@ with pytest.raises(utils.TerminateSubCommand) as err: utils.check_user_access('cluster') mock_error.assert_called_once_with('Please run this command starting with "sudo".\nCurrently, this command needs to use sudo to escalate itself as root.\nPlease consider to add "user" as sudoer. 
For example:\n sudo bash -c \'echo "user ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/user\'') + + +@mock.patch('logging.Logger.debug') +@mock.patch('crmsh.utils.get_stdout_stderr') +@mock.patch('crmsh.utils.check_ssh_passwd_need') +@mock.patch('crmsh.utils.this_node') +def test_check_passwordless_between_nodes(mock_this_node, mock_check_ssh, mock_run, mock_debug): + mock_this_node.return_value = "node1" + mock_check_ssh.return_value = False + mock_run.return_value = (1, None, None) + + res_list = utils.check_passwordless_between_nodes(["node1", "node2"]) + assert res_list == [("node2", "node1")] + + mock_run.assert_called_once_with('ssh node2 "ssh -o StrictHostKeyChecking=no -T -o Batchmode=yes root@node1 true"') + mock_debug.assert_called_once_with("There is no passwordless configured from %s to %s under '%s'", 'node2', 'node1', 'root') +
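
A note on the headline fix above (crmsh#1157): the new logic in bootstrap.py's init_qdevice() picks the local and remote users for the passwordless ssh setup towards the qnetd host in a fixed order. An explicitly given remote user wins; otherwise an already-working (local_user, remote_user) pair from utils.UserOfHost.user_pair_for_ssh() is reused; only then does it fall back to userdir.get_sudoer() and finally userdir.getuser(). Below is a minimal standalone sketch of that resolution order; the function and parameter names (resolve_ssh_users, cached_pair, and so on) are illustrative stand-ins, not crmsh's actual API, which is shown verbatim in the bootstrap.py hunk above.

# Standalone sketch of the user-resolution order introduced in init_qdevice()
# (crmsh#1157). The parameters stand in for qdevice_inst.ssh_user, the result
# of utils.UserOfHost.user_pair_for_ssh(), userdir.get_sudoer() and
# userdir.getuser(); they are assumptions for illustration only.
from typing import Optional, Tuple


def resolve_ssh_users(
        explicit_ssh_user: Optional[str],
        cached_pair: Optional[Tuple[str, str]],
        sudoer: Optional[str],
        current_user: str) -> Tuple[str, str]:
    """Return (local_user, ssh_user) used to reach the qnetd host."""
    local_user = None
    ssh_user = None
    if explicit_ssh_user is not None:
        # 1. A remote user given explicitly on the command line always wins.
        ssh_user = explicit_ssh_user
    elif cached_pair is not None:
        # 2. Otherwise reuse an ssh user pair that is already known to work.
        local_user, ssh_user = cached_pair
    if local_user is None:
        # 3. Fall back to the sudoer, then to the invoking user.
        local_user = sudoer if sudoer is not None else current_user
    if ssh_user is None:
        ssh_user = local_user
    return local_user, ssh_user


if __name__ == '__main__':
    # Run via sudo by "alice", no cached pair: both users resolve to alice.
    print(resolve_ssh_users(None, None, 'alice', 'root'))                  # ('alice', 'alice')
    # A cached pair from an earlier bootstrap run takes precedence.
    print(resolve_ssh_users(None, ('bob', 'hacluster'), 'alice', 'root'))  # ('bob', 'hacluster')

The same bootstrap.py hunk also calls configure_ssh_key(local_user) before utils.ssh_copy_id() when a password is still needed, so the resolved local user has a key to copy.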