Script 'mail_helper' called by obssrc

Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked in at 2026-02-03 21:35:01

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.1995 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "crmsh" Tue Feb 3 21:35:01 2026 rev:395 rq:1330755 version:5.0.0+20260203.366eb25a Changes: -------- --- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes 2026-01-26 12:35:14.811500296 +0100 +++ /work/SRC/openSUSE:Factory/.crmsh.new.1995/crmsh.changes 2026-02-03 21:36:10.396397776 +0100 @@ -1,0 +2,20 @@ +Tue Feb 03 12:05:59 UTC 2026 - [email protected] + +- Update to version 5.0.0+20260203.366eb25a: + * Fix: log: Add milliseconds time format to crmsh.log (bsc#1255021) + * Dev: corosync: Add milliseconds to log time format + +------------------------------------------------------------------- +Tue Feb 03 07:30:57 UTC 2026 - [email protected] + +- Update to version 5.0.0+20260203.2855bc97: + * Fix: ui_cluster: Stop dlm in maintenance mode correctly (bsc#1253733) + +------------------------------------------------------------------- +Tue Jan 27 05:13:00 UTC 2026 - [email protected] + +- Update to version 5.0.0+20260127.deec490c: + * Dev: run-functional-tests: Sleep 2s after creating contaier and attaching network + * Dev: run-functional-tests: Add timestamp logging for better debugging + +------------------------------------------------------------------- Old: ---- crmsh-5.0.0+20260126.95a91857.tar.bz2 New: ---- crmsh-5.0.0+20260203.366eb25a.tar.bz2 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Other differences: ------------------ ++++++ crmsh.spec ++++++ --- /var/tmp/diff_new_pack.alkvGH/_old 2026-02-03 21:36:11.404440142 +0100 +++ /var/tmp/diff_new_pack.alkvGH/_new 2026-02-03 21:36:11.404440142 +0100 @@ -41,7 +41,7 @@ Summary: High Availability cluster command-line interface License: GPL-2.0-or-later Group: %{pkg_group} -Version: 5.0.0+20260126.95a91857 +Version: 5.0.0+20260203.366eb25a Release: 0 URL: http://crmsh.github.io Source0: %{name}-%{version}.tar.bz2 ++++++ _servicedata ++++++ --- /var/tmp/diff_new_pack.alkvGH/_old 2026-02-03 21:36:11.460442494 +0100 +++ /var/tmp/diff_new_pack.alkvGH/_new 2026-02-03 21:36:11.464442663 +0100 @@ -9,7 +9,7 @@ </service> <service name="tar_scm"> <param name="url">https://github.com/ClusterLabs/crmsh.git</param> - <param name="changesrevision">c58a6e83071267885b061de91e4e0638eaa61474</param> + <param name="changesrevision">366eb25a48a219b8bd9689289cc2b107a57d74a9</param> </service> </servicedata> (No newline at EOF) ++++++ crmsh-5.0.0+20260126.95a91857.tar.bz2 -> crmsh-5.0.0+20260203.366eb25a.tar.bz2 ++++++ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20260126.95a91857/crmsh/corosync.py new/crmsh-5.0.0+20260203.366eb25a/crmsh/corosync.py --- old/crmsh-5.0.0+20260126.95a91857/crmsh/corosync.py 2026-01-26 09:28:29.000000000 +0100 +++ new/crmsh-5.0.0+20260203.366eb25a/crmsh/corosync.py 2026-02-03 12:19:21.000000000 +0100 @@ -38,7 +38,7 @@ to_logfile: yes logfile: /var/log/cluster/corosync.log to_syslog: yes - timestamp: on + timestamp: hires } """ KNET_LINK_NUM_LIMIT = 8 diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20260126.95a91857/crmsh/log.py new/crmsh-5.0.0+20260203.366eb25a/crmsh/log.py --- old/crmsh-5.0.0+20260126.95a91857/crmsh/log.py 2026-01-26 09:28:29.000000000 +0100 +++ new/crmsh-5.0.0+20260203.366eb25a/crmsh/log.py 2026-02-03 12:19:21.000000000 +0100 @@ -7,6 +7,7 @@ import logging import logging.config import typing +import time from contextlib import contextmanager from . 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20260126.95a91857/crmsh/ui_cluster.py new/crmsh-5.0.0+20260203.366eb25a/crmsh/ui_cluster.py
--- old/crmsh-5.0.0+20260126.95a91857/crmsh/ui_cluster.py       2026-01-26 09:28:29.000000000 +0100
+++ new/crmsh-5.0.0+20260203.366eb25a/crmsh/ui_cluster.py       2026-02-03 12:19:21.000000000 +0100
@@ -260,10 +260,14 @@
             return
         logger.debug(f"stop node list: {node_list}")

-        if utils.is_cluster_in_maintenance_mode() and utils.is_dlm_running():
-            logger.info("The cluster is in maintenance mode")
-            logger.error("Stopping pacemaker/corosync will trigger unexpected node fencing when 'dlm_controld' is running in maintenance mode.")
-            return False
+        cluster_in_maintenance = utils.is_cluster_in_maintenance_mode()
+        for node in node_list[:]:
+            if cluster_in_maintenance and utils.is_dlm_running(on_node=node):
+                logger.info("The cluster is in maintenance mode and dlm is running on %s", node)
+                logger.error("Stopping pacemaker/corosync will trigger unexpected node fencing when 'dlm_controld' is running in maintenance mode.")
+                node_list.remove(node)
+        if not node_list:
+            return

         utils.wait_for_dc(node_list[0])

diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20260126.95a91857/crmsh/utils.py new/crmsh-5.0.0+20260203.366eb25a/crmsh/utils.py
--- old/crmsh-5.0.0+20260126.95a91857/crmsh/utils.py    2026-01-26 09:28:29.000000000 +0100
+++ new/crmsh-5.0.0+20260203.366eb25a/crmsh/utils.py    2026-02-03 12:19:21.000000000 +0100
@@ -2796,11 +2796,11 @@
         shell.get_stdout_or_raise_error(f'dlm_tool set_config "{option}={value}"', peer)


-def is_dlm_running(peer=None):
+def is_dlm_running(peer=None, on_node=None):
     """
     Check if dlm ra controld is running
     """
-    return xmlutil.CrmMonXmlParser(peer).is_resource_started(constants.DLM_CONTROLD_RA)
+    return xmlutil.CrmMonXmlParser(peer).is_resource_started(constants.DLM_CONTROLD_RA, node=on_node)


 def is_dlm_configured(peer=None):

diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20260126.95a91857/crmsh/xmlutil.py new/crmsh-5.0.0+20260203.366eb25a/crmsh/xmlutil.py
--- old/crmsh-5.0.0+20260126.95a91857/crmsh/xmlutil.py  2026-01-26 09:28:29.000000000 +0100
+++ new/crmsh-5.0.0+20260203.366eb25a/crmsh/xmlutil.py  2026-02-03 12:19:21.000000000 +0100
@@ -1631,13 +1631,16 @@
             return True
         return False

-    def is_resource_started(self, ra):
+    def is_resource_started(self, ra, node=None):
         """
         Check if the RA started(in all clone instances if configured as clone)

         @ra could be resource id or resource type
+        @node: optional, specify the node name to check if the resource is started on the node
         """
         xpath = f'//resource[(@id="{ra}" or @resource_agent="{ra}") and @active="true" and @role="Started"]'
+        if node:
+            xpath += f"/node[@name='{node}']"
         return bool(self.xml_elem.xpath(xpath))

     def get_resource_top_parent_id_set_via_type(self, ra_type):
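
The xmlutil.py change narrows the existing XPath with a child test: in crm_mon's XML status, a started resource carries a <node> child naming the host it runs on. A minimal sketch of the behaviour against a hand-written fragment (the resource id, agent string, and node names below are invented for illustration, not taken from crmsh):

    from lxml import etree

    # Hand-written fragment shaped like crm_mon XML status output.
    xml = etree.fromstring("""
    <resources>
      <resource id="dlm" resource_agent="ocf:pacemaker:controld"
                active="true" role="Started">
        <node name="node1" id="1" cached="true"/>
      </resource>
    </resources>
    """)

    ra = "ocf:pacemaker:controld"
    xpath = f'//resource[(@id="{ra}" or @resource_agent="{ra}") and @active="true" and @role="Started"]'

    print(bool(xml.xpath(xpath)))                             # True: started somewhere
    print(bool(xml.xpath(xpath + "/node[@name='node1']")))    # True: started on node1
    print(bool(xml.xpath(xpath + "/node[@name='node2']")))    # False: not on node2

With node=None the method keeps its old "started anywhere" semantics, which is what lets the new utils.is_dlm_running(on_node=...) parameter default to the previous behaviour.
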
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20260126.95a91857/test/run-functional-tests new/crmsh-5.0.0+20260203.366eb25a/test/run-functional-tests
--- old/crmsh-5.0.0+20260126.95a91857/test/run-functional-tests        2026-01-26 09:28:29.000000000 +0100
+++ new/crmsh-5.0.0+20260203.366eb25a/test/run-functional-tests        2026-02-03 12:19:21.000000000 +0100
@@ -55,6 +55,11 @@
 EOM


+log_date() {
+	date +"%Y-%m-%d %H:%M:%S"
+}
+
+
 fatal() {
 	error $*
 	exit 1
@@ -62,17 +67,17 @@


 error() {
-	echo "ERROR: $*"
+	echo "$(log_date) ERROR: $*"
 }


 warning() {
-	echo "WARNING: $*"
+	echo "$(log_date) WARNING: $*"
 }


 info() {
-	echo "INFO: $*"
+	echo "$(log_date) INFO: $*"
 }

@@ -199,6 +204,7 @@
 	podman run --rm -d $podman_options $podman_capabilties $podman_security $CONTAINER_IMAGE
 	podman network connect ha_network_second $node_name
+	sleep 2
 	if [ "$node_name" != "qnetd-node" ];then
		rm_qnetd_cmd="rpm -q corosync-qnetd && rpm -e corosync-qnetd"
		podman_exec $node_name "$rm_qnetd_cmd"
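
One detail of the ui_cluster.py hunk further up worth spelling out: the loop iterates over node_list[:] (a shallow copy) precisely so that calling node_list.remove(node) inside the loop cannot skip elements. A minimal standalone sketch of the idiom, with invented names (the set of dlm nodes is hypothetical):

    # Iterate over a slice copy so removal from the original list is safe.
    node_list = ["node1", "node2", "node3"]
    dlm_nodes = {"node2"}   # hypothetical: nodes where dlm_controld is running

    for node in node_list[:]:        # node_list[:] is a shallow copy
        if node in dlm_nodes:
            node_list.remove(node)   # mutates the original, not the copy

    print(node_list)                 # ['node1', 'node3']

Iterating over node_list directly while removing from it would silently skip the element after each removal, which is exactly the class of bug the [:] copy avoids.
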
