Hello community,

here is the log from the commit of package crmsh for openSUSE:Leap:15.2 checked in at 2020-05-29 15:16:25
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Leap:15.2/crmsh (Old)
 and      /work/SRC/openSUSE:Leap:15.2/.crmsh.new.3606 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "crmsh" Fri May 29 15:16:25 2020 rev:74 rq:810132 version:4.2.0+git.1585096577.f3257c89 Changes: -------- --- /work/SRC/openSUSE:Leap:15.2/crmsh/crmsh.changes 2020-05-28 20:09:29.642880750 +0200 +++ /work/SRC/openSUSE:Leap:15.2/.crmsh.new.3606/crmsh.changes 2020-05-29 15:16:32.199147609 +0200 @@ -1,0 +2,9 @@ +Wed May 27 11:58:42 UTC 2020 - XinLiang <[email protected]> + +- Add patches: + - Low: bootstrap: Simplify bootstrap context + * 0001-Low-bootstrap-Simplify-bootstrap-context.patch + - High: bootstrap: using class SBDManager for sbd configuration and management(bsc#1170037, bsc#1170999) + * 0002-High-bootstrap-using-class-SBDManager-for-sbd-config.patch + +------------------------------------------------------------------- New: ---- 0001-Low-bootstrap-Simplify-bootstrap-context.patch 0002-High-bootstrap-using-class-SBDManager-for-sbd-config.patch ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Other differences: ------------------ ++++++ crmsh.spec ++++++ --- /var/tmp/diff_new_pack.jBXfP5/_old 2020-05-29 15:16:32.563148796 +0200 +++ /var/tmp/diff_new_pack.jBXfP5/_new 2020-05-29 15:16:32.567148810 +0200 @@ -40,6 +40,8 @@ Release: 0 Url: http://crmsh.github.io Source0: %{name}-%{version}.tar.bz2 +Patch1: 0001-Low-bootstrap-Simplify-bootstrap-context.patch +Patch2: 0002-High-bootstrap-using-class-SBDManager-for-sbd-config.patch BuildRoot: %{_tmppath}/%{name}-%{version}-build %if 0%{?suse_version} @@ -142,6 +144,8 @@ %prep %setup -q +%patch1 -p1 +%patch2 -p1 # replace the shebang in all the scripts # with ${_bindir}/python3 ++++++ 0001-Low-bootstrap-Simplify-bootstrap-context.patch ++++++ >From 76ca07fa006ae6726fe8f3f0afd57883eaf3b23e Mon Sep 17 00:00:00 2001 From: liangxin1300 <[email protected]> Date: Sun, 22 Mar 2020 17:38:49 +0800 Subject: [PATCH 1/2] Low: bootstrap: Simplify bootstrap context --- crmsh/bootstrap.py | 200 ++++++++++++++++++++------------------------ crmsh/ui_cluster.py | 105 ++++++++++------------- 2 files changed, 134 insertions(+), 171 deletions(-) diff --git a/crmsh/bootstrap.py b/crmsh/bootstrap.py index db78b7dd..c8e21bb3 100644 --- a/crmsh/bootstrap.py +++ b/crmsh/bootstrap.py @@ -50,27 +50,59 @@ class Context(object): Context object used to avoid having to pass these variables to every bootstrap method. 
""" - def __init__(self, quiet, yes_to_all, nic=None, ip_address=None, ip_network=None): - self.quiet = quiet - self.yes_to_all = yes_to_all - self.nic = nic - self.ip_address = ip_address - self.ip_network = ip_network + def __init__(self): + ''' + Initialize attributes + ''' + self.quiet = None + self.yes_to_all = None + self.template = None self.cluster_name = None - self.cluster_node = None - self.ocfs2_device = None - self.shared_device = None - self.sbd_device = None - self.diskless_sbd = False # if True, enable SBD for diskless operation + self.diskless_sbd = None + self.watchdog = None + self.no_overwrite_sshkey = None + self.nic = None self.unicast = None - self.ipv6 = None self.admin_ip = None - self.watchdog = None - self.host_status = None - self.connect_name = None self.second_hb = None - self.ui_context = None + self.ipv6 = None self.qdevice = None + self.qdevice_host = None + self.qdevice_port = None + self.qdevice_algo = None + self.qdevice_tie_breaker = None + self.qdevice_tls = None + self.qdevice_heuristics = None + self.qdevice_heuristics_mode = None + self.shared_device = None + self.sbd_device = None + self.ocfs2_device = None + self.cluster_node = None + self.force = None + self.arbitrator = None + self.clusters = None + self.tickets = None + self.ip_address = None + self.ip_network = None + + @classmethod + def set_context(cls, options): + ctx = cls() + for opt in vars(options): + setattr(ctx, opt, getattr(options, opt)) + return ctx + + def init_qdevice(self): + if not self.qdevice_host: + return + self.qdevice = corosync.QDevice( + self.qdevice_host, + port=self.qdevice_port, + algo=self.qdevice_algo, + tie_breaker=self.qdevice_tie_breaker, + tls=self.qdevice_tls, + cmds=self.qdevice_heuristics, + mode=self.qdevice_heuristics_mode) _context = None @@ -1724,8 +1756,9 @@ def join_csync2(seed_host): # If we *were* updating /etc/hosts, the next line would have "\"$hosts_line\"" as # the last arg (but this requires re-enabling this functionality in ha-cluster-init) - if not invoke("ssh -o StrictHostKeyChecking=no root@{} crm cluster init -i {} csync2_remote {}".format(seed_host, _context.nic, utils.this_node())): - error("Can't invoke crm cluster init -i {} init csync2_remote on {}".format(_context.nic, seed_host)) + cmd = "crm cluster init -i {} csync2_remote {}".format(_context.nic, utils.this_node()) + if not invoke("ssh -o StrictHostKeyChecking=no root@{} {}".format(seed_host, cmd)): + error("Can't invoke \"{}\" on {}".format(cmd, seed_host)) # This is necessary if syncing /etc/hosts (to ensure everyone's got the # same list of hosts) @@ -2215,53 +2248,10 @@ def remove_localhost_check(): return nodename == utils.this_node() -def bootstrap_init(cluster_name="hacluster", ui_context=None, nic=None, ocfs2_device=None, - shared_device=None, sbd_device=None, diskless_sbd=False, quiet=False, - template=None, admin_ip=None, yes_to_all=False, no_overwrite_sshkey=False, - unicast=False, second_hb=False, ipv6=False, watchdog=None, qdevice=None, stage=None, args=None): - """ - -i <nic> - -o <ocfs2-device> - -p <shared-device> - -s <sbd-device> - -S - configure SBD without disk - -t <template> - -A [<admin-ip>] - -q - quiet - -y - yes to all - -u - unicast - <stage> - - stages: - ssh - ssh_remote - csync2 - csync2_remote - corosync - storage - sbd - cluster - vgfs - admin - qdevice +def bootstrap_init(context): + """ + Init cluster process """ - global _context - _context = Context(quiet=quiet, yes_to_all=yes_to_all, nic=nic) - _context.cluster_name = cluster_name - 
_context.ocfs2_device = ocfs2_device - _context.shared_device = shared_device - _context.sbd_device = sbd_device - _context.diskless_sbd = diskless_sbd - _context.unicast = unicast - _context.second_hb = second_hb - _context.ipv6 = ipv6 - _context.admin_ip = admin_ip - _context.watchdog = watchdog - _context.ui_context = ui_context - _context.qdevice = qdevice - _context.no_overwrite_sshkey = no_overwrite_sshkey - _context.stage = stage - def check_option(): if _context.admin_ip and not valid_adminIP(_context.admin_ip): error("Invalid option: admin_ip") @@ -2271,6 +2261,11 @@ def bootstrap_init(cluster_name="hacluster", ui_context=None, nic=None, ocfs2_de except ValueError as err: error(err) + global _context + _context = context + _context.init_qdevice() + + stage = _context.stage if stage is None: stage = "" @@ -2296,6 +2291,7 @@ def bootstrap_init(cluster_name="hacluster", ui_context=None, nic=None, ocfs2_de if not check_prereqs(stage): return elif stage == 'csync2_remote': + args = _context.args log("args: {}".format(args)) if len(args) != 2: error("Expected NODE argument to csync2_remote") @@ -2306,18 +2302,18 @@ def bootstrap_init(cluster_name="hacluster", ui_context=None, nic=None, ocfs2_de if stage != "": globals()["init_" + stage]() else: - if watchdog is not None: + if _context.watchdog is not None: init_watchdog() init_ssh() init_csync2() init_corosync() init_remote_auth() - if template == 'ocfs2': - if sbd_device is None or ocfs2_device is None: + if _context.template == 'ocfs2': + if _context.sbd_device is None or _context.ocfs2_device is None: init_storage() init_sbd() init_cluster() - if template == 'ocfs2': + if _context.template == 'ocfs2': init_vgfs() init_admin() init_qdevice() @@ -2325,24 +2321,12 @@ def bootstrap_init(cluster_name="hacluster", ui_context=None, nic=None, ocfs2_de status("Done (log saved to %s)" % (LOG_FILE)) -def bootstrap_join(cluster_node=None, ui_context=None, nic=None, quiet=False, yes_to_all=False, watchdog=None, stage=None): +def bootstrap_join(context): """ - -c <cluster-node> - -i <nic> - -q - quiet - -y - yes to all - <stage> - # stages: - ssh - csync2 - ssh_merge - cluster + Join cluster process """ global _context - _context = Context(quiet=quiet, yes_to_all=yes_to_all, nic=nic) - _context.cluster_node = cluster_node - _context.watchdog = watchdog - _context.ui_context = ui_context + _context = context check_tty() @@ -2355,10 +2339,11 @@ def bootstrap_join(cluster_node=None, ui_context=None, nic=None, quiet=False, ye init() - if stage != "": - globals()["join_" + stage](cluster_node) + cluster_node = _context.cluster_node + if _context.stage != "": + globals()["join_" + _context.stage](cluster_node) else: - if not yes_to_all and cluster_node is None: + if not _context.yes_to_all and cluster_node is None: status("""Join This Node to Cluster: You will be asked for the IP address of an existing node, from which configuration will be copied. 
If you have not already configured @@ -2385,19 +2370,15 @@ def join_remote_auth(node): invoke("touch {}".format(PCMK_REMOTE_AUTH)) -def bootstrap_remove(cluster_node=None, ui_context=None, quiet=False, yes_to_all=False, force=False, - qdevice=None): +def bootstrap_remove(context): """ - -c <cluster-node> - node to remove from cluster - -q - quiet - -y - yes to all - -f - force removal of self + Remove node from cluster, or remove qdevice configuration """ global _context - _context = Context(quiet=quiet, yes_to_all=yes_to_all) - _context.cluster_node = cluster_node - _context.ui_context = ui_context - _context.qdevice = qdevice + _context = context + yes_to_all = _context.yes_to_all + cluster_node = _context.cluster_node + force = _context.force if _context.qdevice: if not utils.is_qdevice_configured(): @@ -2527,13 +2508,12 @@ port="9929" os.chmod(BOOTH_CFG, 0o644) -def bootstrap_init_geo(quiet, yes_to_all, arbitrator, clusters, tickets, ui_context=None): +def bootstrap_init_geo(context): """ Configure as a geo cluster member. """ global _context - _context = Context(quiet=quiet, yes_to_all=yes_to_all) - _context.ui_context = ui_context + _context = context if os.path.exists(BOOTH_CFG) and not confirm("This will overwrite {} - continue?".format(BOOTH_CFG)): return @@ -2548,18 +2528,18 @@ def bootstrap_init_geo(quiet, yes_to_all, arbitrator, clusters, tickets, ui_cont # set common.startup.degr-wfc-timeout 120 create_booth_authkey() - create_booth_config(arbitrator, clusters, tickets) + create_booth_config(_context.arbitrator, _context.clusters, _context.tickets) status("Sync booth configuration across cluster") csync2_update("/etc/booth") init_csync2_geo() - geo_cib_config(clusters) + geo_cib_config(_context.clusters) def geo_fetch_config(node): # TODO: clean this up status("Retrieving configuration - This may prompt for root@%s:" % (node)) tmpdir = tmpfiles.create_dir() - invoke("scp root@%s:'/etc/booth/*' %s/" % (node, tmpdir)) + invoke("scp -oStrictHostKeyChecking=no root@%s:'/etc/booth/*' %s/" % (node, tmpdir)) try: if os.path.isfile("%s/authkey" % (tmpdir)): invoke("mv %s/authkey %s" % (tmpdir, BOOTH_AUTH)) @@ -2589,30 +2569,30 @@ group g-booth booth-ip booth-site meta target-role=Stopped crm_configure_load("update", crm_template.substitute(iprules=" ".join(iprule.format(k, v) for k, v in clusters.items()))) -def bootstrap_join_geo(quiet, yes_to_all, node, clusters, ui_context=None): +def bootstrap_join_geo(context): """ Run on second cluster to add to a geo configuration. It fetches its booth configuration from the other node (cluster node or arbitrator). """ global _context - _context = Context(quiet=quiet, yes_to_all=yes_to_all) - _context.ui_context = ui_context + _context = context init_common_geo() check_tty() - geo_fetch_config(node) + geo_fetch_config(_context.cluster_node) status("Sync booth configuration across cluster") csync2_update("/etc/booth") - geo_cib_config(clusters) + geo_cib_config(_context.clusters) -def bootstrap_arbitrator(quiet, yes_to_all, node, ui_context=None): +def bootstrap_arbitrator(context): """ Configure this machine as an arbitrator. It fetches its booth configuration from a cluster node already in the cluster. 
""" global _context - _context = Context(quiet=quiet, yes_to_all=yes_to_all) - _context.ui_context = ui_context + _context = context + node = _context.cluster_node + init_common_geo() check_tty() geo_fetch_config(node) diff --git a/crmsh/ui_cluster.py b/crmsh/ui_cluster.py index aa27f4cd..37eb4072 100644 --- a/crmsh/ui_cluster.py +++ b/crmsh/ui_cluster.py @@ -208,7 +208,7 @@ Note: help='Answer "yes" to all prompts (use with caution, this is destructive, especially during the "storage" stage. The /root/.ssh/id_rsa key will be overwritten unless the option "--no-overwrite-sshkey" is used)') parser.add_argument("-t", "--template", dest="template", help='Optionally configure cluster with template "name" (currently only "ocfs2" is valid here)') - parser.add_argument("-n", "--name", metavar="NAME", dest="name", default="hacluster", + parser.add_argument("-n", "--name", metavar="NAME", dest="cluster_name", default="hacluster", help='Set the name of the configured cluster.') parser.add_argument("-N", "--nodes", metavar="NODES", dest="nodes", help='Additional nodes to add to the created cluster. May include the current node, which will always be the initial cluster node.') @@ -234,7 +234,7 @@ Note: help="Configure corosync use IPv6") qdevice_group = parser.add_argument_group("QDevice configuration", "Options for configuring QDevice and QNetd.") - qdevice_group.add_argument("--qnetd-hostname", dest="qdevice", metavar="HOST", + qdevice_group.add_argument("--qnetd-hostname", dest="qdevice_host", metavar="HOST", help="HOST or IP of the QNetd server to be used") qdevice_group.add_argument("--qdevice-port", dest="qdevice_port", metavar="PORT", type=int, default=5403, help="TCP PORT of QNetd server(default:5403)") @@ -270,45 +270,21 @@ Note: if options.template and options.template != "ocfs2": parser.error("Invalid template (%s)" % (options.template)) - # if options.geo and options.name == "hacluster": - # parser.error("For a geo cluster, each cluster must have a unique name (use --name to set)") - - qdevice = None - if options.qdevice: + if options.qdevice_host: if options.qdevice_heuristics_mode and not options.qdevice_heuristics: parser.error("Option --qdevice-heuristics is required if want to configure heuristics mode") options.qdevice_heuristics_mode = options.qdevice_heuristics_mode or "sync" - qdevice = corosync.QDevice( - options.qdevice, - port=options.qdevice_port, - algo=options.qdevice_algo, - tie_breaker=options.qdevice_tie_breaker, - tls=options.qdevice_tls, - cmds=options.qdevice_heuristics, - mode=options.qdevice_heuristics_mode) elif re.search("--qdevice-.*", ' '.join(sys.argv)): parser.error("Option --qnetd-hostname is required if want to configure qdevice") - bootstrap.bootstrap_init( - cluster_name=options.name, - ui_context=context, - nic=options.nic, - ocfs2_device=options.ocfs2_device, - shared_device=options.shared_device, - sbd_device=options.sbd_device, - diskless_sbd=options.diskless_sbd, - quiet=options.quiet, - template=options.template, - admin_ip=options.admin_ip, - yes_to_all=options.yes_to_all, - no_overwrite_sshkey=options.no_overwrite_sshkey, - unicast=options.unicast, - second_hb=options.second_hb, - ipv6=options.ipv6, - watchdog=options.watchdog, - qdevice=qdevice, - stage=stage, - args=args) + # if options.geo and options.name == "hacluster": + # parser.error("For a geo cluster, each cluster must have a unique name (use --name to set)") + boot_context = bootstrap.Context.set_context(options) + boot_context.ui_context = context + boot_context.stage = stage + 
boot_context.args = args + + bootstrap.bootstrap_init(boot_context) # if options.geo: # bootstrap.bootstrap_init_geo() @@ -358,14 +334,11 @@ If stage is not specified, each stage will be invoked in sequence. if stage not in ("ssh", "csync2", "ssh_merge", "cluster", ""): parser.error("Invalid stage (%s)" % (stage)) - bootstrap.bootstrap_join( - cluster_node=options.cluster_node, - ui_context=context, - nic=options.nic, - quiet=options.quiet, - yes_to_all=options.yes_to_all, - watchdog=options.watchdog, - stage=stage) + join_context = bootstrap.Context.set_context(options) + join_context.ui_context = context + join_context.stage = stage + + bootstrap.bootstrap_join(join_context) return True @@ -417,21 +390,16 @@ If stage is not specified, each stage will be invoked in sequence. if options.cluster_node is not None and options.cluster_node not in args: args = list(args) + [options.cluster_node] + + rm_context = bootstrap.Context.set_context(options) + rm_context.ui_context = context + if len(args) == 0: - bootstrap.bootstrap_remove( - cluster_node=None, - ui_context=context, - quiet=options.quiet, - yes_to_all=options.yes_to_all, - qdevice=options.qdevice) + bootstrap.bootstrap_remove(rm_context) else: for node in args: - bootstrap.bootstrap_remove( - cluster_node=node, - ui_context=context, - quiet=options.quiet, - yes_to_all=options.yes_to_all, - force=options.force) + rm_context.cluster_node = node + bootstrap.bootstrap_remove(rm_context) return True @command.skill_level('administrator') @@ -529,7 +497,13 @@ Cluster Description ticketlist = [t for t in re.split('[ ,;]+', options.tickets)] except ValueError: parser.error("Invalid ticket list") - bootstrap.bootstrap_init_geo(options.quiet, options.yes_to_all, options.arbitrator, clustermap, ticketlist, ui_context=context) + + geo_context = bootstrap.Context.set_context(options) + geo_context.clusters = clustermap + geo_context.tickets = ticketlist + geo_context.ui_context = context + + bootstrap.bootstrap_init_geo(geo_context) return True @command.name("geo_join") @@ -543,13 +517,13 @@ Cluster Description parser.add_argument("-h", "--help", action="store_true", dest="help", help="Show this help message") parser.add_argument("-q", "--quiet", help="Be quiet (don't describe what's happening, just do it)", action="store_true", dest="quiet") parser.add_argument("-y", "--yes", help='Answer "yes" to all prompts (use with caution)', action="store_true", dest="yes_to_all") - parser.add_argument("-c", "--cluster-node", help="IP address of an already-configured geo cluster or arbitrator", dest="node", metavar="IP") + parser.add_argument("-c", "--cluster-node", help="IP address of an already-configured geo cluster or arbitrator", dest="cluster_node", metavar="IP") parser.add_argument("-s", "--clusters", help="Geo cluster description (see geo-init for details)", dest="clusters", metavar="DESC") options, args = parse_options(parser, args) if options is None or args is None: return errs = [] - if options.node is None: + if options.cluster_node is None: errs.append("The --cluster-node argument is required.") if options.clusters is None: errs.append("The --clusters argument is required.") @@ -558,7 +532,12 @@ Cluster Description clustermap = self._parse_clustermap(options.clusters) if clustermap is None: parser.error("Invalid cluster description format") - bootstrap.bootstrap_join_geo(options.quiet, options.yes_to_all, options.node, clustermap, ui_context=context) + + geo_context = bootstrap.Context.set_context(options) + geo_context.clusters = clustermap + 
geo_context.ui_context = context + + bootstrap.bootstrap_join_geo(geo_context) return True @command.name("geo_init_arbitrator") @@ -572,11 +551,15 @@ Cluster Description parser.add_argument("-h", "--help", action="store_true", dest="help", help="Show this help message") parser.add_argument("-q", "--quiet", help="Be quiet (don't describe what's happening, just do it)", action="store_true", dest="quiet") parser.add_argument("-y", "--yes", help='Answer "yes" to all prompts (use with caution)', action="store_true", dest="yes_to_all") - parser.add_argument("-c", "--cluster-node", help="IP address of an already-configured geo cluster", dest="other", metavar="IP") + parser.add_argument("-c", "--cluster-node", help="IP address of an already-configured geo cluster", dest="cluster_node", metavar="IP") options, args = parse_options(parser, args) if options is None or args is None: return - bootstrap.bootstrap_arbitrator(options.quiet, options.yes_to_all, options.other, ui_context=context) + + geo_context = bootstrap.Context.set_context(options) + geo_context.ui_context = context + + bootstrap.bootstrap_arbitrator(geo_context) return True @command.completers_repeating(compl.call(scripts.param_completion_list, 'health')) -- 2.21.1 ++++++ 0002-High-bootstrap-using-class-SBDManager-for-sbd-config.patch ++++++ >From 45d0feaa92f357b216d92a99545d4acd3b68ed41 Mon Sep 17 00:00:00 2001 From: liangxin1300 <[email protected]> Date: Fri, 8 May 2020 15:14:10 +0800 Subject: [PATCH 2/2] High: bootstrap: using class SBDManager for sbd configuration and management(bsc#1170037, bsc#1170999) --- crmsh/bootstrap.py | 398 +++++++++++++++++++++++++------------------- crmsh/ui_cluster.py | 2 +- 2 files changed, 230 insertions(+), 170 deletions(-) diff --git a/crmsh/bootstrap.py b/crmsh/bootstrap.py index c8e21bb3..56397f1a 100644 --- a/crmsh/bootstrap.py +++ b/crmsh/bootstrap.py @@ -58,7 +58,6 @@ class Context(object): self.yes_to_all = None self.template = None self.cluster_name = None - self.diskless_sbd = None self.watchdog = None self.no_overwrite_sshkey = None self.nic = None @@ -75,7 +74,6 @@ class Context(object): self.qdevice_heuristics = None self.qdevice_heuristics_mode = None self.shared_device = None - self.sbd_device = None self.ocfs2_device = None self.cluster_node = None self.force = None @@ -84,6 +82,9 @@ class Context(object): self.tickets = None self.ip_address = None self.ip_network = None + self.sbd_manager = None + self.sbd_devices = None + self.diskless_sbd = None @classmethod def set_context(cls, options): @@ -104,6 +105,225 @@ class Context(object): cmds=self.qdevice_heuristics, mode=self.qdevice_heuristics_mode) + def init_sbd_manager(self): + self.sbd_manager = SBDManager(self.sbd_devices, self.diskless_sbd) + + +class SBDManager(object): + """ + Class to manage sbd configuration and services + """ + SYSCONFIG_SBD_TEMPLATE = "/usr/share/fillup-templates/sysconfig.sbd" + SBD_STATUS_DESCRIPTION = """ +Configure SBD: + If you have shared storage, for example a SAN or iSCSI target, + you can use it avoid split-brain scenarios by configuring SBD. + This requires a 1 MB partition, accessible to all nodes in the + cluster. The device path must be persistent and consistent + across all nodes in the cluster, so /dev/disk/by-id/* devices + are a good choice. Note that all data on the partition you + specify here will be destroyed. 
+""" + + def __init__(self, sbd_devices=None, diskless_sbd=False): + """ + Init function + + sbd_devices is provided by '-s' option on init process + diskless_sbd is provided by '-S' option on init process + """ + self.sbd_devices_input = sbd_devices + self.diskless_sbd = diskless_sbd + self._sbd_service_flag = False + self._sbd_devices = None + + @staticmethod + def _check_environment(): + """ + Check prerequisites for SBD + """ + if not check_watchdog(): + error("Watchdog device must be configured in order to use SBD") + if not utils.is_program("sbd"): + error("sbd executable not found! Cannot configure SBD") + + def _parse_sbd_device(self): + """ + Parse sbd devices, possible command line is like: + -s "/dev/sdb1;/dev/sdb2" + -s /dev/sdb1 -s /dev/sbd2 + """ + result_list = [] + for dev in self.sbd_devices_input: + if ';' in dev: + result_list.extend(dev.strip(';').split(';')) + else: + result_list.append(dev) + return result_list + + @staticmethod + def _verify_sbd_device(dev_list): + """ + Verify sbd device + """ + if len(dev_list) > 3: + raise ValueError("Maximum number of SBD device is 3") + for dev in dev_list: + if not is_block_device(dev): + raise ValueError("{} doesn't look like a block device".format(dev)) + + def _get_sbd_device_interactive(self): + """ + Get sbd device on interactive mode + """ + if _context.yes_to_all: + warn("Not configuring SBD (%s left untouched)." % (SYSCONFIG_SBD)) + return + + status(self.SBD_STATUS_DESCRIPTION) + + if not confirm("Do you wish to use SBD?"): + warn("Not configuring SBD - STONITH will be disabled.") + return + + self._check_environment() + + configured_dev = self._get_sbd_device_from_config() + if configured_dev and not confirm("SBD is already configured to use {} - overwrite?".format(';'.join(configured_dev))): + return configured_dev + + dev_list = [] + dev_looks_sane = False + while not dev_looks_sane: + dev = prompt_for_string('Path to storage device (e.g. 
/dev/disk/by-id/...), or "none" for diskless sbd, use ";" as separator for multi path', r'none|\/.*') + if dev == "none": + self.diskless_sbd = True + return + dev_list = dev.strip(';').split(';') + try: + self._verify_sbd_device(dev_list) + except ValueError as err_msg: + print(term.render(clidisplay.error(str(err_msg)))) + continue + for dev_item in dev_list: + warn("All data on {} will be destroyed!".format(dev_item)) + if confirm('Are you sure you wish to use this device?'): + dev_looks_sane = True + else: + dev_looks_sane = False + break + + return dev_list + + def _get_sbd_device(self): + """ + Get sbd device from options or interactive mode + """ + dev_list = [] + if self.sbd_devices_input: + dev_list = self._parse_sbd_device() + self._verify_sbd_device(dev_list) + self._check_environment() + elif self.diskless_sbd: + self._check_environment() + else: + dev_list = self._get_sbd_device_interactive() + self._sbd_devices = dev_list + + def _initialize_sbd(self): + """ + Initialize SBD device + """ + if self.diskless_sbd: + return + for dev in self._sbd_devices: + if not invoke("sbd -d {} create".format(dev)): + error("Failed to initialize SBD device {}".format(dev)) + + def _update_configuration(self): + """ + Update /etc/sysconfig/sbd + """ + shutil.copyfile(self.SYSCONFIG_SBD_TEMPLATE, SYSCONFIG_SBD) + sbd_config_dict = { + "SBD_PACEMAKER": "yes", + "SBD_STARTMODE": "always", + "SBD_DELAY_START": "no", + "SBD_WATCHDOG_DEV": detect_watchdog_device() + } + if self._sbd_devices: + sbd_config_dict["SBD_DEVICE"] = ';'.join(self._sbd_devices) + utils.sysconfig_set(SYSCONFIG_SBD, **sbd_config_dict) + csync2_update(SYSCONFIG_SBD) + + @staticmethod + def _get_sbd_device_from_config(): + """ + Gets currently configured SBD device, i.e. what's in /etc/sysconfig/sbd + """ + conf = utils.parse_sysconfig(SYSCONFIG_SBD) + res = conf.get("SBD_DEVICE") + if res: + return res.strip(';').split(';') + else: + return None + + def sbd_init(self): + """ + Function sbd_init includes these steps: + 1. Get sbd device from options or interactive mode + 2. Initialize sbd device + 3. 
Write config file /etc/sysconfig/sbd + """ + self._get_sbd_device() + if not self._sbd_devices and not self.diskless_sbd: + return + status_long("Initializing {}SBD...".format("diskless " if self.diskless_sbd else "")) + self._initialize_sbd() + self._update_configuration() + status_done() + # If process work through here, consider it's ready for enable service + self._sbd_service_flag = True + + def manage_sbd_service(self): + """ + Manage sbd service, running on both init and join process + """ + if self._sbd_service_flag: + invoke("systemctl enable sbd.service") + else: + invoke("systemctl disable sbd.service") + + def configure_sbd_resource(self): + """ + Configure stonith-sbd resource and stonith-enabled property + """ + if self._sbd_devices and self._get_sbd_device_from_config(): + if not invoke("crm configure primitive stonith-sbd stonith:external/sbd pcmk_delay_max=30s"): + error("Can't create stonith-sbd primitive") + if not invoke("crm configure property stonith-enabled=true"): + error("Can't enable STONITH for SBD") + elif self.diskless_sbd: + if not invoke("crm configure property stonith-enabled=true stonith-watchdog-timeout=5s"): + error("Can't enable STONITH for diskless SBD") + + def join_sbd(self, peer_host): + """ + Function join_sbd running on join process only + On joining process, check whether peer node has enabled sbd.service + If so, check prerequisites of SBD and verify sbd device on join node + """ + if not os.path.exists(SYSCONFIG_SBD): + return + if not invoke("ssh -o StrictHostKeyChecking=no root@{} systemctl is-enabled sbd.service".format(peer_host)): + return + self._check_environment() + dev_list = self._get_sbd_device_from_config() + if dev_list: + self._verify_sbd_device(dev_list) + status("Got {}SBD configuration".format("" if dev_list else "diskless ")) + self._sbd_service_flag = True + _context = None @@ -419,14 +639,6 @@ def probe_partitions(): status_done() -def configured_sbd_device(): - """ - Gets currently configured SBD device, i.e. what's in /etc/sysconfig/sbd - """ - conf = utils.parse_sysconfig(SYSCONFIG_SBD) - return conf.get("SBD_DEVICE") - - def check_tty(): """ Check for pseudo-tty: Cannot display read prompts without a TTY (bnc#892702) @@ -690,12 +902,7 @@ def init_cluster_local(): if pass_msg: warn("You should change the hacluster password to something more secure!") - # for cluster join, diskless_sbd flag is set in join_cluster() if - # sbd is running on seed host - if (configured_sbd_device() and _context.sbd_device) or _context.diskless_sbd: - invoke("systemctl enable sbd.service") - else: - invoke("systemctl disable sbd.service") + _context.sbd_manager.manage_sbd_service() start_service("pacemaker.service") wait_for_cluster() @@ -1206,8 +1413,7 @@ def is_block_device(dev): from stat import S_ISBLK try: rc = S_ISBLK(os.stat(dev).st_mode) - except OSError as msg: - warn(msg) + except OSError: return False return rc @@ -1360,42 +1566,6 @@ def check_watchdog(): return rc == 0 -def sysconfig_comment_out(scfile, key): - """ - Comments out the given key in the sysconfig file - """ - matcher = re.compile(r'^\s*{}\s*='.format(key)) - outp, ncomments = "", 0 - for line in scfile.readlines(): - if matcher.match(line): - outp += '#' + line - ncomments += 1 - else: - outp += line - return outp, ncomments - - -def init_sbd_diskless(): - """ - Initialize SBD in diskless mode. 
- """ - status_long("Initializing diskless SBD...") - if os.path.isfile(SYSCONFIG_SBD): - log("Overwriting {} with diskless configuration".format(SYSCONFIG_SBD)) - scfg, nmatches = sysconfig_comment_out(open(SYSCONFIG_SBD), "SBD_DEVICE") - if nmatches > 0: - utils.str2file(scfg, SYSCONFIG_SBD) - else: - log("Creating {} with diskless configuration".format(SYSCONFIG_SBD)) - utils.sysconfig_set(SYSCONFIG_SBD, - SBD_PACEMAKER="yes", - SBD_STARTMODE="always", - SBD_DELAY_START="no", - SBD_WATCHDOG_DEV=detect_watchdog_device()) - csync2_update(SYSCONFIG_SBD) - status_done() - - def init_sbd(): """ Configure SBD (Storage-based fencing). @@ -1403,108 +1573,7 @@ def init_sbd(): SBD can also run in diskless mode if no device is configured. """ - def get_dev_list(dev_list): - result_list = [] - for dev in dev_list: - if ';' in dev: - result_list.extend(dev.strip(';').split(';')) - else: - result_list.append(dev) - return result_list - - # non-interactive case - if _context.sbd_device: - _context.sbd_device = get_dev_list(_context.sbd_device) - if len(_context.sbd_device) > 3: - error("Maximum number of SBD device is 3") - for dev in _context.sbd_device: - if not is_block_device(dev): - error("{} doesn't look like a block device".format(dev)) - # diskless sbd - elif _context.diskless_sbd: - init_sbd_diskless() - return - # interactive case - else: - # SBD device not set up by init_storage (ocfs2 template) and - # also not passed in as command line argument - prompt user - if _context.yes_to_all: - warn("Not configuring SBD (%s left untouched)." % (SYSCONFIG_SBD)) - return - status(""" -Configure SBD: - If you have shared storage, for example a SAN or iSCSI target, - you can use it avoid split-brain scenarios by configuring SBD. - This requires a 1 MB partition, accessible to all nodes in the - cluster. The device path must be persistent and consistent - across all nodes in the cluster, so /dev/disk/by-id/* devices - are a good choice. Note that all data on the partition you - specify here will be destroyed. -""") - - if not confirm("Do you wish to use SBD?"): - warn("Not configuring SBD - STONITH will be disabled.") - # Comment out SBD devices if present - if os.path.isfile(SYSCONFIG_SBD): - scfg, nmatches = sysconfig_comment_out(open(SYSCONFIG_SBD), "SBD_DEVICE") - if nmatches > 0: - utils.str2file(scfg, SYSCONFIG_SBD) - csync2_update(SYSCONFIG_SBD) - return - - if not check_watchdog(): - error("Watchdog device must be configured if want to use SBD!") - - if utils.is_program("sbd") is None: - error("sbd executable not found! Cannot configure SBD.") - - configured_dev = configured_sbd_device() - if configured_dev: - if not confirm("SBD is already configured to use %s - overwrite?" % (configured_dev)): - return - - dev_looks_sane = False - while not dev_looks_sane: - dev = prompt_for_string('Path to storage device (e.g. 
/dev/disk/by-id/...), or "none", use ";" as separator for multi path', r'none|\/.*') - if dev == "none": - _context.diskless_sbd = True - init_sbd_diskless() - return - dev_list = dev.strip(';').split(';') - if len(dev_list) > 3: - error("Maximum number of SBD device is 3") - continue - for dev_item in dev_list: - if not is_block_device(dev_item): - error("{} doesn't look like a block device".format(dev_item)) - dev_looks_sane = False - break - else: - warn("All data on {} will be destroyed!".format(dev_item)) - if confirm('Are you sure you wish to use this device?'): - dev_looks_sane = True - else: - dev_looks_sane = False - break - - _context.sbd_device = dev_list - - # TODO: need to ensure watchdog is available - # (actually, should work if watchdog unavailable, it'll just whine in the logs...) - # TODO: what about timeouts for multipath devices? - status_long('Initializing SBD...') - for dev in _context.sbd_device: - if not invoke("sbd -d %s create" % (dev)): - error("Failed to initialize SBD device %s" % (dev)) - status_done() - - utils.sysconfig_set(SYSCONFIG_SBD, - SBD_DEVICE=';'.join(_context.sbd_device), - SBD_PACEMAKER="yes", - SBD_STARTMODE="always", - SBD_DELAY_START="no", - SBD_WATCHDOG_DEV=detect_watchdog_device()) - csync2_update(SYSCONFIG_SBD) + _context.sbd_manager.sbd_init() def init_cluster(): @@ -1528,15 +1597,7 @@ op_defaults op-options: timeout=600 record-pending=true rsc_defaults rsc-options: resource-stickiness=1 migration-threshold=3 """) - if configured_sbd_device() and _context.sbd_device: - if not invoke("crm configure primitive stonith-sbd stonith:external/sbd pcmk_delay_max=30s"): - error("Can't create stonith-sbd primitive") - if not invoke("crm configure property stonith-enabled=true"): - error("Can't enable STONITH for SBD") - elif _context.diskless_sbd: - # TODO: configure stonith-watchdog-timeout correctly - if not invoke("crm configure property stonith-enabled=true stonith-watchdog-timeout=5s"): - error("Can't enable STONITH for diskless SBD") + _context.sbd_manager.configure_sbd_resource() def init_vgfs(): @@ -2007,10 +2068,7 @@ def join_cluster(seed_host): csync2_update(corosync.conf()) invoke("ssh -o StrictHostKeyChecking=no root@{} corosync-cfgtool -R".format(seed_host)) - # if no SBD devices are configured, - # check the existing cluster if the sbd service is enabled - if not configured_sbd_device() and invoke("ssh -o StrictHostKeyChecking=no root@{} systemctl is-enabled sbd.service".format(seed_host)): - _context.diskless_sbd = True + _context.sbd_manager.join_sbd(seed_host) if ipv6_flag and not is_unicast: # for ipv6 mcast @@ -2264,6 +2322,7 @@ def bootstrap_init(context): global _context _context = context _context.init_qdevice() + _context.init_sbd_manager() stage = _context.stage if stage is None: @@ -2327,6 +2386,7 @@ def bootstrap_join(context): """ global _context _context = context + _context.init_sbd_manager() check_tty() diff --git a/crmsh/ui_cluster.py b/crmsh/ui_cluster.py index 37eb4072..caf527f1 100644 --- a/crmsh/ui_cluster.py +++ b/crmsh/ui_cluster.py @@ -252,7 +252,7 @@ Note: storage_group = parser.add_argument_group("Storage configuration", "Options for configuring shared storage.") storage_group.add_argument("-p", "--partition-device", dest="shared_device", metavar="DEVICE", help='Partition this shared storage device (only used in "storage" stage)') - storage_group.add_argument("-s", "--sbd-device", dest="sbd_device", metavar="DEVICE", action="append", + storage_group.add_argument("-s", "--sbd-device", dest="sbd_devices", 
metavar="DEVICE", action="append", help="Block device to use for SBD fencing, use \";\" as separator or -s multiple times for multi path (up to 3 devices)") storage_group.add_argument("-o", "--ocfs2-device", dest="ocfs2_device", metavar="DEVICE", help='Block device to use for OCFS2 (only used in "vgfs" stage)') -- 2.21.1
