lu.glm.list_owned becomes lu.owned_locks, which is clearer for the
reader.
Also rename three variables (previously all named owned_locks) to make
it clearer what they track.
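
For illustration, a minimal, self-contained sketch of the bound-method
alias pattern this introduces (the LockManager class and the "node"
lock level below are made up for the example, not the real
cmdlib/locking API):

    class LockManager(object):
        # Stand-in for the real Ganeti lock manager (glm).
        def __init__(self, owned):
            self._owned = owned

        def list_owned(self, level):
            # Return the names of the locks owned at the given level.
            return self._owned.get(level, frozenset())

    class LogicalUnit(object):
        def __init__(self, glm):
            self.glm = glm
            # readability alias: owned_locks is the bound method itself,
            # so lu.owned_locks(level) == lu.glm.list_owned(level)
            self.owned_locks = glm.list_owned

    glm = LockManager({"node": frozenset(["node1", "node2"])})
    lu = LogicalUnit(glm)
    assert lu.owned_locks("node") == lu.glm.list_owned("node")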
---
lib/cmdlib.py | 118 ++++++++++++++++++++++++++++-----------------------------
1 files changed, 58 insertions(+), 60 deletions(-)
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index a70dda0..d7f9126 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -119,6 +119,8 @@ class LogicalUnit(object):
self.op = op
self.cfg = context.cfg
self.glm = context.glm
+ # readability alias
+ self.owned_locks = context.glm.list_owned
self.context = context
self.rpc = rpc
# Dicts used to declare locking needs to mcpu
@@ -374,7 +376,7 @@ class LogicalUnit(object):
# future we might want to have different behaviors depending on the value
# of self.recalculate_locks[locking.LEVEL_NODE]
wanted_nodes = []
- locked_i = self.glm.list_owned(locking.LEVEL_INSTANCE)
+ locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
wanted_nodes.append(instance.primary_node)
if not primary_only:
@@ -488,7 +490,7 @@ class _QueryBase:
"""
if self.do_locking:
- names = lu.glm.list_owned(lock_level)
+ names = lu.owned_locks(lock_level)
else:
names = all_names
@@ -665,18 +667,18 @@ def _ReleaseLocks(lu, level, names=None, keep=None):
release = []
# Determine which locks to release
- for name in lu.glm.list_owned(level):
+ for name in lu.owned_locks(level):
if should_release(name):
release.append(name)
else:
retain.append(name)
- assert len(lu.glm.list_owned(level)) == (len(retain) + len(release))
+ assert len(lu.owned_locks(level)) == (len(retain) + len(release))
# Release just some locks
lu.glm.release(level, names=release)
- assert frozenset(lu.glm.list_owned(level)) == frozenset(retain)
+ assert frozenset(lu.owned_locks(level)) == frozenset(retain)
else:
# Release everything
lu.glm.release(level)
@@ -1609,7 +1611,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
# volumes for these instances are healthy, we will need to do an
# extra call to their secondaries. We ensure here those nodes will
# be locked.
- for inst in self.glm.list_owned(locking.LEVEL_INSTANCE):
+ for inst in self.owned_locks(locking.LEVEL_INSTANCE):
# Important: access only the instances whose lock is owned
if all_inst_info[inst].disk_template in constants.DTS_INT_MIRROR:
nodes.update(all_inst_info[inst].secondary_nodes)
@@ -1621,10 +1623,10 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
group_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
unlocked_nodes = \
- group_nodes.difference(self.glm.list_owned(locking.LEVEL_NODE))
+ group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
unlocked_instances = \
- group_instances.difference(self.glm.list_owned(locking.LEVEL_INSTANCE))
+ group_instances.difference(self.owned_locks(locking.LEVEL_INSTANCE))
if unlocked_nodes:
raise errors.OpPrereqError("Missing lock for nodes: %s" %
@@ -1658,7 +1660,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
extra_lv_nodes.add(nname)
unlocked_lv_nodes = \
- extra_lv_nodes.difference(self.glm.list_owned(locking.LEVEL_NODE))
+ extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
if unlocked_lv_nodes:
raise errors.OpPrereqError("these nodes could be locked: %s" %
@@ -2882,7 +2884,7 @@ class LUClusterVerifyDisks(NoHooksLU):
}
def Exec(self, feedback_fn):
- group_names = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+ group_names = self.owned_locks(locking.LEVEL_NODEGROUP)
# Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
@@ -2924,10 +2926,8 @@ class LUGroupVerifyDisks(NoHooksLU):
# going via the node before it's locked, requiring verification
# later on
[group_uuid
- for instance_name in
- self.glm.list_owned(locking.LEVEL_INSTANCE)
- for group_uuid in
- self.cfg.GetInstanceNodeGroups(instance_name)])
+ for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
+ for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
elif level == locking.LEVEL_NODE:
# This will only lock the nodes in the group to be verified which contain
@@ -2936,14 +2936,14 @@ class LUGroupVerifyDisks(NoHooksLU):
self._LockInstancesNodes()
# Lock all nodes in group to be verified
- assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
+ assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
def CheckPrereq(self):
- owned_instances = frozenset(self.glm.list_owned(locking.LEVEL_INSTANCE))
- owned_groups = frozenset(self.glm.list_owned(locking.LEVEL_NODEGROUP))
- owned_nodes = frozenset(self.glm.list_owned(locking.LEVEL_NODE))
+ owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+ owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+ owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
assert self.group_uuid in owned_groups
@@ -2997,7 +2997,7 @@ class LUGroupVerifyDisks(NoHooksLU):
if inst.admin_up])
if nv_dict:
- nodes = utils.NiceSort(set(self.glm.list_owned(locking.LEVEL_NODE)) &
+ nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
set(self.cfg.GetVmCapableNodeList()))
node_lvs = self.rpc.call_lv_list(nodes, [])
@@ -3058,7 +3058,7 @@ class LUClusterRepairDiskSizes(NoHooksLU):
"""
if self.wanted_names is None:
- self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)
+ self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
self.wanted_instances = \
map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
@@ -3283,7 +3283,7 @@ class LUClusterSetParams(LogicalUnit):
" drbd-based instances exist",
errors.ECODE_INVAL)
- node_list = self.glm.list_owned(locking.LEVEL_NODE)
+ node_list = self.owned_locks(locking.LEVEL_NODE)
# if vg_name not None, checks given volume group on all nodes
if self.op.vg_name:
@@ -4354,7 +4354,7 @@ class LUNodeQueryvols(NoHooksLU):
"""Computes the list of nodes and their attributes.
"""
- nodenames = self.glm.list_owned(locking.LEVEL_NODE)
+ nodenames = self.owned_locks(locking.LEVEL_NODE)
volumes = self.rpc.call_node_volumes(nodenames)
ilist = self.cfg.GetAllInstancesInfo()
@@ -4423,7 +4423,7 @@ class LUNodeQueryStorage(NoHooksLU):
"""Computes the list of nodes and their attributes.
"""
- self.nodes = self.glm.list_owned(locking.LEVEL_NODE)
+ self.nodes = self.owned_locks(locking.LEVEL_NODE)
# Always get name to sort by
if constants.SF_NAME in self.op.output_fields:
@@ -4512,17 +4512,15 @@ class _InstanceQuery(_QueryBase):
# via the node before it's locked, requiring verification later on
lu.needed_locks[locking.LEVEL_NODEGROUP] = \
set(group_uuid
- for instance_name in
- lu.glm.list_owned(locking.LEVEL_INSTANCE)
- for group_uuid in
- lu.cfg.GetInstanceNodeGroups(instance_name))
+ for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
+ for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
elif level == locking.LEVEL_NODE:
lu._LockInstancesNodes() # pylint: disable-msg=W0212
@staticmethod
def _CheckGroupLocks(lu):
- owned_instances = frozenset(lu.glm.list_owned(locking.LEVEL_INSTANCE))
- owned_groups = frozenset(lu.glm.list_owned(locking.LEVEL_NODEGROUP))
+ owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
+ owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
# Check if node groups for locked instances are still correct
for instance_name in owned_instances:
@@ -5044,7 +5042,7 @@ class LUNodeSetParams(LogicalUnit):
instances_keep = []
# Build list of instances to release
- locked_i = self.glm.list_owned(locking.LEVEL_INSTANCE)
+ locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
for instance_name, instance in self.cfg.GetMultiInstanceInfo(locked_i):
if (instance.disk_template in constants.DTS_INT_MIRROR and
self.op.node_name in instance.all_nodes):
@@ -5053,7 +5051,7 @@ class LUNodeSetParams(LogicalUnit):
_ReleaseLocks(self, locking.LEVEL_INSTANCE, keep=instances_keep)
- assert (set(self.glm.list_owned(locking.LEVEL_INSTANCE)) ==
+ assert (set(self.owned_locks(locking.LEVEL_INSTANCE)) ==
set(instances_keep))
def BuildHooksEnv(self):
@@ -6867,7 +6865,7 @@ class LUNodeMigrate(LogicalUnit):
# running the iallocator and the actual migration, a good consistency model
# will have to be found.
- assert (frozenset(self.glm.list_owned(locking.LEVEL_NODE)) ==
+ assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
frozenset([self.op.node_name]))
return ResultWithJobs(jobs)
@@ -8263,7 +8261,7 @@ class LUInstanceCreate(LogicalUnit):
src_path = self.op.src_path
if src_node is None:
- locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
+ locked_nodes = self.owned_locks(locking.LEVEL_NODE)
exp_list = self.rpc.call_export_list(locked_nodes)
found = False
for node in exp_list:
@@ -9091,7 +9089,7 @@ class LUInstanceReplaceDisks(LogicalUnit):
# Lock member nodes of all locked groups
self.needed_locks[locking.LEVEL_NODE] = [node_name
- for group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
+ for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
for node_name in self.cfg.GetNodeGroup(group_uuid).members]
else:
self._LockInstancesNodes()
@@ -9131,7 +9129,7 @@ class LUInstanceReplaceDisks(LogicalUnit):
assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
self.op.iallocator is None)
- owned_groups = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+ owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
if owned_groups:
groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
if owned_groups != groups:
@@ -9299,7 +9297,7 @@ class TLReplaceDisks(Tasklet):
if remote_node is None:
self.remote_node_info = None
else:
- assert remote_node in self.lu.glm.list_owned(locking.LEVEL_NODE), \
+ assert remote_node in self.lu.owned_locks(locking.LEVEL_NODE), \
"Remote node '%s' is not locked" % remote_node
self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
@@ -9419,13 +9417,13 @@ class TLReplaceDisks(Tasklet):
if __debug__:
# Verify owned locks before starting operation
- owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
- assert set(owned_locks) == set(self.node_secondary_ip), \
+ owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
+ assert set(owned_nodes) == set(self.node_secondary_ip), \
("Incorrect node locks, owning %s, expected %s" %
- (owned_locks, self.node_secondary_ip.keys()))
+ (owned_nodes, self.node_secondary_ip.keys()))
- owned_locks = self.lu.glm.list_owned(locking.LEVEL_INSTANCE)
- assert list(owned_locks) == [self.instance_name], \
+ owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
+ assert list(owned_instances) == [self.instance_name], \
"Instance '%s' not locked" % self.instance_name
assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
@@ -9460,12 +9458,12 @@ class TLReplaceDisks(Tasklet):
if __debug__:
# Verify owned locks
- owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
+ owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
nodes = frozenset(self.node_secondary_ip)
- assert ((self.early_release and not owned_locks) or
- (not self.early_release and not (set(owned_locks) - nodes))), \
+ assert ((self.early_release and not owned_nodes) or
+ (not self.early_release and not (set(owned_nodes) - nodes))), \
("Not owning the correct locks, early_release=%s, owned=%r,"
- " nodes=%r" % (self.early_release, owned_locks, nodes))
+ " nodes=%r" % (self.early_release, owned_nodes, nodes))
return result
@@ -10024,9 +10022,9 @@ class LUNodeEvacuate(NoHooksLU):
def CheckPrereq(self):
# Verify locks
- owned_instances = self.glm.list_owned(locking.LEVEL_INSTANCE)
- owned_nodes = self.glm.list_owned(locking.LEVEL_NODE)
- owned_groups = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+ owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
+ owned_nodes = self.owned_locks(locking.LEVEL_NODE)
+ owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
assert owned_nodes == self.lock_nodes
@@ -10317,7 +10315,7 @@ class LUInstanceQueryData(NoHooksLU):
"""
if self.wanted_names is None:
assert self.op.use_locking, "Locking was not used"
- self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)
+ self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
self.wanted_instances = \
map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
@@ -11137,7 +11135,7 @@ class LUBackupQuery(NoHooksLU):
that node.
"""
- self.nodes = self.glm.list_owned(locking.LEVEL_NODE)
+ self.nodes = self.owned_locks(locking.LEVEL_NODE)
rpcresult = self.rpc.call_export_list(self.nodes)
result = {}
for node in rpcresult:
@@ -11520,7 +11518,7 @@ class LUBackupRemove(NoHooksLU):
fqdn_warn = True
instance_name = self.op.instance_name
- locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
+ locked_nodes = self.owned_locks(locking.LEVEL_NODE)
exportlist = self.rpc.call_export_list(locked_nodes)
found = False
for node in exportlist:
@@ -11640,12 +11638,12 @@ class LUGroupAssignNodes(NoHooksLU):
"""
assert self.needed_locks[locking.LEVEL_NODEGROUP]
- assert (frozenset(self.glm.list_owned(locking.LEVEL_NODE)) ==
+ assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
frozenset(self.op.nodes))
expected_locks = (set([self.group_uuid]) |
self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
- actual_locks = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+ actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
if actual_locks != expected_locks:
raise errors.OpExecError("Nodes changed groups since locks were
acquired,"
" current groups are '%s', used to be '%s'" %
@@ -12105,7 +12103,7 @@ class LUGroupEvacuate(LogicalUnit):
# via the node before it's locked, requiring verification later on
lock_groups.update(group_uuid
for instance_name in
- self.glm.list_owned(locking.LEVEL_INSTANCE)
+ self.owned_locks(locking.LEVEL_INSTANCE)
for group_uuid in
self.cfg.GetInstanceNodeGroups(instance_name))
else:
@@ -12121,14 +12119,14 @@ class LUGroupEvacuate(LogicalUnit):
self._LockInstancesNodes()
# Lock all nodes in group to be evacuated
- assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
+ assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
def CheckPrereq(self):
- owned_instances = frozenset(self.glm.list_owned(locking.LEVEL_INSTANCE))
- owned_groups = frozenset(self.glm.list_owned(locking.LEVEL_NODEGROUP))
- owned_nodes = frozenset(self.glm.list_owned(locking.LEVEL_NODE))
+ owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+ owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+ owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
assert owned_groups.issuperset(self.req_target_uuids)
assert self.group_uuid in owned_groups
@@ -12192,14 +12190,14 @@ class LUGroupEvacuate(LogicalUnit):
"""
mn = self.cfg.GetMasterNode()
- assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
+ assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members
return (run_nodes, run_nodes)
def Exec(self, feedback_fn):
- instances = list(self.glm.list_owned(locking.LEVEL_INSTANCE))
+ instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
assert self.group_uuid not in self.target_uuids
--
1.7.3.1