On 12:15 Thu 10 Mar , Michael Hanselmann wrote: > Am 10. März 2011 11:57 schrieb Iustin Pop <[email protected]>: > > Actually this fails in the same way for migration. Interdiff: > > LGTM
Hi, this actually broke some stuff. Following patch should fix these. Thanks, Apollon -- Commit faaabe3c fixed failover behaviour for DTS_INT_MIRROR instances, however it broke migration for DTS_EXT_MIRROR instances, by moving iallocator and node checks from LUInstanceMigrate to TLMigrateInstance. This has the side-effect that the LU called the TL with None for both node and iallocator when the default iallocator was being used. This patch maintains the iallocator checks in TLMigrateInstance and fixes the LU-TL integration. Signed-off-by: Apollon Oikonomopoulos <[email protected]> --- lib/cmdlib.py | 29 +++++++++++++---------------- 1 files changed, 13 insertions(+), 16 deletions(-) diff --git a/lib/cmdlib.py b/lib/cmdlib.py index 956c31c..b87fc58 100644 --- a/lib/cmdlib.py +++ b/lib/cmdlib.py @@ -5947,8 +5947,7 @@ class LUInstanceMigrate(LogicalUnit): self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE self._migrater = TLMigrateInstance(self, self.op.instance_name, - self.op.cleanup, self.op.iallocator, - self.op.target_node) + self.op.cleanup) self.tasklets = [self._migrater] def DeclareLocks(self, level): @@ -6194,8 +6193,7 @@ class LUNodeMigrate(LogicalUnit): logging.debug("Migrating instance %s", inst.name) names.append(inst.name) - tasklets.append(TLMigrateInstance(self, inst.name, False, - self.op.iallocator, None)) + tasklets.append(TLMigrateInstance(self, inst.name, False)) if inst.disk_template in constants.DTS_EXT_MIRROR: # We need to lock all nodes, as the iallocator will choose the @@ -6241,8 +6239,7 @@ class TLMigrateInstance(Tasklet): this variable is initalized only after CheckPrereq has run """ - def __init__(self, lu, instance_name, cleanup, - iallocator=None, target_node=None): + def __init__(self, lu, instance_name, cleanup): """Initializes this class. 
""" @@ -6252,8 +6249,6 @@ class TLMigrateInstance(Tasklet): self.instance_name = instance_name self.cleanup = cleanup self.live = False # will be overridden later - self.iallocator = iallocator - self.target_node = target_node def CheckPrereq(self): """Check prerequisites. @@ -6276,16 +6271,18 @@ class TLMigrateInstance(Tasklet): raise errors.OpPrereqError("Do not specify both, iallocator and" " target node", errors.ECODE_INVAL) - if self.iallocator: + if self.lu.op.iallocator: self._RunAllocator() + else: + # We set self.target_node as it is required by + # BuildHooksEnv + self.target_node = self.lu.op.target_node - # self.target_node is already populated, either directly or by the - # iallocator run target_node = self.target_node if len(self.lu.tasklets) == 1: # It is safe to remove locks only when we're the only tasklet in the LU - nodes_keep = [instance.primary_node, self.target_node] + nodes_keep = [instance.primary_node, target_node] nodes_rel = [node for node in self.lu.acquired_locks[locking.LEVEL_NODE] if node not in nodes_keep] self.lu.context.glm.release(locking.LEVEL_NODE, nodes_rel) @@ -6329,21 +6326,21 @@ class TLMigrateInstance(Tasklet): self.instance.primary_node], ) - ial.Run(self.iallocator) + ial.Run(self.lu.op.iallocator) if not ial.success: raise errors.OpPrereqError("Can't compute nodes using" " iallocator '%s': %s" % - (self.iallocator, ial.info), + (self.lu.op.iallocator, ial.info), errors.ECODE_NORES) if len(ial.result) != ial.required_nodes: raise errors.OpPrereqError("iallocator '%s' returned invalid number" " of nodes (%s), required %s" % - (self.iallocator, len(ial.result), + (self.lu.op.iallocator, len(ial.result), ial.required_nodes), errors.ECODE_FAULT) self.target_node = ial.result[0] self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s", - self.instance_name, self.iallocator, + self.instance_name, self.lu.op.iallocator, utils.CommaJoin(ial.result)) if self.lu.op.live is not None and self.lu.op.mode is not None: 
-- 1.7.2.3
