The CIB is as follows:
<cib generated="true" admin_epoch="0" have_quorum="true" ignore_dtd="false"
num_peers="2" ccm_transition="14" cib_feature_revision="2.0"
crm_feature_set="2.0" dc_uuid="9e29c2ad-674a-4b84-b697-0884d48c5bc0" epoch="50"
num_updates="1">
<configuration>
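<!-- Cluster-wide options. With symmetric-cluster="false" the cluster is
     opt-in: resources may only run on nodes that a location constraint
     explicitly allows. -->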
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<attributes>
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version"
value="2.1.4-node: 74d736e8f667fae1e4a904ccd13ad6553c5c010c"/>
<nvpair id="cib-bootstrap-options-symmetric-cluster"
name="symmetric-cluster" value="false"/>
<nvpair id="cib-bootstrap-options-is-managed-default"
name="is-managed-default" value="false"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy"
name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled"
name="stonith-enabled" value="true"/>
<nvpair id="cib-bootstrap-options-start-failure-is-fatal"
name="start-failure-is-fatal" value="false"/>
<nvpair id="cib-bootstrap-options-stop-orphan-resources"
name="stop-orphan-resources" value="false"/>
<nvpair id="cib-bootstrap-options-default-resource-stickiness"
name="default-resource-stickiness" value="0"/>
<nvpair
id="cib-bootstrap-options-default-resource-failure-stickiness"
name="default-resource-failure-stickiness" value="0"/>
</attributes>
</cluster_property_set>
</crm_config>
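<!-- The two dom0 nodes; each carries an *_lvmdev attribute, apparently
     naming the storage volume backing its domU. -->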
<nodes>
<node uname="lpxenhost15" type="normal"
id="9e29c2ad-674a-4b84-b697-0884d48c5bc0">
<instance_attributes id="nodes-9e29c2ad-674a-4b84-b697-0884d48c5bc0">
<attributes>
<nvpair
id="nodes-9e29c2ad-674a-4b84-b697-0884d48c5bc0-lddhcp01_lvmdev"
name="lddhcp01_lvmdev" value="_xsite:lddhcp01-sys"/>
</attributes>
</instance_attributes>
</node>
<node id="e64dc919-9b92-49dd-8afa-3339111cf998" uname="lpxenhost16"
type="normal">
<instance_attributes id="nodes-e64dc919-9b92-49dd-8afa-3339111cf998">
<attributes>
<nvpair
id="nodes-e64dc919-9b92-49dd-8afa-3339111cf998-lpdhcp01_lvmdev"
name="lpdhcp01_lvmdev" value="_xsite:lpdhcp01-sys"/>
</attributes>
</instance_attributes>
</node>
</nodes>
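<!-- Resources: a STONITH clone (clone_max="2", clone_node_max="1", so one
     instance per node) and the two Xen domU primitives, both of which set
     allow_migrate="1". -->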
<resources>
<clone id="cmlilo">
<meta_attributes id="cmlilo_meta_attrs">
<attributes>
<nvpair id="cmlilo_metaattr_is_managed" name="is_managed"
value="true"/>
<nvpair id="cmlilo_metaattr_target_role" name="target_role"
value="started"/>
<nvpair id="cmlilo_metaattr_clone_max" name="clone_max" value="2"/>
<nvpair id="cmlilo_metaattr_clone_node_max" name="clone_node_max"
value="1"/>
</attributes>
</meta_attributes>
<primitive id="stonith" class="stonith" type="external/cmlilo.pl"
provider="heartbeat">
<instance_attributes id="stonith_instance_attrs">
<attributes>
<nvpair id="stonith_nodeilo" name="NODEILO"
value="lpxenhost15,172.22.24.51:lpxenhost16,172.25.24.53:"/>
</attributes>
</instance_attributes>
<operations>
<op id="op_stonith_monitor" name="monitor" interval="60s"
timeout="30s"/>
</operations>
</primitive>
</clone>
<primitive id="lddhcp01" class="ocf" type="xen" provider="cml">
<meta_attributes id="lddhcp01_meta_attrs">
<attributes>
<nvpair id="lddhcp01_metaattr_is_managed" name="is_managed"
value="true"/>
<nvpair id="lddhcp01_metaattr_resource_stickiness"
name="resource_stickiness" value="100"/>
<nvpair id="lddhcp01_metaattr_resource_failure_stickiness"
name="resource_failure_stickiness" value="-500"/>
<nvpair id="lddhcp01_metaattr_multiple_active"
name="multiple_active" value="stop_only"/>
<nvpair id="lddhcp01_metaattr_target_role" name="target_role"
value="started"/>
</attributes>
</meta_attributes>
<instance_attributes id="lddhcp01_instance_attrs">
<attributes>
<nvpair id="lddhcp01_xmfile" name="xmfile"
value="/proj/xenconfigs/lddhcp01"/>
<nvpair id="lddhcp01_allow_migrate" name="allow_migrate"
value="1"/>
<nvpair id="lddhcp01_shutdown_timeout" name="shutdown_timeout"
value="280"/>
<nvpair id="lddhcp01_internal_ip" name="internal_ip"
value="10.10.176.75"/>
</attributes>
</instance_attributes>
<operations>
<op id="op_lddhcp01_monitor_ssh" name="monitor" interval="60s"
timeout="40s"/>
<op id="op_lddhcp01_monitor_lvm" name="monitor" interval="90s"
timeout="40s"/>
<op id="op_lddhcp01_start" name="start" timeout="180s"/>
<op id="op_lddhcp01_stop" name="stop" timeout="300s"/>
</operations>
</primitive>
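<!-- lpdhcp01 below is configured identically to lddhcp01 apart from its
     xmfile and internal_ip. -->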
<primitive id="lpdhcp01" class="ocf" type="xen" provider="cml">
<meta_attributes id="lpdhcp01_meta_attrs">
<attributes>
<nvpair id="lpdhcp01_metaattr_is_managed" name="is_managed"
value="true"/>
<nvpair id="lpdhcp01_metaattr_resource_stickiness"
name="resource_stickiness" value="100"/>
<nvpair id="lpdhcp01_metaattr_resource_failure_stickiness"
name="resource_failure_stickiness" value="-500"/>
<nvpair id="lpdhcp01_metaattr_multiple_active"
name="multiple_active" value="stop_only"/>
<nvpair id="lpdhcp01_metaattr_target_role" name="target_role"
value="started"/>
</attributes>
</meta_attributes>
<instance_attributes id="lpdhcp01_instance_attrs">
<attributes>
<nvpair id="lpdhcp01_xmfile" name="xmfile"
value="/proj/xenconfigs/lpdhcp01"/>
<nvpair id="lpdhcp01_allow_migrate" name="allow_migrate"
value="1"/>
<nvpair id="lpdhcp01_shutdown_timeout" name="shutdown_timeout"
value="280"/>
<nvpair id="lpdhcp01_internal_ip" name="internal_ip"
value="10.10.202.101"/>
</attributes>
</instance_attributes>
<operations>
<op id="op_lpdhcp01_monitor_ssh" name="monitor" interval="60s"
timeout="40s"/>
<op id="op_lpdhcp01_monitor_lvm" name="monitor" interval="90s"
timeout="40s"/>
<op id="op_lpdhcp01_start" name="start" timeout="180s"/>
<op id="op_lpdhcp01_stop" name="stop" timeout="300s"/>
</operations>
</primitive>
</resources>
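<!-- Location rules: the stonith clone is allowed everywhere (empty rule,
     score INFINITY); each domU scores -200 from its "-denied" rule plus
     +300 on the two hosts from its "-allow" rule, presumably a net +100
     on either host. -->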
<constraints>
<rsc_location id="stonith" rsc="cmlilo">
<rule id="prefered_stonith" score="INFINITY"/>
</rsc_location>
<rsc_location id="lddhcp01-denied" rsc="lddhcp01">
<rule id="lddhcp01-denied-rule" score="-200"/>
</rsc_location>
<rsc_location id="lddhcp01-allow" rsc="lddhcp01">
<rule id="lddhcp01-allow-rule" score="300" boolean_op="or">
<expression attribute="#uname" id="lddhcp01-allow-rule-lpxenhost16"
operation="eq" value="lpxenhost16"/>
<expression attribute="#uname" id="lddhcp01-allow-rule-lpxenhost15"
operation="eq" value="lpxenhost15"/>
</rule>
</rsc_location>
<rsc_location id="lpdhcp01-denied" rsc="lpdhcp01">
<rule id="lpdhcp01-denied-rule" score="-200"/>
</rsc_location>
<rsc_location id="lpdhcp01-allow" rsc="lpdhcp01">
<rule id="lpdhcp01-allow-rule" score="300" boolean_op="or">
<expression attribute="#uname" id="lpdhcp01-allow-rule-lpxenhost16"
operation="eq" value="lpxenhost16"/>
<expression attribute="#uname" id="lpdhcp01-allow-rule-lpxenhost15"
operation="eq" value="lpxenhost15"/>
</rule>
</rsc_location>
</constraints>
</configuration>
<status>
</status>
</cib>
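
For reference, the parallel moves described in the quoted message below
take roughly this form (a sketch rather than the exact commands used;
the resource names come from the CIB above, and lpxenhost16 as the
target host is an assumption):

    # move both domUs in quick succession (assumed target host)
    crm_resource -M -r lddhcp01 -H lpxenhost16
    crm_resource -M -r lpdhcp01 -H lpxenhost16
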
-----Original Message-----
From: [email protected]
[mailto:[email protected]]on Behalf Of Andrew Beekhof
Sent: Thursday, 5 March 2009 7:16 PM
To: General Linux-HA mailing list
Subject: Re: [Linux-HA] live migrate
On Thu, Mar 5, 2009 at 01:35, David Pinkerton H
<[email protected]> wrote:
>
> Having an issue with live migration:
>
> When I migrate a single domU (i.e. crm_resource -M -r domU) the source dom0
> calls "migrate_to" and the target dom0 calls "migrate_from" - as expected.
> If I execute several migrations at once, the source dom0 calls "migrate_to"
> whereas the target now calls "migrate_from" for the first domU and "start"
> for the remainder...
>
> All domUs have allow_migrate set to 1.
>
> I added the following code to the xen script to dump the calls/variables.
>
snip
> Is this behaviour correct?
It really depends on the rest of your configuration; there are some
very specific conditions that need to be met before a resource can do
a true migrate.
cib.xml?
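
One way to see which actions the policy engine scheduled, and why it
chose "start" over a migrate, is to replay the transition against a
snapshot of the CIB (a sketch using the standard heartbeat 2.x tools;
the verbosity level and grep pattern are assumptions):

    # capture the live CIB, then replay the transition offline
    cibadmin -Q > /tmp/cib-snapshot.xml
    ptest -x /tmp/cib-snapshot.xml -VVVV 2>&1 | grep -i migrate
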
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems