Hi,

I'm trying to understand a strange behaviour; maybe someone here can help
me out.

I configured 3 VMs primitives in my pacemaker cluster. Migrating a VM
works fine.

But when I tell pacemaker to move one VM away, all the VMs are migrating
to another node, together. This is not load balancing as I planned to
implement it :)

See my configuration at the end of this message.

Jonathan



orque2 ~ # crm resource show
 Master/Slave Set: drbd-data-clone
     Masters: [ orque orque2 ]
 Clone Set: dlm-clone
     Started: [ orque orque2 ]
 Clone Set: gfs-clone
     Started: [ orque orque2 ]
 Clone Set: orque-fs-clone
     Started: [ orque orque2 ]
 orque2-fencing (stonith:meatware) Started
 orque-fencing  (stonith:meatware) Started
 kvm-testVM     (ocf::heartbeat:VirtualDomain) Started
 kvm-observatoire-test  (ocf::heartbeat:VirtualDomain) Started
 kvm-adonga     (ocf::heartbeat:VirtualDomain) Started



orque2 ~ # crm configure show
node orque \
        attributes standby="false"
node orque2 \
        attributes standby="false"
primitive dlm ocf:pacemaker:controld \
        op monitor interval="120s" \
        op start interval="0" timeout="90s" \
        op stop interval="0" timeout="100s"
primitive drbd-data ocf:linbit:drbd \
        params drbd_resource="orque-raid" \
        op start interval="0" timeout="240s" start-delay="5s" \
        op stop interval="0" timeout="100s" \
        op monitor interval="30s" timeout="30s" start-delay="5s"
primitive gfs-control ocf:pacemaker:controld \
        params daemon="gfs_controld.pcmk" args="-g 0" \
        op monitor interval="120s" \
        op start interval="0" timeout="90s" \
        op stop interval="0" timeout="100s"
primitive kvm-adonga ocf:heartbeat:VirtualDomain \
        params config="/etc/libvirt/qemu/adonga.xml"
hypervisor="qemu:///system" migration_transport="ssh" \
        meta allow-migrate="true" target-role="Started" is-managed="true" \
        op start interval="0" timeout="200s" \
        op stop interval="0" timeout="200s" \
        op monitor interval="10" timeout="200s" on-fail="restart" depth="0"
primitive kvm-observatoire-test ocf:heartbeat:VirtualDomain \
        params config="/etc/libvirt/qemu/observatoire-test.xml"
hypervisor="qemu:///system" migration_transport="ssh" \
        meta allow-migrate="true" target-role="Started" is-managed="true" \
        op start interval="0" timeout="200s" \
        op stop interval="0" timeout="200s" \
        op monitor interval="10" timeout="200s" on-fail="restart" depth="0"
primitive kvm-testVM ocf:heartbeat:VirtualDomain \
        params config="/etc/libvirt/qemu/testVM.xml"
hypervisor="qemu:///system" migration_transport="ssh" \
        meta allow-migrate="true" target-role="Started" is-managed="true" \
        op start interval="0" timeout="200s" \
        op stop interval="0" timeout="200s" \
        op monitor interval="10" timeout="200s" on-fail="restart" depth="0"
primitive orque-fencing stonith:meatware \
        params hostlist="orque" \
        meta is-managed="true"
primitive orque-fs ocf:heartbeat:Filesystem \
        params device="/dev/drbd/by-res/orque-raid" directory="/data"
fstype="gfs2" \
        op start interval="0" timeout="60s" start-delay="5s" \
        op stop interval="0" timeout="60s" \
        op monitor interval="10" timeout="40s" start-delay="5s"
primitive orque2-fencing stonith:meatware \
        params hostlist="orque2" \
        meta is-managed="true" target-role="Started"
ms drbd-data-clone drbd-data \
        meta master-max="2" master-node-max="1" clone-max="2"
clone-node-max="1" notify="true"
clone dlm-clone dlm \
        meta interleave="true" target-role="Started"
clone gfs-clone gfs-control \
        meta interleave="true" target-role="Started"
clone orque-fs-clone orque-fs \
        meta is-managed="true" target-role="Started" interleave="true"
ordered="true"
location kvm-testVM-prefers-orque kvm-testVM 50: orque
location loc-orque-fencing orque-fencing -inf: orque
location loc-orque2-fencing orque2-fencing -inf: orque2
colocation fs_on_drbd inf: orque-fs-clone drbd-data-clone:Master
colocation gfs-with-dlm inf: gfs-clone dlm-clone
colocation kvm-adonga-with-orque-fs inf: kvm-adonga orque-fs-clone
colocation kvm-observatoire-test-with-orque-fs inf:
kvm-observatoire-test orque-fs-clone
colocation kvm-testVM-with-orque-fs inf: kvm-testVM orque-fs-clone
colocation orque-fs-with-gfs-control inf: orque-fs-clone gfs-clone
order gfs-after-dlm inf: dlm-clone gfs-clone
order kvm-adonga-after-orque-fs inf: orque-fs-clone kvm-adonga
order kvm-observatoire-test-after-orque-fs inf: orque-fs-clone
kvm-observatoire-test
order kvm-testVM-after-orque-fs inf: orque-fs-clone kvm-testVM
order orque-fs-after-drbd-data inf: drbd-data-clone:promote
orque-fs-clone:start
order orque-fs-after-gfs-control inf: gfs-clone orque-fs-clone
property $id="cib-bootstrap-options" \
        dc-version="1.0.9-unknown" \
        cluster-infrastructure="openais" \
        expected-quorum-votes="2" \
        stonith-enabled="true" \
        no-quorum-policy="ignore" \
        last-lrm-refresh="1300292868"
rsc_defaults $id="rsc-options" \
        resource-stickiness="100"

Attachment: 0xA8657ED2.asc
Description: application/pgp-keys

Attachment: signature.asc
Description: OpenPGP digital signature

_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems

Reply via email to