Hi
I read http://wiki.linux-ha.org/DRBD/HowTov2 today and wanted to set up
those examples for a testing scenario.
My drbd setup is working. I can manually set each node to be primary for
each resource (while the other is secondary of course). When starting
heartbeat, I make sure every drbd device is either Unconfigured/down or
secondary.
I get one drbd+fs pair to run (the one using r1 in my config). But when
I try to add another one (the one using r0 in my config), it does not
promote the master and therefore does not mount the fs. The OCF script
hangs and times out at "crm_master -v 75" and as you can see in the
nodes section of the CIB, only the master value for r1 made it to the CIB.
Is there something special needed to run multiple DRBD devices in
master/slave mode?
If you need any further info, just ask — I'll be happy to supply it.
Regards
Dominik
global {
usage-count no;
}
common {
syncer { rate 128M; }
}
resource r0 {
protocol C;
startup {
wfc-timeout 15;
degr-wfc-timeout 120;
}
disk {
on-io-error detach;
}
net {
after-sb-0pri disconnect;
after-sb-1pri disconnect;
after-sb-2pri disconnect;
rr-conflict disconnect;
}
syncer {
rate 128M;
}
on ACD-xen01 {
device /dev/drbd0;
disk /dev/sdb1;
address 10.250.250.31:7789;
meta-disk internal;
}
on ACD-xen03 {
device /dev/drbd0;
disk /dev/sdb1;
address 10.250.250.33:7789;
meta-disk internal;
}
}
resource r1 {
protocol C;
startup {
wfc-timeout 15;
degr-wfc-timeout 120;
}
disk {
on-io-error detach;
}
net {
after-sb-0pri disconnect;
after-sb-1pri disconnect;
after-sb-2pri disconnect;
rr-conflict disconnect;
}
syncer {
rate 128M;
}
on ACD-xen01 {
device /dev/drbd1;
disk /dev/sda4;
address 10.250.250.31:7788;
meta-disk internal;
}
on ACD-xen03 {
device /dev/drbd1;
disk /dev/sda4;
address 10.250.250.33:7788;
meta-disk internal;
}
}
resource r2 {
protocol C;
startup {
wfc-timeout 15;
degr-wfc-timeout 120;
}
disk {
on-io-error detach;
}
net {
after-sb-0pri disconnect;
after-sb-1pri disconnect;
after-sb-2pri disconnect;
rr-conflict disconnect;
}
syncer {
rate 128M;
}
on ACD-xen01 {
device /dev/drbd2;
disk /dev/sdb2;
address 10.250.250.31:7790;
meta-disk internal;
}
on ACD-xen03 {
device /dev/drbd2;
disk /dev/sdb2;
address 10.250.250.33:7790;
meta-disk internal;
}
}
<cib generated="true" admin_epoch="0" have_quorum="true" ignore_dtd="false" num_peers="2" ccm_transition="2" cib_feature_revision="1.3" dc_uuid="d4506030-b86e-4877-9984-72b7b39e29ca" epoch="1" num_updates="97" cib-last-written="Wed Jul 4 12:41:00 2007">
<configuration>
<crm_config/>
<nodes>
<node id="d4506030-b86e-4877-9984-72b7b39e29ca" uname="acd-xen03" type="normal">
<instance_attributes id="master-d4506030-b86e-4877-9984-72b7b39e29ca">
<attributes>
<nvpair id="master-r1:0-d4506030-b86e-4877-9984-72b7b39e29ca" name="master-r1:0" value="75"/>
</attributes>
</instance_attributes>
</node>
<node id="f6ffbaa8-9c5b-4da1-9e93-b50d227ba805" uname="acd-xen01" type="normal">
<instance_attributes id="master-f6ffbaa8-9c5b-4da1-9e93-b50d227ba805">
<attributes>
<nvpair id="master-r1:1-f6ffbaa8-9c5b-4da1-9e93-b50d227ba805" name="master-r1:1" value="75"/>
</attributes>
</instance_attributes>
</node>
</nodes>
<resources>
<master_slave id="ms-r0">
<meta_attributes id="ma-ms-r0">
<attributes>
<nvpair id="ma-ms-r0-1" name="clone_max" value="2"/>
<nvpair id="ma-ms-r0-2" name="clone_node_max" value="1"/>
<nvpair id="ma-ms-r0-3" name="master_max" value="1"/>
<nvpair id="ma-ms-r0-4" name="master_node_max" value="1"/>
<nvpair id="ma-ms-r0-5" name="notify" value="yes"/>
<nvpair id="ma-ms-r0-6" name="globally_unique" value="false"/>
<nvpair name="target_role" id="ma-ms-r0-7" value="started"/>
</attributes>
</meta_attributes>
<primitive id="r0" class="ocf" provider="dk" type="drbd_master_slave">
<instance_attributes id="ia-r0">
<attributes>
<nvpair id="ia-r0-1" name="drbd_resource" value="r0"/>
</attributes>
</instance_attributes>
</primitive>
</master_slave>
<primitive class="ocf" provider="heartbeat" type="Filesystem" id="fs0">
<meta_attributes id="ma-fs0">
<attributes>
<nvpair name="target_role" id="ma-fs0-1" value="started"/>
</attributes>
</meta_attributes>
<instance_attributes id="ia-fs0">
<attributes>
<nvpair id="ia-fs0-1" name="fstype" value="ext3"/>
<nvpair id="ia-fs0-2" name="directory" value="/mnt/drbd0"/>
<nvpair id="ia-fs0-3" name="device" value="/dev/drbd0"/>
</attributes>
</instance_attributes>
</primitive>
<primitive class="ocf" provider="heartbeat" type="Filesystem" id="fs1">
<meta_attributes id="ma-fs1">
<attributes>
<nvpair name="target_role" id="ma-fs1-1" value="started"/>
</attributes>
</meta_attributes>
<instance_attributes id="ia-fs1">
<attributes>
<nvpair id="ia-fs1-1" name="fstype" value="ext3"/>
<nvpair id="ia-fs1-2" name="directory" value="/mnt/drbd1"/>
<nvpair id="ia-fs1-3" name="device" value="/dev/drbd1"/>
</attributes>
</instance_attributes>
</primitive>
<master_slave id="ms-r1">
<meta_attributes id="ma-ms-r1">
<attributes>
<nvpair id="ma-ms-r1-1" name="clone_max" value="2"/>
<nvpair id="ma-ms-r1-2" name="clone_node_max" value="1"/>
<nvpair id="ma-ms-r1-3" name="master_max" value="1"/>
<nvpair id="ma-ms-r1-4" name="master_node_max" value="1"/>
<nvpair id="ma-ms-r1-5" name="notify" value="yes"/>
<nvpair id="ma-ms-r1-6" name="globally_unique" value="false"/>
<nvpair name="target_role" id="ma-ms-r1-7" value="started"/>
</attributes>
</meta_attributes>
<primitive id="r1" class="ocf" provider="dk" type="drbd_master_slave">
<instance_attributes id="ia-r1">
<attributes>
<nvpair id="ia-r1-1" name="drbd_resource" value="r1"/>
</attributes>
</instance_attributes>
</primitive>
</master_slave>
</resources>
<constraints>
<rsc_colocation id="fs1_on_r1" to="ms-r1" to_role="master" from="fs1" score="infinity"/>
<rsc_order id="r0_before_fs0" from="fs0" action="start" to="ms-r0" to_action="promote"/>
<rsc_colocation id="fs0_on_r0" to="ms-r0" to_role="master" from="fs0" score="infinity"/>
<rsc_order id="r1_before_fs1" from="fs1" action="start" to="ms-r1" to_action="promote"/>
</constraints>
</configuration>
</cib>
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems