On 16 November 2010 17:15, Robinson, Eric <[email protected]> wrote:
> I'm not sure if this list or the DRBD list is the right one to ask this.
>
> Is it possible to deploy a 3-node CRM-based cluster where:
>
> -- nodes A and C share resource R1 on /dev/drbd0
>
> -- nodes B and C share resource R2 on /dev/drbd1
>
> -- resource constraints prevent R1 from running on node B and
> prevent resource R2 from running on node A?
>
> -
>
Yes, this can be done. I already have a cluster like this.
Here are my configs.
drbd conf common on all nodes
# cat drbd.conf
#
# please have a look at the example configuration file in
# /usr/share/doc/drbd83/drbd.conf
#
global {
usage-count yes;
}
common {
protocol C;
syncer {
csums-alg sha1;
verify-alg sha1;
rate 10M;
}
net {
data-integrity-alg sha1;
max-buffers 20480;
max-epoch-size 16384;
}
disk {
on-io-error detach;
### Only when DRBD is under cluster ###
fencing resource-only;
### --- ###
}
startup {
wfc-timeout 60;
degr-wfc-timeout 30;
outdated-wfc-timeout 15;
}
### Only when DRBD is under cluster ###
handlers {
split-brain "/usr/lib/drbd/notify-split-brain.sh root";
fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
after-resync-target "/usr/lib/drbd/crm-unfence-peer.sh";
}
### --- ###
}
resource drbd_resource_01 {
on node-01 {
device /dev/drbd1;
disk /dev/sdb1;
address 10.10.10.129:7789;
meta-disk internal;
}
on node-03 {
device /dev/drbd1;
disk /dev/sdb1;
address 10.10.10.131:7789;
meta-disk internal;
}
syncer {
cpu-mask 2;
}
}
resource drbd_resource_02 {
on node-02 {
device /dev/drbd2;
disk /dev/sdb1;
address 10.10.10.130:7790;
meta-disk internal;
}
on node-03 {
device /dev/drbd2;
disk /dev/sdc1;
address 10.10.10.131:7790;
meta-disk internal;
}
syncer {
cpu-mask 1;
}
}
# cat cluster.conf
node $id="059313ce-c6aa-4bd5-a4fb-4b781de6d98f" node-03
node $id="d791b1f5-9522-4c84-a66f-cd3d4e476b38" node-02
node $id="e388e797-21f4-4bbe-a588-93d12964b4d7" node-01
primitive drbd_01 ocf:linbit:drbd \
params drbd_resource="drbd_resource_01" \
op monitor interval="30s" \
op start interval="0" timeout="240s" \
op stop interval="0" timeout="120s"
primitive drbd_02 ocf:linbit:drbd \
params drbd_resource="drbd_resource_02" \
op monitor interval="30s" \
op start interval="0" timeout="240s" \
op stop interval="0" timeout="120s"
primitive fs_01 ocf:heartbeat:Filesystem \
params device="/dev/drbd1" directory="/pbx_service_01" fstype="ext3" \
meta migration-threshold="3" failure-timeout="60" \
op monitor interval="20s" timeout="40s" OCF_CHECK_LEVEL="20" \
op start interval="0" timeout="60s" \
op stop interval="0" timeout="60s"
primitive fs_02 ocf:heartbeat:Filesystem \
params device="/dev/drbd2" directory="/pbx_service_02" fstype="ext3" \
meta migration-threshold="3" failure-timeout="60" \
op monitor interval="20s" timeout="40s" OCF_CHECK_LEVEL="20" \
op start interval="0" timeout="60s" \
op stop interval="0" timeout="60s"
primitive ip_01 ocf:heartbeat:IPaddr2 \
params ip="192.168.78.10" nic="eth3" cidr_netmask="24" broadcast="192.168.78.255" \
meta failure-timeout="120" migration-threshold="3" \
op monitor interval="5s"
primitive ip_02 ocf:heartbeat:IPaddr2 \
meta failure-timeout="120" migration-threshold="3" \
params ip="192.168.78.20" nic="eth3" cidr_netmask="24" broadcast="192.168.78.255" \
op monitor interval="5s"
primitive mailAlert-01 ocf:heartbeat:MailTo \
params email="root" subject="[Zanadoo Cluster event] pbx_service_01" \
op monitor interval="2" timeout="10" \
op start interval="0" timeout="10" \
op stop interval="0" timeout="10"
primitive mailAlert-02 ocf:heartbeat:MailTo \
params email="root" subject="[Zanadoo Cluster event] pbx_service_02" \
op monitor interval="2" timeout="10" \
op start interval="0" timeout="10" \
op stop interval="0" timeout="10"
primitive pbx_01 lsb:znd-pbx_01 \
meta migration-threshold="3" failure-timeout="60" \
op monitor interval="20s" timeout="20s" \
op start interval="0" timeout="60s" \
op stop interval="0" timeout="60s"
primitive pbx_02 lsb:znd-pbx_02 \
meta migration-threshold="3" failure-timeout="60" \
op monitor interval="20s" timeout="20s" \
op start interval="0" timeout="60s" \
op stop interval="0" timeout="60s"
primitive pdu stonith:external/rackpdu \
params community="empisteftiko" names_oid=".1.3.6.1.4.1.318.1.1.4.4.2.1.4" oid=".1.3.6.1.4.1.318.1.1.4.4.2.1.3" hostlist="AUTO" pduip="192.168.100.100" stonith-timeout="30"
primitive sshd_01 lsb:znd-sshd-pbx_01 \
meta is-managed="true" \
op monitor on-fail="stop" interval="10m" \
op start interval="0" timeout="60s" on-fail="stop" \
op stop interval="0" timeout="60s" on-fail="stop"
primitive sshd_02 lsb:znd-sshd-pbx_02 \
op monitor on-fail="stop" interval="10m" \
op start interval="0" timeout="60s" on-fail="stop" \
op stop interval="0" timeout="60s" on-fail="stop"
group pbx_service_01 ip_01 fs_01 pbx_01 sshd_01 mailAlert-01 \
meta target-role="Started"
group pbx_service_02 ip_02 fs_02 pbx_02 sshd_02 mailAlert-02 \
meta target-role="Started"
ms ms-drbd_01 drbd_01 \
meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true" target-role="Started"
ms ms-drbd_02 drbd_02 \
meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true" target-role="Started"
clone fencing pdu \
meta target-role="Started"
location PrimaryNode-drbd_01 ms-drbd_01 100: node-01
location PrimaryNode-drbd_02 ms-drbd_02 100: node-02
location PrimaryNode-pbx_service_01 pbx_service_01 200: node-01
location PrimaryNode-pbx_service_02 pbx_service_02 200: node-02
location SecondaryNode-drbd_01 ms-drbd_01 0: node-03
location SecondaryNode-drbd_02 ms-drbd_02 0: node-03
location SecondaryNode-pbx_service_01 pbx_service_01 10: node-03
location SecondaryNode-pbx_service_02 pbx_service_02 10: node-03
location fencing-on-node-01 fencing 1: node-01
location fencing-on-node-02 fencing 1: node-02
location fencing-on-node-03 fencing 1: node-03
colocation fs_01-on-drbd_01 inf: fs_01 ms-drbd_01:Master
colocation fs_02-on-drbd_02 inf: fs_02 ms-drbd_02:Master
order pbx_service_01-after-drbd_01 inf: ms-drbd_01:promote pbx_service_01:start
order pbx_service_02-after-drbd_02 inf: ms-drbd_02:promote pbx_service_02:start
property $id="cib-bootstrap-options" \
dc-version="1.1.3-9c2342c0378140df9bed7d192f2b9ed157908007" \
cluster-infrastructure="Heartbeat" \
symmetric-cluster="false" \
stonith-enabled="true" \
last-lrm-refresh="1288188633"
rsc_defaults $id="rsc-options" \
resource-stickiness="1000"
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems