> On Wed, Dec 12, 2007 at 05:23:28PM -0000, Rodrigo Pereira wrote:
> >
> > >
> > > On Tue, Dec 11, 2007 at 06:42:59PM -0000, Rodrigo Borges Pereira
> > > wrote:
> > > > Hello,
> > > >
> > > > I just want to confirm this. From what i've learned so far,
> > > STONITH is
> > > > relevant only to avoid data corruption when using shared
> > > storage. So,
> > > > is STONITH relevant when i'm using a non-shared setup with
> > > Heartbeat
> > > > and XEN VM on top of DRBD? Xen is using file images created
> > > on top of
> > > > a ext3 FS on top of DRBD, and there should not be any
> > > concurrent access.
> > >
> > > Here you say yourself that you need STONITH :)
> >
> > I guess i shouldn't have used the "should" word :) I think the key
> > fact here is that i am running the virtual machine on just one node
> > and on top of DRBD, in any given moment. A concurrency
> scenario would
> > happen if i'd have two virtual machines using the same virtual disk
> > file, running each on its node, and over DRBD set to
> primary/primary.
> > Is this something that could really happen in case of
> > mis-configuration of the cluster and/or lack of STONITH?
>
> Yes, in case there's a split brain (lack of communication
> between the two nodes). But I'm not sure I understand your
> configuration:
> your cluster consists of two Xen resources, right?
>
The only case i see the split brain occurring is with human error, with
cables disconnection or misconfig on the servers that affects networking. If
that happens, i won't be able to use SSH STONITH anyway, right?
I'm attaching my current "work in progress" resources and constraints files,
to clarify my config.
tks again,
rodrigo
> Thanks,
>
> Dejan
>
> >
> > tks,
> > rodrigo
> >
> > >
> > > Thanks,
> > >
> > > Dejan
> > >
> >
> >
> > _______________________________________________
> > Linux-HA mailing list
> > [email protected]
> > http://lists.linux-ha.org/mailman/listinfo/linux-ha
> > See also: http://linux-ha.org/ReportingProblems
<constraints>
<!-- LOCATIONS -->
<rsc_location id="mail0-prefnode" rsc="mail0">
<rule id="r1-mail0-prefnode" score="100">
<expression attribute="#uname" id="e1-r1-mail0-prefnode" operation="eq"
value="node1"/>
</rule>
</rsc_location>
<rsc_location id="ms-drbd0-prefnode" rsc="ms-drbd0">
<rule id="r1-drbd0-prefnode" score="100" boolean_op="and" role="master">
<expression attribute="#uname" id="e1-ms-drbd0-prefnode" operation="eq"
value="node1"/>
</rule>
</rsc_location>
<!-- COLOCATIONS -->
<rsc_colocation id="fs0_on_drbd0" from="fs0" to="ms-drbd0" to_role="master"
score="INFINITY"/>
<rsc_colocation id="vm-svc1_on_fs0" from="vm-svc1" to="fs0" score="INFINITY"/>
<rsc_colocation id="vm-ebx1_on_fs0" from="vm-ebx1" to="fs0" score="INFINITY"/>
<!-- ORDERING -->
<rsc_order id="fs0_after_drbd0" from="fs0" to="ms-drbd0" to_action="promote"
action="start"/>
<rsc_order id="vm-svc1_after_fs0" from="vm-svc1" action="start" type="after"
to="fs0"/>
<rsc_order id="vm-ebx1_after_fs0" from="vm-ebx1" action="start" type="after"
to="fs0"/>
</constraints><resources>
<!-- MAIL NOTIFICATIONS -->
<primitive id="mail0" type="MailTo" class="ocf" provider="heartbeat">
<instance_attributes id="ia-mail0">
<attributes
<nvpair id="ia-mail0-1" name="email" value="root"/>
<nvpair id="ia-mail0-2" name="subject" value="CLUSTER"/>
</attributes>
</instance_attributes>
</primitive>
<!-- DRBD MASTER-SLAVE DEFINITION -->
<master_slave id="ms-drbd0">
<meta_attributes id="ma-ms-drbd0">
<attributes>
<nvpair id="ma-ms-drbd0-1" name="clone_max" value="2"/>
<nvpair id="ma-ms-drbd0-2" name="clone_node_max" value="1"/>
<nvpair id="ma-ms-drbd0-3" name="master_max" value="1"/>
<nvpair id="ma-ms-drbd0-4" name="master_node_max" value="1"/>
<nvpair id="ma-ms-drbd0-5" name="notify" value="yes"/>
<nvpair id="ma-ms-drbd0-6" name="globally_unique" value="false"/>
<nvpair id="ma-ms-drbd0-7" name="target_role" value="started"/>
</attributes>
</meta_attributes>
<primitive id="drbd0" class="ocf" provider="heartbeat" type="drbd">
<instance_attributes id="ia-drbd0">
<attributes>
<nvpair id="ia-drbd0-1" name="drbd_resource" value="drbd0"/>
</attributes>
</instance_attributes>
</primitive>
</master_slave>
<!-- EXT3 FILESYSTEM ON TOP OF DRBD DEVICE -->
<!-- CONTAINS XEN IMAGES -->
<primitive id="fs0" type="Filesystem" class="ocf" provider="heartbeat">
<meta_attributes id="ma-fs0">
<attributes>
<!-- <nvpair id="ma-fs0-1" name="target_role" value="started"/> -->
</attributes>
</meta_attributes>
<instance_attributes id="ia-fs0">
<attributes>
<nvpair id="ia-fs0-1" name="fstype" value="ext3"/>
<nvpair id="ia-fs0-2" name="directory" value="/drbd0"/>
<nvpair id="ia-fs0-3" name="device" value="/dev/drbd0"/>
<nvpair id="ia-fs0-4" name="target_role" value="started"/>
</attributes>
</instance_attributes>
</primitive>
<!-- XEN VM SERVICES INSTANCE 1 -->
<!-- STANDARD PARA-VIRTUAL -->
<primitive id="vm-svc1" class="ocf" type="Xen" provider="heartbeat">
<operations>
<op id="op-vm-svc1-1" name="monitor" interval="10s" timeout="60s"
prereq="nothing"/>
<op id="op-vm-svc1-2" name="start" timeout="60s" start_delay="0"/>
<op id="op-vm-svc1-3" name="stop" timeout="300s"/>
</operations>
<instance_attributes id="ia-vm-svc1">
<attributes>
<nvpair id="ia-vm-svc1-1" name="xmfile" value="/etc/xen/vm/vm-svc1"/>
<nvpair id="ia-vm-svc1-2" name="target_role" value="started"/>
</attributes>
</instance_attributes>
<meta_attributes id="ma-vm-svc1">
<attributes>
<nvpair id="ma-vm-svc1-1" name="allow_migrate" value="false"/>
</attributes>
</meta_attributes>
</primitive>
<!-- XEN VM DEMO INSTANCE 1 -->
<!-- HVM FULL VIRTUALIZATION // VMWARE SERVER STYLE -->
<primitive id="vm-ebx1" class="ocf" type="Xen" provider="heartbeat">
<operations>
<op id="op-vm-ebx1-1" name="monitor" interval="10s" timeout="60s"
prereq="nothing"/>
<op id="op-vm-ebx1-2" name="start" timeout="60s" start_delay="0"/>
<op id="op-vm-ebx1-3" name="stop" timeout="300s"/>
</operations>
<instance_attributes id="ia-vm-ebx1">
<attributes>
<nvpair id="ia-vm-ebx1-1" name="xmfile" value="/etc/xen/vm/vm-ebx1"/>
<nvpair id="ia-vm-ebx1-2" name="target_role" value="started"/>
</attributes>
</instance_attributes>
<meta_attributes id="ma-vm-ebx1">
<attributes>
<nvpair id="ma-vm-ebx1-1" name="allow_migrate" value="false"/>
</attributes>
</meta_attributes>
</primitive>
</resources>
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems