I don't see any ordering constraint telling the cluster to wait until DRBD has been promoted to Master before starting the group. So the cluster probably tried to mount the filesystem too soon.
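Something along these lines should express that. This is only a sketch, in the same CIB syntax you're already using for vweb; ms_drbd_vmail is a placeholder name (it assumes you convert the vmail stack to a master/slave drbd resource), so substitute your real ids:

  <!-- don't start the group until the drbd resource has been promoted -->
  <rsc_order id="vmail_after_drbd" from="rg_vmail" action="start"
             to="ms_drbd_vmail" to_action="promote" type="after"/>
  <!-- and keep the group on the node where drbd is Master -->
  <rsc_colocation id="vmail_on_drbd" from="rg_vmail"
                  to="ms_drbd_vmail" to_role="master" score="INFINITY"/>

You need both halves: the order constraint only controls timing, not placement, so without the colocation the group could still land on the node where DRBD is Secondary.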
Also, you're using the drbd master/slave resource _and_ drbddisk... not a good idea. Choose one.

On Thu, Aug 28, 2008 at 01:14, Craig Ayliffe <[EMAIL PROTECTED]> wrote:
> Oops, sorry, of course I forgot to include that.
>
> <resources>
>   <group ordered="true" collocated="true" id="rg_vmail">
>     <primitive class="heartbeat" type="drbddisk" provider="heartbeat" id="drbddisk_vmail">
>       <instance_attributes id="7b55a172-8ba0-4ee9-a207-c7223c49cf20">
>         <attributes>
>           <nvpair name="target_role" value="started" id="e635e94e-e773-4546-a621-16a785152576"/>
>           <nvpair name="1" value="vmail" id="c5d219bc-5adc-4a0d-aa8a-de5075480a01"/>
>         </attributes>
>       </instance_attributes>
>       <instance_attributes id="8ed3aafc-bc31-48b2-9085-d2c5fbde24db">
>         <attributes>
>           <nvpair name="target_role" value="started" id="bf586f30-e531-4718-8479-64f9ac63f715"/>
>           <nvpair name="1" value="vmail" id="286b41eb-b2cf-40af-b4c4-f1dd3aa76d54"/>
>         </attributes>
>       </instance_attributes>
>     </primitive>
>     <primitive class="ocf" type="Filesystem" provider="heartbeat" id="fs_vmail">
>       <instance_attributes id="d8ed72a1-7653-46a2-9801-06935ee5efe4">
>         <attributes>
>           <nvpair name="device" value="/dev/drbd0" id="7af4d92a-f676-4a9d-9aea-0cad3f4e2d84"/>
>           <nvpair name="directory" value="/data/vmail" id="51c6a074-3683-407d-9e83-b1f91b87019c"/>
>           <nvpair name="type" value="ext2" id="c5994a9e-2efa-4999-a44c-83890b247904"/>
>         </attributes>
>       </instance_attributes>
>       <instance_attributes id="eee9e723-2c59-4b6b-ae03-cf2566e878bd">
>         <attributes>
>           <nvpair name="device" value="/dev/drbd0" id="cce7a82b-0593-447d-a012-21a1f97a9447"/>
>           <nvpair name="directory" value="/data/vmail" id="ea838625-4d71-4a43-81f6-a7aadf7b60f2"/>
>           <nvpair name="type" value="ext2" id="1914b11e-4196-4a68-8b92-5b746a4cc9b0"/>
>         </attributes>
>       </instance_attributes>
>     </primitive>
>     <primitive class="ocf" type="IPaddr2" provider="heartbeat" id="ip_vmail">
>       <instance_attributes id="b08960e6-3a76-48eb-8398-20de9a554302">
>         <attributes>
>           <nvpair name="ip" value="X.X.X.220" id="fa8ad586-12ea-4b3b-b838-318f29cbf283"/>
>           <nvpair name="nic" value="bond0" id="8fd1caf1-6be1-4f15-93e7-c07ec3b04d41"/>
>           <nvpair name="cidr_netmask" value="26" id="c4e96b3e-9394-496d-a9ae-cd357c102111"/>
>           <nvpair name="broadcast" value="X.X.X.255" id="57cb9a90-8b89-42cd-bf5b-cc7fea020ff0"/>
>         </attributes>
>       </instance_attributes>
>       <instance_attributes id="9af1502b-9118-47a7-bba8-6dfae0e2f21f">
>         <attributes>
>           <nvpair name="ip" value="X.X.X.220" id="05a96335-ba81-4bf2-93d2-f7497f852e60"/>
>           <nvpair name="nic" value="bond0" id="175264fe-791d-4895-9e9f-74ba4581728a"/>
>           <nvpair name="broadcast" value="X.X.X.255" id="911fcab0-01a2-4f65-8a92-2470217d47b9"/>
>         </attributes>
>       </instance_attributes>
>     </primitive>
>     <primitive class="lsb" type="dovecot" provider="heartbeat" id="dovecot"/>
>     <primitive class="lsb" type="postfix" provider="heartbeat" id="postfix"/>
>     <primitive class="lsb" type="mailbox-daemon" provider="heartbeat" id="mailbox-daemon"/>
>   </group>
>   <master_slave id="ms_drbd_vweb">
>     <meta_attributes id="ma_ms_drbd_vweb">
>       <attributes>
>         <nvpair id="ma_ms_drbd_vweb_1" name="clone_max" value="2"/>
>         <nvpair id="ma_ms_drbd_vweb_2" name="clone_node_max" value="1"/>
>         <nvpair id="ma_ms_drbd_vweb_3" name="master_max" value="1"/>
>         <nvpair id="ma_ms_drbd_vweb_4" name="master_node_max" value="1"/>
>         <nvpair id="ma_ms_drbd_vweb_5" name="notify" value="yes"/>
>         <nvpair id="ma_ms_drbd_vweb_6" name="globally_unique" value="false"/>
>         <nvpair id="ma_ms_drbd_vweb_7" name="target_role" value="started"/>
>       </attributes>
>     </meta_attributes>
>     <primitive id="drbd_vweb" class="ocf" provider="heartbeat" type="drbd">
>       <instance_attributes id="ia_drbd_vweb">
>         <attributes>
>           <nvpair id="ia_drbd_vweb_1" name="drbd_resource" value="vweb"/>
>         </attributes>
>       </instance_attributes>
>       <operations>
>         <op id="op_drbd_vweb_1" name="monitor" interval="59s" timeout="10s" role="Master"/>
>         <op id="op_drbd_vweb_2" name="monitor" interval="60s" timeout="10s" role="Slave"/>
>       </operations>
>     </primitive>
>   </master_slave>
>   <group ordered="true" collocated="true" id="rg_vweb">
>     <meta_attributes id="ma_rg_vweb">
>       <attributes>
>         <nvpair id="ma_rg_vweb-1" name="target_role" value="started"/>
>       </attributes>
>     </meta_attributes>
>     <primitive class="ocf" provider="heartbeat" type="Filesystem" id="fs_vweb">
>       <instance_attributes id="ia_fs_vweb">
>         <attributes>
>           <nvpair id="ia_fs_vweb_1" name="device" value="/dev/drbd1"/>
>           <nvpair id="ia_fs_vweb_2" name="directory" value="/data/vweb"/>
>           <nvpair id="ia_fs_vweb_3" name="type" value="ext3"/>
>         </attributes>
>       </instance_attributes>
>       <instance_attributes id="fs_vweb">
>         <attributes>
>           <nvpair id="fs_vweb-target_role" name="target_role" value="started"/>
>         </attributes>
>       </instance_attributes>
>     </primitive>
>     <primitive class="ocf" type="IPaddr2" provider="heartbeat" id="ip_vweb">
>       <instance_attributes id="ia_ip_vweb">
>         <attributes>
>           <nvpair id="ia_ip_vweb_1" name="ip" value="X.X.X.230"/>
>           <nvpair id="ia_ip_vweb_2" name="nic" value="bond0"/>
>           <nvpair id="ia_ip_vweb_3" name="cidr_netmask" value="26"/>
>           <nvpair id="ia_ip_vweb_4" name="broadcast" value="X.X.X.255"/>
>         </attributes>
>       </instance_attributes>
>     </primitive>
>     <primitive class="lsb" type="apache2" provider="heartbeat" id="apache2"/>
>     <primitive class="lsb" type="mysql" provider="heartbeat" id="mysql"/>
>   </group>
> </resources>
> <constraints>
>   <rsc_location id="location-ip_vmail" rsc="ip_vmail">
>     <rule id="ip_vmail-rule-1" score="100">
>       <expression attribute="#uname" operation="eq" value="syd-hosting-01" id="d1ed8ce9-b18c-4802-bdb3-9f70eff7070c"/>
>     </rule>
>   </rsc_location>
>   <rsc_location id="location-mailbox-daemon" rsc="mailbox-daemon">
>     <rule id="mailbox-daemon-rule-1" score="100">
>       <expression attribute="#uname" operation="eq" value="syd-hosting-01" id="43658d37-61ec-4518-ae51-1d38d813fb6b"/>
>     </rule>
>   </rsc_location>
>   <rsc_location id="location-postfix" rsc="postfix">
>     <rule id="postfix-rule-1" score="100">
>       <expression attribute="#uname" operation="eq" value="syd-hosting-01" id="abdfecf6-85ae-4735-8265-30d8f7d43cfb"/>
>     </rule>
>   </rsc_location>
>   <rsc_location id="location-dovecot" rsc="dovecot">
>     <rule id="dovecot-rule-1" score="100">
>       <expression attribute="#uname" operation="eq" value="syd-hosting-01" id="53ef533f-1602-4472-ad8c-36e8e807f567"/>
>     </rule>
>   </rsc_location>
>   <rsc_location id="location-rg_vmail" rsc="rg_vmail">
>     <rule id="rg_vmail-rule-1" score="100">
>       <expression attribute="#uname" operation="eq" value="syd-hosting-01" id="e6863f28-2504-4617-9a78-e3943dc776fd"/>
>     </rule>
>   </rsc_location>
>   <rsc_order id="vweb_after_drbd" from="rg_vweb" action="start" to="ms_drbd_vweb" to_action="promote" type="after"/>
>   <rsc_colocation id="vweb_on_drbd" to="ms_drbd_vweb" to_role="master" from="rg_vweb" score="INFINITY"/>
>   <rsc_location id="drbd_vweb_master_on_syd-hosting-02" rsc="ms_drbd_vweb">
>     <rule id="drbd_vweb_master_on_syd-hosting-02_rule1" role="master" score="100">
>       <expression id="drbd_vweb_master_on_syd-hosting-02_expression1" attribute="#uname" operation="eq" value="syd-hosting-02"/>
>     </rule>
>   </rsc_location>
>   <rsc_location id="location_fs_vweb" rsc="fs_vweb">
>     <rule id="fs_vweb_on_syd-hosting-02_rule1" score="100">
>       <expression id="fs_vweb_on_syd-hosting-02_expression1" attribute="#uname" operation="eq" value="syd-hosting-02"/>
>     </rule>
>   </rsc_location>
>   <rsc_location id="location_apache2" rsc="apache2">
>     <rule id="apache2_on_syd-hosting-02_rule1" score="100">
>       <expression id="apache2_on_syd-hosting-02_expression1" attribute="#uname" operation="eq" value="syd-hosting-02"/>
>     </rule>
>   </rsc_location>
>   <rsc_location id="location_mysql" rsc="mysql">
>     <rule id="mysql_on_syd-hosting-02_rule1" score="100">
>       <expression id="mysql_on_syd-hosting-02_expression1" attribute="#uname" operation="eq" value="syd-hosting-02"/>
>     </rule>
>   </rsc_location>
>   <rsc_location id="location_ip_vweb" rsc="ip_vweb">
>     <rule id="ip_vweb_on_syd-hosting-02_rule1" score="100">
>       <expression id="ip_vweb_on_syd-hosting-02_expression1" attribute="#uname" operation="eq" value="syd-hosting-02"/>
>     </rule>
>   </rsc_location>
>   <rsc_location id="cli-prefer-rg_vweb" rsc="rg_vweb">
>     <rule id="cli-prefer-rule-rg_vweb" score="INFINITY">
>       <expression id="cli-prefer-expr-rg_vweb" attribute="#uname" operation="eq" value="syd-hosting-02" type="string"/>
>     </rule>
>   </rsc_location>
> </constraints>
> </configuration>
>
>
> On Thu, Aug 28, 2008 at 2:49 AM, Ben Beuchler <[EMAIL PROTECTED]> wrote:
>
>> Please include your resources and constraints.
>>
>> -Ben
>>
>> On Wed, Aug 27, 2008 at 3:27 AM, Craig Ayliffe <[EMAIL PROTECTED]> wrote:
>> > Hi,
>> >
>> > Having trouble configuring Heartbeat with DRBD using the OCF agents in a
>> > Master/Slave setup.
>> >
>> > Trying to get 2 nodes running, with one node acting as Master and
>> > running all services (i.e. apache2/mysql) from one server.
>> >
>> > DRBD seems to get started ok, but it doesn't mount the filesystem.
>> > (PS: I am also using another DRBD instance that is using the drbddisk
>> > method - this one is working)
>> >
>> > crm_mon returns the following
>> >
>> > Node: syd-hosting-02 (522a9cb2-7a03-4600-988c-b9c7a977421a): online
>> > Node: syd-hosting-01 (49671172-3296-4c25-aa8b-38cb4cea923e): online
>> >
>> > Resource Group: rg_vmail
>> >     drbddisk_vmail  (heartbeat:drbddisk):        Started syd-hosting-01
>> >     fs_vmail        (heartbeat::ocf:Filesystem): Started syd-hosting-01
>> >     ip_vmail        (heartbeat::ocf:IPaddr2):    Started syd-hosting-01
>> >     dovecot         (lsb:dovecot):               Started syd-hosting-01
>> >     postfix         (lsb:postfix):               Started syd-hosting-01
>> >     mailbox-daemon  (lsb:mailbox-daemon):        Started syd-hosting-01
>> > Master/Slave Set: ms_drbd_vweb
>> >     drbd_vweb:0     (heartbeat::ocf:drbd):       Master syd-hosting-02
>> >     drbd_vweb:1     (heartbeat::ocf:drbd):       Started syd-hosting-01
>> >
>> > Failed actions:
>> >     fs_vweb_start_0 (node=syd-hosting-02, call=26, rc=1): Error
>> >     fs_vweb_start_0 (node=syd-hosting-01, call=25, rc=1): Error
>> >
>> > [EMAIL PROTECTED]:~# crm_verify -L -V
>> > crm_verify[10998]: 2008/08/27_18:19:54 WARN: unpack_rsc_op: Processing failed op fs_vweb_start_0 on syd-hosting-02: Error
>> > crm_verify[10998]: 2008/08/27_18:19:54 WARN: unpack_rsc_op: Compatability handling for failed op fs_vweb_start_0 on syd-hosting-02
>> > crm_verify[10998]: 2008/08/27_18:19:54 WARN: unpack_rsc_op: Processing failed op fs_vweb_start_0 on syd-hosting-01: Error
>> > crm_verify[10998]: 2008/08/27_18:19:54 WARN: unpack_rsc_op: Compatability handling for failed op fs_vweb_start_0 on syd-hosting-01
>> > crm_verify[10998]: 2008/08/27_18:19:54 WARN: native_color: Resource fs_vweb cannot run anywhere
>> > crm_verify[10998]: 2008/08/27_18:19:54 WARN: native_color: Resource ip_vweb cannot run anywhere
>> > crm_verify[10998]: 2008/08/27_18:19:54 WARN: native_color: Resource apache2 cannot run anywhere
>> > crm_verify[10998]: 2008/08/27_18:19:54 WARN: native_color: Resource mysql cannot run anywhere
>> > Warnings found during check: config may not be valid
>> >
>> > What do these errors mean???
>> >
>> > --
>> > Craig Ayliffe
>
> --
> Craig Ayliffe

_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems
