Woo-hoo I can finally give an answer on this list! :)

You need to set the no-quorum-policy to ignore:

# crm configure property no-quorum-policy=ignore

Mine is now running very nice indeed. The DRBD-pacemaker1.0 howto is superb and 
if you follow it and understand it then you should be fine.

-----Original Message-----
From: [email protected] 
[mailto:[email protected]] On Behalf Of Andre Eckstein
Sent: 02 May 2009 21:47
To: [email protected]
Subject: [Linux-HA] Problems With SLES11 + DRBD

Hi,

I do have the same setup, SLES 11 with the HASI extension.
I'm also not able to get drbd running correctly with pacemaker.
I followed the DRBD Howto on the pacemaker homepage step by step,
the resources start correctly and when I deactivate one node within the hb_gui 
all resources migrate successfully to the other node (including DRBD and a 
corresponding filesystem).
When I kill one of the cluster nodes the resources don't migrate.
If I kill one of the nodes it is shown as offline in the hb_gui.

Have you solved your problem in the meantime ?

Am I missing something, or is this a problem with the DRBD RA in the HASI 
extension?


Here's my cib xml,

<cib validate-with="pacemaker-1.0" crm_feature_set="3.0.1" have-quorum="1" 
dc-uuid="hacluster1" admin_epoch="0" epoch="30" num_updates="19">
  <configuration>
    <crm_config>
      <cluster_property_set id="cib-bootstrap-options">
        <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" 
value="1.0.3-0080ec086ae9c20ad5c4c3562000c0ad68374f0a"/>
        <nvpair id="cib-bootstrap-options-expected-quorum-votes" 
name="expected-quorum-votes" value="2"/>
        <nvpair name="last-lrm-refresh" 
id="cib-bootstrap-options-last-lrm-refresh" value="1241241230"/>
      </cluster_property_set>
    </crm_config>
    <nodes>
      <node uname="hacluster1" type="normal" id="hacluster1">
        <instance_attributes id="nodes-hacluster1">
          <nvpair name="standby" id="standby-hacluster1" value="false"/>
        </instance_attributes>
      </node>
      <node id="hacluster2" uname="hacluster2" type="normal"/>
    </nodes>
    <resources>
      <master id="ms-drbd0">
        <meta_attributes id="ms-drbd0-meta_attributes">
          <nvpair id="ms-drbd0-meta_attributes-clone-max" name="clone-max" 
value="2"/>
          <nvpair id="ms-drbd0-meta_attributes-notify" name="notify" 
value="true"/>
          <nvpair id="ms-drbd0-meta_attributes-globally-unique" 
name="globally-unique" value="false"/>
          <nvpair id="ms-drbd0-meta_attributes-target-role" name="target-role" 
value="started"/>
        </meta_attributes>
        <primitive class="ocf" id="drbd0" provider="heartbeat" type="drbd">
          <instance_attributes id="drbd0-instance_attributes">
            <nvpair id="drbd0-instance_attributes-drbd_resource" 
name="drbd_resource" value="drbd0"/>
          </instance_attributes>
          <operations>
            <op id="drbd0-monitor-60s" interval="60s" name="monitor" 
role="Master" timeout="30s"/>
            <op id="drbd0-monitor-60s-0" interval="60s" name="monitor" 
role="Slave" timeout="30s"/>
          </operations>
        </primitive>
      </master>
      <primitive class="ocf" id="fs0" provider="heartbeat" type="Filesystem">
        <instance_attributes id="fs0-instance_attributes">
          <nvpair id="fs0-instance_attributes-fstype" name="fstype" 
value="ext3"/>
          <nvpair id="fs0-instance_attributes-directory" name="directory" 
value="/vmfs"/>
          <nvpair id="fs0-instance_attributes-device" name="device" 
value="/dev/drbd0"/>
        </instance_attributes>
        <meta_attributes id="fs0-meta_attributes">
          <nvpair id="fs0-meta_attributes-target-role" name="target-role" 
value="started"/>
        </meta_attributes>
      </primitive>
      <primitive class="stonith" id="prim_stonith" type="external/ssh">
        <meta_attributes id="prim_stonith-meta_attributes">
          <nvpair id="nvpair-0cf56a77-c86d-4328-a60c-8b27183d7273" 
name="target-role" value="Started"/>
        </meta_attributes>
        <operations id="prim_stonith-operations">
          <op id="prim_stonith-op-monitor-15" interval="15" name="monitor" 
start-delay="15" timeout="15"/>
        </operations>
        <instance_attributes id="prim_stonith-instance_attributes">
          <nvpair id="nvpair-8fcba37c-9581-4b99-9107-d42985e4c3d9" 
name="hostlist" value="hacluster1 hacluster2"/>
        </instance_attributes>
      </primitive>
    </resources>
    <constraints>
      <rsc_location id="ms-drbd0-placement" rsc="ms-drbd0">
        <rule boolean-op="and" id="ms-drbd0-placement-rule" score="-INFINITY">
          <expression attribute="#uname" id="ms-drbd0-placement-expression" 
operation="ne" value="hacluster1"/>
          <expression attribute="#uname" id="ms-drbd0-placement-expression-0" 
operation="ne" value="hacluster2"/>
        </rule>
      </rsc_location>
      <rsc_location id="ms-drbd0-master-on-hacluster1" rsc="ms-drbd0">
        <rule id="ms-drbd0-master-on-hacluster1-rule" role="master" score="100">
          <expression attribute="#uname" 
id="ms-drbd0-master-on-hacluster1-expression" operation="eq" 
value="hacluster1"/>
        </rule>
      </rsc_location>
      <rsc_order first="ms-drbd0" first-action="promote" 
id="ms-drbd0-before-fs0" score="INFINITY" then="fs0" then-action="start"/>
      <rsc_colocation id="fs0-on-ms-drbd0" rsc="fs0" score="INFINITY" 
with-rsc="ms-drbd0" with-rsc-role="Master"/>
    </constraints>
    <rsc_defaults/>
    <op_defaults/>
  </configuration>
</cib>

Best regards,

Andre


-- 
Psssst! Schon vom neuen GMX MultiMessenger gehört? Der kann's mit allen: 
http://www.gmx.net/de/go/multimessenger01
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems

Reply via email to