On Nov 26, 2007, at 11:56 AM, Adrian Revill wrote:

Hi
I'm trying to set up a DRBD/Heartbeat pair using V2.

I have a working V1 configuration, where if the active node (nodeA) is stopped/dies the standby node (nodeB) takes over the DRBD resources. Then when the stopped/dead node (nodeA) is restarted, the resources stay on nodeB till manually forced to change over.

I have converted this configuration to V2, and I cannot fully disable auto-failback.

I have set the default_stickiness to INFINITY

No, you don't. Well, you may have in the past, but that's not what the config below says now.


Using -INFINITY tells the cluster to _ALWAYS_ move the resource away from where it's running.
To disable auto_failback, set it to INFINITY instead.

And this works fine if I use the standby flag to control the resource failover. But if I stop and restart Heartbeat on the active node, the resources fail over to the standby node and then fail back to the active node.

It seems Heartbeat remembers where a resource was first run, and will always try to return it there, even if the default_stickiness is set.

How can I make Heartbeat V2 not fail back when a resource is restarted?



<cib admin_epoch="0" generated="false" have_quorum="false" ignore_dtd="false" num_peers="2" cib_feature_revision="1.3" epoch="8" num_updates="1" cib-last-written="Mon Nov 26 09:42:34 2007">
 <configuration>
   <crm_config>
     <cluster_property_set id="cib-bootstrap-options">
       <attributes>
<nvpair id="cib-bootstrap-options-symmetric-cluster" name="symmetric-cluster" value="true"/> <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="stop"/> <nvpair name="default-resource-stickiness" id="cib-bootstrap-options-default-resource-stickiness" value="-INFINITY"/> <nvpair id="cib-bootstrap-options-default-resource-failure-stickiness" name="default-resource-failure-stickiness" value="0"/> <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/> <nvpair id="cib-bootstrap-options-stonith-action" name="stonith-action" value="reboot"/> <nvpair id="cib-bootstrap-options-stop-orphan-resources" name="stop-orphan-resources" value="true"/> <nvpair id="cib-bootstrap-options-stop-orphan-actions" name="stop-orphan-actions" value="true"/> <nvpair id="cib-bootstrap-options-remove-after-stop" name="remove-after-stop" value="false"/> <nvpair id="cib-bootstrap-options-short-resource-names" name="short-resource-names" value="true"/> <nvpair id="cib-bootstrap-options-transition-idle-timeout" name="transition-idle-timeout" value="5min"/> <nvpair id="cib-bootstrap-options-default-action-timeout" name="default-action-timeout" value="15s"/> <nvpair id="cib-bootstrap-options-is-managed-default" name="is-managed-default" value="true"/>
       </attributes>
     </cluster_property_set>
   </crm_config>
   <nodes>
<node id="46e5126a-e064-44f4-adde-8434c8a10dc0" uname="boot2" type="normal"/> <node id="b0995799-b215-4a64-b575-cc858f95569e" uname="boot1" type="normal"> <instance_attributes id="nodes-b0995799-b215-4a64-b575-cc858f95569e">
         <attributes>
<nvpair id="standby-b0995799-b215-4a64-b575-cc858f95569e" name="standby" value="off"/>
         </attributes>
       </instance_attributes>
     </node>
   </nodes>
   <resources>
     <group id="group_1">
<primitive class="ocf" id="IPaddr_172_1_0_5" provider="heartbeat" type="IPaddr">
         <operations>
<op id="IPaddr_172_1_0_5_mon" interval="5s" name="monitor" timeout="5s"/>
         </operations>
         <instance_attributes id="IPaddr_172_1_0_5_inst_attr">
           <attributes>
<nvpair id="IPaddr_172_1_0_5_attr_0" name="ip" value="172.1.0.5"/>
           </attributes>
         </instance_attributes>
       </primitive>
<primitive class="heartbeat" id="drbddisk_2" provider="heartbeat" type="drbddisk">
         <operations>
<op id="drbddisk_2_mon" interval="120s" name="monitor" timeout="60s"/>
         </operations>
         <instance_attributes id="drbddisk_2_inst_attr">
           <attributes>
             <nvpair id="drbddisk_2_attr_1" name="1" value="r0"/>
           </attributes>
         </instance_attributes>
       </primitive>
<primitive class="heartbeat" id="drbddisk_3" provider="heartbeat" type="drbddisk">
         <operations>
<op id="drbddisk_3_mon" interval="120s" name="monitor" timeout="60s"/>
         </operations>
         <instance_attributes id="drbddisk_3_inst_attr">
           <attributes>
             <nvpair id="drbddisk_3_attr_1" name="1" value="r1"/>
           </attributes>
         </instance_attributes>
       </primitive>
<primitive class="ocf" id="Filesystem_4" provider="heartbeat" type="Filesystem">
         <operations>
<op id="Filesystem_4_mon" interval="120s" name="monitor" timeout="60s"/>
         </operations>
         <instance_attributes id="Filesystem_4_inst_attr">
           <attributes>
<nvpair id="Filesystem_4_attr_0" name="device" value="/dev/drbd0"/> <nvpair id="Filesystem_4_attr_1" name="directory" value="/ha"/> <nvpair id="Filesystem_4_attr_2" name="fstype" value="ext3"/>
           </attributes>
         </instance_attributes>
       </primitive>
<primitive class="ocf" id="Filesystem_5" provider="heartbeat" type="Filesystem">
         <operations>
<op id="Filesystem_5_mon" interval="120s" name="monitor" timeout="60s"/>
         </operations>
         <instance_attributes id="Filesystem_5_inst_attr">
           <attributes>
<nvpair id="Filesystem_5_attr_0" name="device" value="/dev/drbd1"/> <nvpair id="Filesystem_5_attr_1" name="directory" value="/shazam"/> <nvpair id="Filesystem_5_attr_2" name="fstype" value="ext3"/>
           </attributes>
         </instance_attributes>
       </primitive>
<primitive class="lsb" id="nfslock_6" provider="heartbeat" type="nfslock">
         <operations>
<op id="nfslock_6_mon" interval="120s" name="monitor" timeout="60s"/>
         </operations>
       </primitive>
<primitive class="heartbeat" id="nfsinit_7" provider="heartbeat" type="nfsinit">
         <operations>
<op id="nfsinit_7_mon" interval="120s" name="monitor" timeout="60s"/>
         </operations>
       </primitive>
<primitive class="ocf" id="IPaddr_172_16_10_37" provider="heartbeat" type="IPaddr">
         <operations>
<op id="IPaddr_172_16_10_37_mon" interval="5s" name="monitor" timeout="5s"/>
         </operations>
         <instance_attributes id="IPaddr_172_16_10_37_inst_attr">
           <attributes>
<nvpair id="IPaddr_172_16_10_37_attr_0" name="ip" value="172.16.10.37"/>
           </attributes>
         </instance_attributes>
       </primitive>
<primitive class="ocf" id="Xinetd_9" provider="heartbeat" type="Xinetd">
         <operations>
<op id="Xinetd_9_mon" interval="120s" name="monitor" timeout="60s"/>
         </operations>
         <instance_attributes id="Xinetd_9_inst_attr">
           <attributes>
<nvpair id="Xinetd_9_attr_0" name="service" value="tftp"/>
           </attributes>
         </instance_attributes>
       </primitive>
<primitive class="lsb" id="dhcpd_10" provider="heartbeat" type="dhcpd">
         <operations>
<op id="dhcpd_10_mon" interval="120s" name="monitor" timeout="60s"/>
         </operations>
       </primitive>
     </group>
   </resources>
   <constraints>
     <rsc_location id="rsc_location_group_1" rsc="group_1">
       <rule id="prefered_rsc_location_group_1" score="100">
<expression attribute="#uname" id="prefered_location_group_1_expr" operation="eq" value="boot1"/> <expression attribute="#uname" id="0f841fe6-1d23-40ae-b973-87ee0c98e324" operation="eq" value="boot2"/>
       </rule>
     </rsc_location>
   </constraints>
 </configuration>
</cib>

_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems

_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems

Reply via email to