hi,
  all

  I have a 2-node active/passive cluster, and my configuration is as follows:

cib.xml:
  <!-- Heartbeat v2 CIB (Cluster Information Base); two peers, quorum held,
       DC is the node with uuid ccf55a75 (iqsvrd-suse-005, see <nodes> below) -->
  <cib admin_epoch="0" have_quorum="true" num_peers="2" 
cib_feature_revision="1.3" ccm_transition="2" generated="true" dc_uuid
="ccf55a75-b061-4722-b173-4cd1e0f5fed9" epoch="92" num_updates="2126" 
cib-last-written="Tue Nov 21 14:28:51 2006">
   <configuration>
     <!-- Cluster-wide CRM options -->
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <attributes>
           <nvpair id="cib-bootstrap-options-symmetric_cluster" 
name="symmetric_cluster" value="true"/>
           <nvpair id="cib-bootstrap-options-no_quorum_policy" 
name="no_quorum_policy" value="stop"/>
           <!-- INFINITY stickiness: a running resource prefers to stay where
                it is (prevents automatic failback after a node returns) -->
           <nvpair id="cib-bootstrap-options-default_resource_stickiness" 
name="default_resource_stickiness" value="INFINITY"
/>
           <!-- -INFINITY failure stickiness: any failure forces the resource
                off the node it failed on -->
           <nvpair 
id="cib-bootstrap-options-default_resource_failure_stickiness" 
name="default_resource_failure_stickiness" 
value="-INFINITY"/>
           <!-- STONITH (node fencing) is disabled -->
           <nvpair id="cib-bootstrap-options-stonith_enabled" 
name="stonith_enabled" value="false"/>
           <nvpair id="cib-bootstrap-options-stonith_action" 
name="stonith_action" value="reboot"/>
           <nvpair id="cib-bootstrap-options-stop_orphan_resources" 
name="stop_orphan_resources" value="true"/>
           <nvpair id="cib-bootstrap-options-stop_orphan_actions" 
name="stop_orphan_actions" value="true"/>
           <nvpair id="cib-bootstrap-options-remove_after_stop" 
name="remove_after_stop" value="false"/>
           <nvpair id="cib-bootstrap-options-short_resource_names" 
name="short_resource_names" value="true"/>
           <nvpair id="cib-bootstrap-options-transition_idle_timeout" 
name="transition_idle_timeout" value="5min"/>
           <nvpair id="cib-bootstrap-options-default_action_timeout" 
name="default_action_timeout" value="50s"/>
           <nvpair id="cib-bootstrap-options-is_managed_default" 
name="is_managed_default" value="true"/>
         </attributes>
       </cluster_property_set>
     </crm_config>
     <!-- The two cluster members -->
     <nodes>
       <node id="c034a071-07b9-46e6-afce-ca01fc9bf126" uname="iqsvrd-suse-003" 
type="normal"/>
       <node id="ccf55a75-b061-4722-b173-4cd1e0f5fed9" uname="iqsvrd-suse-005" 
type="normal"/>
     </nodes>
     <!-- One resource group: service IP followed by an LSB monitor script;
          group members start in order and run on the same node -->
     <resources>
       <group id="group_1">
         <!-- OCF IPaddr: virtual IP 192.168.67.224/32 on eth0,
              monitored every 5s -->
         <primitive class="ocf" id="IPaddr_192_168_67_224" provider="heartbeat" 
type="IPaddr">
           <operations>
             <op id="IPaddr_192_168_67_224_mon" interval="5s" name="monitor" 
timeout="50s"/>
           </operations>
           <instance_attributes id="IPaddr_192_168_67_224_inst_attr">
             <attributes>
               <nvpair id="IPaddr_192_168_67_224_attr_0" name="ip" 
value="192.168.67.224"/>
               <nvpair id="IPaddr_192_168_67_224_attr_1" name="netmask" 
value="32"/>
               <nvpair id="IPaddr_192_168_67_224_attr_2" name="nic" 
value="eth0"/>
             </attributes>
           </instance_attributes>
         </primitive>
         <!-- LSB init-script resource DbsvrdMonitor.sh, monitored every 10s.
              NOTE(review): class "lsb" normally takes no provider attribute;
              confirm provider="heartbeat" here is accepted by your CRM. -->
         <primitive class="lsb" id="DbsvrdMonitor.sh_2" provider="heartbeat" 
type="DbsvrdMonitor.sh">
           <operations>
             <op id="DbsvrdMonitor.sh_2_stop" name="stop" timeout="30s"/>
             <op id="DbsvrdMonitor.sh_2_start" name="start" timeout="30s"/>
             <op id="DbsvrdMonitor.sh_2_mon" interval="10s" name="monitor" 
timeout="60s"/>
           </operations>
         </primitive>
       </group>
     </resources>
     <!-- Location preference: run group_1 on iqsvrd-suse-003 (score 100);
          with INFINITY stickiness this mainly affects initial placement -->
     <constraints>
       <rsc_location id="rsc_location_group_1" rsc="group_1">
         <rule id="prefered_location_group_1" score="100">
           <expression attribute="#uname" id="prefered_location_group_1_expr" 
operation="eq" value="iqsvrd-suse-003"/>
         </rule>
       </rsc_location>
     </constraints>
   </configuration>
 </cib>

ha.cf:
  # Enable the Heartbeat v2 cluster resource manager (CRM)
  crm 1
  debugfile /var/log/ha-debug
  logfile /var/log/ha-log
  logfacility     local0
  # Heartbeat every 10s; declare a node dead after 60s of silence
  keepalive 10
  deadtime 60
  udpport 694
  # Cluster communication: broadcast plus unicast, both on eth0
  bcast   eth0            # Linux
  # NOTE(review): 192.168.67.223 is neither node address shown in the CIB
  # above; confirm this is the peer node's real eth0 address on each host
  ucast eth0 192.168.67.223
  node    iqsvrd-suse-005
  node    iqsvrd-suse-003
  # Ping node used by pingd (respawned below) for connectivity monitoring
  ping 192.168.66.253
  respawn hacluster /usr/lib/heartbeat/pingd -m 50 -d 5s

  So far, I have achieved the following behavior:
  1. When iqsvrd-suse-003 and iqsvrd-suse-005 start heartbeat, the resource
runs on iqsvrd-suse-003;
  2. Then, when heartbeat is stopped on iqsvrd-suse-003, the resource moves to
iqsvrd-suse-005;
  3. And when iqsvrd-suse-003 starts heartbeat again, the resource stays on
iqsvrd-suse-005;
  This is good!!!

  But now, I want to achieve the following:
  1. When iqsvrd-suse-003 and iqsvrd-suse-005 start heartbeat, the resource
runs on iqsvrd-suse-003;
  2. Then, when the network cable is unplugged from iqsvrd-suse-003, the
resource stops on iqsvrd-suse-003 immediately and, meanwhile, starts on
iqsvrd-suse-005;
  3. And when the cable is plugged back into iqsvrd-suse-003, the resource
stays on iqsvrd-suse-005 and does not move back to iqsvrd-suse-003;

  or:
  1. When iqsvrd-suse-003 and iqsvrd-suse-005 start heartbeat, the resource
runs on iqsvrd-suse-003;
  2. Then the network cable is unplugged from iqsvrd-suse-005;
  3. And when heartbeat is stopped on iqsvrd-suse-003, iqsvrd-suse-005 does
not start the resource;

  How can I achieve this?
  Thanks in advance for your reply.






Thanks & Regards,
JamesJue


_______________________________________________
Linux-HA mailing list
[email protected]
http://listas.linuxchix.org.br/mailman/listinfo/linux-ha

Responder a