Hi everyone,

many thanks to Yan Gao for his quick answers to my HB GUI related
questions. Now, after the initial configuration with the GUI, I have
some problems with the fine tuning.

The following scenario:

We defined two resource groups: one group consists of a virtual ip
address, an apache web server and an application (the foo group). The
other one consists of a virtual ip address and an application (the bar
group). We want that, if any member of a resource group fails, the
resource group is switched to the 2nd server (server-2.domain.com) and
the 1st server (server-1.domain.com) is marked with a constraint so
that the service will not be switched to the 1st node, until someone
has checked the 2nd server.

We tried to set up a monitoring process, with prereq "nothing" and
on_fail "fence". But it does not work as expected. When we kill the
application on the first node, it is switched to the 2nd server. So
far, so good. But when we stop the application on the 1st server
(while the service is down on the 2nd server), heartbeat stops both
resource groups, the whole foo and the whole bar resource group, and
moves all services to the first node, even though no constraint is
defined and no dependency can be found between the two resource groups.

I know that node fencing is implemented via STONITH, and that is not
what we want. Neither do we want to restart the complete node, if a
service or resource group fails.

Hope that someone can resolve this mystery for me. I attached the
cib.xml for further analysis.

Best regards,
  ~./Jason
 <cib admin_epoch="0" have_quorum="true" ignore_dtd="false" num_peers="2" cib_feature_revision="1.3" generated="true" ccm_transition="14" dc_uuid="56eaf6ad-c3cf-4f75-8955-8b4850599836" epoch="24" num_updates="1620" cib-last-written="Mon Feb  9 15:40:27 2009">
   <configuration>
     <crm_config/>
     <nodes>
       <node uname="server-1.domain.com" type="normal" id="fb0cb3cd-5ee5-417a-a94f-ed6e921fb093">
         <instance_attributes id="nodes-fb0cb3cd-5ee5-417a-a94f-ed6e921fb093">
           <attributes>
             <nvpair name="standby" id="standby-fb0cb3cd-5ee5-417a-a94f-ed6e921fb093" value="false"/>
           </attributes>
         </instance_attributes>
       </node>
       <node uname="server-2.domain.com" type="normal" id="56eaf6ad-c3cf-4f75-8955-8b4850599836">
         <instance_attributes id="nodes-56eaf6ad-c3cf-4f75-8955-8b4850599836">
           <attributes>
             <nvpair name="standby" id="standby-56eaf6ad-c3cf-4f75-8955-8b4850599836" value="false"/>
           </attributes>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <group id="foo_group" collocated="false" ordered="true">
         <primitive class="ocf" type="IPaddr2" provider="heartbeat" id="foo_ip" is_managed="true">
           <instance_attributes id="foo_ip_instance_attrs">
             <attributes>
               <nvpair id="7f76283a-acab-4013-800f-70505e001fe2" name="ip" value="172.31.46.33"/>
               <nvpair id="foo_ip_target_role" name="target_role" value="started"/>
             </attributes>
           </instance_attributes>
         </primitive>
         <primitive id="resource_mdba" class="lsb" type="mdba" provider="heartbeat" is_managed="true">
           <instance_attributes id="resource_mdba_instance_attrs">
             <attributes>
               <nvpair id="resource_mdba_target_role" name="target_role" value="started"/>
             </attributes>
           </instance_attributes>
         </primitive>
         <instance_attributes id="foo_group_instance_attrs">
           <attributes>
             <nvpair id="foo_group_target_role" name="target_role" value="started"/>
           </attributes>
         </instance_attributes>
         <primitive id="resource_foo" class="lsb" type="foo" provider="heartbeat" is_managed="true">
           <instance_attributes id="resource_foo_instance_attrs">
             <attributes/>
           </instance_attributes>
         </primitive>
         <primitive class="ocf" type="apache" provider="heartbeat" id="resource_apache" is_managed="true">
           <instance_attributes id="resource_apache_instance_attrs">
             <attributes>
               <nvpair id="40027e45-9849-4a57-84f1-fbbd885e7205" name="configfile" value="/etc/httpd/conf/httpd.conf"/>
               <nvpair id="resource_apache_target_role" name="target_role" value="started"/>
             </attributes>
           </instance_attributes>
           <operations/>
         </primitive>
       </group>
       <group collocated="false" ordered="true" id="bar_group">
         <primitive class="ocf" type="IPaddr2" provider="heartbeat" is_managed="true" id="bar_ip">
           <instance_attributes id="bar_ip_instance_attrs">
             <attributes>
               <nvpair id="362a828e-5700-46e8-b12a-dc3d53554925" name="ip" value="172.31.46.34"/>
             </attributes>
           </instance_attributes>
           <operations>
             <op id="c7f88814-e547-42e3-a4f6-2d9049f0216c" name="monitor" description="monitor" interval="10s" timeout="20s" start_delay="5s" disabled="false" role="Started" prereq="nothing" on_fail="fence"/>
           </operations>
         </primitive>
         <instance_attributes id="bar_group_instance_attrs">
           <attributes/>
         </instance_attributes>
         <primitive class="lsb" type="bar" provider="heartbeat" is_managed="true" id="resource_bar">
           <instance_attributes id="resource_bar_instance_attrs">
             <attributes/>
           </instance_attributes>
           <operations>
             <op name="monitor" description="monitor" interval="15" timeout="15" disabled="false" role="Started" prereq="nothing" on_fail="fence" id="b8f3ec81-e584-4a92-82a6-b902a7bb7832" start_delay="5"/>
           </operations>
         </primitive>
         <primitive class="lsb" type="orbd" provider="heartbeat" is_managed="true" id="resource_orbd">
           <instance_attributes id="resource_orbd_instance_attrs">
             <attributes/>
           </instance_attributes>
           <operations>
             <op name="monitor" description="monitor" interval="10" timeout="15" start_delay="5" disabled="false" role="Started" prereq="nothing" id="b1001aba-0430-4595-9363-ab637bee68c0" on_fail="fence"/>
           </operations>
         </primitive>
       </group>
     </resources>
     <constraints>
       <rsc_location id="place_foo_pref" rsc="foo_group">
         <rule id="prefered_place_foo_pref" score="100">
           <expression attribute="#uname" id="0742bd21-98f4-4364-8747-2a8140c942b4" operation="eq" value="server-2.domain.com"/>
         </rule>
       </rsc_location>
       <rsc_location id="place_foo_fallback" rsc="foo_group">
         <rule id="prefered_place_foo_fallback" score="50">
           <expression attribute="#uname" id="d842c483-da1e-4358-8f6a-61500b373ebe" operation="eq" value="server-1.domain.com"/>
         </rule>
       </rsc_location>
       <rsc_location id="place_bar_pref" rsc="bar_group">
         <rule id="prefered_place_bar_pref" score="100">
           <expression attribute="#uname" id="ec4f4f58-d0fd-4f27-b156-2faa7f8b424e" operation="eq" value="server-1.domain.com"/>
         </rule>
       </rsc_location>
       <rsc_location id="place_bar_fallback" rsc="bar_group">
         <rule id="prefered_place_bar_fallback" score="50">
           <expression attribute="#uname" id="c9cdc258-9609-4a1c-82a3-105d5d1a52be" operation="eq" value="server-2.domain.com"/>
         </rule>
       </rsc_location>
       <rsc_location id="cli-prefer-bar_group" rsc="bar_group">
         <rule id="cli-prefer-rule-bar_group" score="INFINITY">
           <expression id="cli-prefer-expr-bar_group" attribute="#uname" operation="eq" value="server-1.domain.com" type="string"/>
         </rule>
       </rsc_location>
     </constraints>
   </configuration>
 </cib>
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems

Reply via email to