Hello,

After upgrading to 2.1.3 (built from the CentOS 5 src rpm, since in their release
rpm crm_mon was not compiled with ncurses...), the cluster stopped honouring
my location constraints that make my DRBD resource prefer node1 (meaning that,
after starting node1 again, all resources stay on node2, which is not what I
want; previously they would all move back to node1). I then noticed the cluster
had changed to symmetric (and I didn't do it...), so I changed that setting
back to false. But if I do that, DRBD and its dependent resources will not start
at all.

Effectively, crm_verify shows:

crm_verify[26625]: 2008/03/29_02:37:48 WARN: native_color: Resource drbd0:0
cannot run anywhere
crm_verify[26625]: 2008/03/29_02:37:48 WARN: native_color: Resource drbd0:1
cannot run anywhere
crm_verify[26625]: 2008/03/29_02:37:48 WARN: native_color: Resource fs0
cannot run anywhere
crm_verify[26625]: 2008/03/29_02:37:48 WARN: native_color: Resource vm-svc1
cannot run anywhere
crm_verify[26625]: 2008/03/29_02:37:48 WARN: native_color: Resource vm-ebx1
cannot run anywhere

So, any thoughts on why I get this behaviour now with 2.1.3? Do I have to do
something differently?

Below is my cib.xml:

 <cib generated="false" admin_epoch="0" have_quorum="true"
ignore_dtd="false" num_peers="0" cib_feature_revision="1.3" epoch="218"
num_updates="1" cib-last-written="Sat Mar 29 02:09:04 2008"
ccm_transition="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <attributes>
           <nvpair name="last-lrm-refresh"
id="cib-bootstrap-options-last-lrm-refresh" value="1206756315"/>
           <nvpair id="cib-bootstrap-options-symmetric-cluster"
name="symmetric-cluster" value="False"/>
           <nvpair
id="cib-bootstrap-options-default-resource-failure-stickiness"
name="default-resource-failure-stickiness" value="-INFINITY"/>
           <nvpair id="cib-bootstrap-options-default-resource-stickiness"
name="default-resource-stickiness" value="INFINITY"/>
           <nvpair id="cib-bootstrap-options-dc-version" name="dc-version"
value="2.1.3-node: 552305612591183b1628baa5bc6e903e0f1e26a3"/>
         </attributes>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node uname="node2.loc" type="normal"
id="e58c06b8-6300-4a83-8bf3-b8beb2f908a5">
         <instance_attributes
id="nodes-e58c06b8-6300-4a83-8bf3-b8beb2f908a5">
           <attributes>
             <nvpair name="standby"
id="standby-e58c06b8-6300-4a83-8bf3-b8beb2f908a5" value="off"/>
           </attributes>
         </instance_attributes>
       </node>
       <node uname="node1.loc" type="normal"
id="1d1a7816-7025-4537-8353-30cfd95e7bea">
         <instance_attributes
id="nodes-1d1a7816-7025-4537-8353-30cfd95e7bea">
           <attributes>
             <nvpair name="standby"
id="standby-1d1a7816-7025-4537-8353-30cfd95e7bea" value="off"/>
           </attributes>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="mail0" type="MailTo" class="ocf" provider="heartbeat">
         <instance_attributes id="ia-mail0">
           <attributes>
             <nvpair id="ia-mail0-1" name="email" value="[EMAIL PROTECTED]"/>
             <nvpair id="ia-mail0-2" name="subject" value="CLUSTER"/>
           </attributes>
         </instance_attributes>
       </primitive>
       <master_slave id="ms-drbd0">
         <meta_attributes id="ma-ms-drbd0">
           <attributes>
             <nvpair id="ma-ms-drbd0-1" name="clone_max" value="2"/>
             <nvpair id="ma-ms-drbd0-2" name="clone_node_max" value="1"/>
             <nvpair id="ma-ms-drbd0-3" name="master_max" value="1"/>
             <nvpair id="ma-ms-drbd0-4" name="master_node_max" value="1"/>
             <nvpair id="ma-ms-drbd0-5" name="notify" value="yes"/>
             <nvpair id="ma-ms-drbd0-6" name="globally_unique"
value="false"/>
             <nvpair id="ma-ms-drbd0-7" name="target_role" value="started"/>
           </attributes>
         </meta_attributes>
         <primitive id="drbd0" class="ocf" provider="heartbeat" type="drbd">
           <instance_attributes id="ia-drbd0">
             <attributes>
               <nvpair id="ia-drbd0-1" name="drbd_resource" value="drbd0"/>
             </attributes>
           </instance_attributes>
         </primitive>
       </master_slave>
       <primitive id="fs0" type="Filesystem" class="ocf"
provider="heartbeat">
         <meta_attributes id="ma-fs0">
           <attributes/>
         </meta_attributes>
         <instance_attributes id="ia-fs0">
           <attributes>
             <nvpair id="ia-fs0-1" name="fstype" value="ext3"/>
             <nvpair id="ia-fs0-2" name="directory" value="/drbd0"/>
             <nvpair id="ia-fs0-3" name="device" value="/dev/drbd0"/>
             <nvpair id="ia-fs0-4" name="target_role" value="started"/>
           </attributes>
         </instance_attributes>
       </primitive>
       <primitive id="vm-svc1" class="ocf" type="Xen" provider="heartbeat">
         <operations>
           <op id="op-vm-svc1-1" name="monitor" interval="10s"
timeout="120s" prereq="nothing"/>
           <op id="op-vm-svc1-2" name="start" timeout="60s"
start_delay="0"/>
           <op id="op-vm-svc1-3" name="stop" timeout="300s"/>
         </operations>
         <instance_attributes id="ia-vm-svc1">
           <attributes>
             <nvpair id="ia-vm-svc1-1" name="xmfile"
value="/etc/xen/vm/vm-svc1"/>
             <nvpair id="ia-vm-svc1-2" name="target_role" value="started"/>
           </attributes>
         </instance_attributes>
         <meta_attributes id="ma-vm-svc1">
           <attributes>
             <nvpair id="ma-vm-svc1-1" name="allow_migrate" value="false"/>
           </attributes>
         </meta_attributes>
       </primitive>
       <primitive id="vm-ebx1" class="ocf" type="Xen" provider="heartbeat">
         <operations>
           <op id="op-vm-ebx1-1" name="monitor" interval="10s"
timeout="120s" prereq="nothing"/>
           <op id="op-vm-ebx1-2" name="start" timeout="60s"
start_delay="0"/>
           <op id="op-vm-ebx1-3" name="stop" timeout="300s"/>
         </operations>
         <instance_attributes id="ia-vm-ebx1">
           <attributes>
             <nvpair id="ia-vm-ebx1-1" name="xmfile"
value="/etc/xen/vm/vm-ebx1"/>
             <nvpair id="ia-vm-ebx1-2" name="target_role" value="started"/>
           </attributes>
         </instance_attributes>
         <meta_attributes id="ma-vm-ebx1">
           <attributes>
             <nvpair id="ma-vm-ebx1-1" name="allow_migrate" value="false"/>
           </attributes>
         </meta_attributes>
       </primitive>
     </resources>
     <constraints>
       <rsc_location id="mail0-prefnode" rsc="mail0">
         <rule id="r1-mail0-prefnode" score="100">
           <expression attribute="#uname" id="e1-r1-mail0-prefnode"
operation="eq" value="node1.loc"/>
         </rule>
       </rsc_location>
       <rsc_location id="ms-drbd0-prefnode" rsc="ms-drbd0">
         <rule id="r1-drbd0-prefnode" score="100" boolean_op="and"
role="master">
           <expression attribute="#uname" id="e1-ms-drbd0-prefnode"
operation="eq" value="node1.loc"/>
         </rule>
       </rsc_location>
       <rsc_colocation id="fs0_on_drbd0" from="fs0" to="ms-drbd0"
to_role="master" score="INFINITY"/>
       <rsc_colocation id="vm-svc1_on_fs0" from="vm-svc1" to="fs0"
score="INFINITY"/>
       <rsc_colocation id="vm-ebx1_on_fs0" from="vm-ebx1" to="fs0"
score="INFINITY"/>
       <rsc_order id="fs0_after_drbd0" from="fs0" to="ms-drbd0"
to_action="promote" action="start"/>
       <rsc_order id="vm-svc1_after_fs0" from="vm-svc1" action="start"
type="after" to="fs0"/>
       <rsc_order id="vm-ebx1_after_fs0" from="vm-ebx1" action="start"
type="after" to="fs0"/>
     </constraints>
   </configuration>
 </cib> 


--
tks

_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems

Reply via email to