Don't even think about trying to use master/slave resources with
anything less than heartbeat 2.1.4 (and preferably Pacemaker 1.0.1)
2.0.7 is far too old/broken in this respect.

On Mon, Dec 1, 2008 at 12:35, guillermo <[EMAIL PROTECTED]> wrote:
> I am having the following problem. Despite trying to solve it, I
> can't do it correctly. I am configuring heartbeat v2 and drbd, but other
> services, all of them, are working well except for drbd. In drbd I need
> to set up a partition, the problem is that the partition sets up before
> starting to master the node in which I need to set it up, as I said it
> before, I have tried different ways to configure the constraint in
> particular rsc_orden and I can't manage it so far. When the heartbeat
> starts, it begins the included drbd services, but before promoting to
> master the node in which I indicate to do it, it begins the file system
> mount in the same node. As the node couldn't promote to master, the
> services stop and pass to the other node, and when they pass to the
> other node, which is not the master, they stop again. I am using Debian stable,
> Heartbeat 2.0.7-2, drbd 0.7.21-4.
>
> This is my cib.xml
>
>      <cib generated="true" admin_epoch="0" have_quorum="true"
> num_peers="2" cib_feature_revision="1.3" epoch="127" num_updates="3328"
> cib-last-written="Mon Dec  1 08:04:44 2008" ccm_transition="2"
> dc_uuid="0f4f1743-aebd-4282-b038-f8ac1cdffa51">
>   <configuration>
>     <crm_config>
>       <cluster_property_set id="cib-bootstrap-options">
>         <attributes>
>           <nvpair id="cib-bootstrap-options-symmetric_cluster"
> name="symmetric_cluster" value="true"/>
>           <nvpair id="cib-bootstrap-options-no_quorum_policy"
> name="no_quorum_policy" value="stop"/>
>           <nvpair
> id="cib-bootstrap-options-default_resource_stickiness"
> name="default_resource_stickiness" value="0"/>
>           <nvpair
> id="cib-bootstrap-options-default_resource_failure_stickiness"
> name="default_resource_failure_stickiness" value="0"/>
>           <nvpair id="cib-bootstrap-options-stonith_enabled"
> name="stonith_enabled" value="false"/>
>           <nvpair id="cib-bootstrap-options-stonith_action"
> name="stonith_action" value="reboot"/>
>           <nvpair id="cib-bootstrap-options-stop_orphan_resources"
> name="stop_orphan_resources" value="true"/>
>           <nvpair id="cib-bootstrap-options-stop_orphan_actions"
> name="stop_orphan_actions" value="true"/>
>           <nvpair id="cib-bootstrap-options-remove_after_stop"
> name="remove_after_stop" value="false"/>
>           <nvpair id="cib-bootstrap-options-short_resource_names"
> name="short_resource_names" value="true"/>
>           <nvpair id="cib-bootstrap-options-transition_idle_timeout"
> name="transition_idle_timeout" value="5min"/>
>           <nvpair id="cib-bootstrap-options-default_action_timeout"
> name="default_action_timeout" value="10s"/>
>           <nvpair id="cib-bootstrap-options-is_managed_default"
> name="is_managed_default" value="true"/>
>         </attributes>
>       </cluster_property_set>
>     </crm_config>
>     <nodes>
>       <node id="0f4f1743-aebd-4282-b038-f8ac1cdffa51" uname="pbx-2"
> type="normal"/>
>       <node id="1e405e9c-c76c-47cd-967b-68c646c6ac27" uname="pbx-1"
> type="normal"/>
>     </nodes>
>     <resources>
>       <master_slave id="ms-r0">
>         <meta_attributes id="ma-ms-r0">
>           <attributes>
>             <nvpair id="ma-ms-r0-1" name="clone_max" value="2"/>
>             <nvpair id="ma-ms-r0-2" name="clone_node_max" value="1"/>
>             <nvpair id="ma-ms-r0-3" name="master_max" value="1"/>
>             <nvpair id="ma-ms-r0-4" name="master_node_max" value="1"/>
>             <nvpair id="ma-ms-r0-5" name="notify" value="yes"/>
>             <nvpair id="ma-ms-r0-6" name="globally_unique"
> value="false"/>
>             <nvpair id="ma-ms-r0-7" name="target_role"
> value="started"/>
>           </attributes>
>         </meta_attributes>
>         <primitive id="r0" class="ocf" provider="heartbeat"
> type="drbd">
>           <instance_attributes id="ia-r0">
>             <attributes>
>               <nvpair id="ia-r0-1" name="drbd_resource" value="r0"/>
>             </attributes>
>           </instance_attributes>
>           <operations>
>             <op id="op-r0-1" name="monitor" interval="59s"
> timeout="10s" role="Master"/>
>             <op id="op-r0-2" name="monitor" interval="60s"
> timeout="10s" role="Slave"/>
>           </operations>
>         </primitive>
>       </master_slave>
>       <group id="group_1">
>         <primitive class="ocf" id="IPaddr_192_168_123_205"
> provider="heartbeat" type="IPaddr">
>           <operations>
>             <op id="IPaddr_192_168_123_205_mon" interval="5s"
> name="monitor" timeout="5s"/>
>           </operations>
>           <instance_attributes id="IPaddr_192_168_123_205_inst_attr">
>             <attributes>
>               <nvpair id="IPaddr_192_168_123_205_attr_0" name="ip"
> value="192.168.123.205"/>
>               <nvpair id="IPaddr_192_168_123_205_attr_1" name="netmask"
> value="24"/>
>               <nvpair id="IPaddr_192_168_123_205_attr_2" name="nic"
> value="eth1"/>
>             </attributes>
>           </instance_attributes>
>         </primitive>
>         <primitive class="lsb" id="apache2_2" provider="heartbeat"
> type="apache2">
>           <operations>
>             <op id="apache2_2_mon" interval="120s" name="monitor"
> timeout="60s"/>
>           </operations>
>         </primitive>
>         <primitive class="lsb" id="asterisk_3" provider="heartbeat"
> type="asterisk">
>           <operations>
>             <op id="asterisk_3_mon" interval="120s" name="monitor"
> timeout="60s"/>
>           </operations>
>         </primitive>
>       </group>
>       <primitive class="ocf" provider="heartbeat" type="Filesystem"
> id="fs0">
>         <instance_attributes id="ia-group_1">
>           <attributes>
>             <nvpair id="ia-fs0-1" name="fstype" value="ext3"/>
>             <nvpair id="ia-fs0-2" name="directory"
> value="/replicated"/>
>             <nvpair id="ia-fs0-3" name="device" value="/dev/drbd0"/>
>           </attributes>
>         </instance_attributes>
>       </primitive>
>     </resources>
>     <constraints>
>       <rsc_location id="rsc_location_group_1" rsc="group_1">
>         <rule id="prefered_location_group_1" score="100">
>           <expression attribute="#uname"
> id="prefered_location_group_1_expr" operation="eq" value="pbx-2"/>
>         </rule>
>       </rsc_location>
>       <rsc_location id="r0_master_on_pbx-2" rsc="ms-r0">
>         <rule id="r0_master_on_pbx-2_rule1" role="master" score="100">
>           <expression id="r0_master_on_pbx-2_expression1"
> attribute="#uname" operation="eq" value="pbx-2"/>
>         </rule>
>       </rsc_location>
>       <rsc_order id="r0_before_fs0" to_action="start" to="fs0"
> type="before" action="promote" from="ms-r0"/>
>       <rsc_colocation id="fs0_on_r0" to="ms-r0" to_role="master"
> from="fs0" score="100"/>
>     </constraints>
>   </configuration>
>  </cib>
>
>
>
> And this is what it gives me back when I carry out the ptest -L
>
> /usr/lib/heartbeat/ptest -L element rsc_order: validity error : No
> declaration for attribute to_action of element rsc_order
> element rsc_order: validity error : Value "promote" for attribute action
> of rsc_order is not among the enumerated set
> element rsc_colocation: validity error : No declaration for attribute
> to_role of element rsc_colocation
> element rsc_colocation: validity error : Value "100" for attribute score
> of rsc_colocation is not among the enumerated set
>  <transition_graph global_timeout="10s" transition_id="0"/>
>
>
> Any help greatly appreciated, thanks in advance;
>
> Guillermo
>
> _______________________________________________
> Linux-HA mailing list
> [email protected]
> http://lists.linux-ha.org/mailman/listinfo/linux-ha
> See also: http://linux-ha.org/ReportingProblems
>
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems

Reply via email to