Hi,

On Fri, Feb 29, 2008 at 04:59:51PM +0100, Adrian Chapela wrote:
> Hello,
>
> I attached my newest config.

Which version do you run? Apparently, Andrew improved multistate
resources in 2.1.3.

Thanks,

Dejan

> This config has a clone resource, a master/slave resource and a "normal" 
> resource. I have two nodes (debian and semsor10) and I have serious 
> problems understanding why some situations happen.
>
> I think this is a normal situation:
>
> Node: semsor10 (2658f3c0-66a9-4348-b598-b9ff49769be7): online
> Node: debian (965ec846-da89-468a-b31e-9eea5af911e5): online
>
> Resource Group: IP_ADDR
>    IPaddr      (heartbeat::ocf:IPaddr):        Started semsor10
> Master/Slave Set: MySQL_Server
>    mysqld-child:0      (heartbeat::ocf:mysql_slave_master):    Master 
> semsor10
>    mysqld-child:1      (heartbeat::ocf:mysql_slave_master):    Started 
> debian
> Clone Set: pingd
>    pingd-child:0       (heartbeat::ocf:pingd): Started semsor10
>    pingd-child:1       (heartbeat::ocf:pingd): Started debian
>
> If I started semsor10 first, Semsor10 went to master well. Then I started 
> debian and in a moment, all resources disappeared from crm_mon. All except 
> pingd clone. Then the mysql resource is restarted and resources are as in the 
> sample output above. IP_ADDR goes down because of the location constraint, I think, but what 
> about the mysql master/slave ?? Why is it going down or making a new 
> negotiation ?? If I have default-resource-stickiness set to INFINITY I have 
> the same result. I want that if a resource is master on a node, this resource 
> must remain on that node; what is the option to do that ??
>
> Thank you!
>

>  <cib admin_epoch="1" have_quorum="false" ignore_dtd="false" num_peers="0" 
> cib_feature_revision="1.3" generated="false" epoch="7" num_updates="68" 
> cib-last-written="Fri Feb 29 13:26:41 2008">
>    <configuration>
>      <crm_config>
>        <cluster_property_set id="cps1">
>          <attributes>
>            <nvpair id="transition_idle_timeout" name="cluster-delay" 
> value="120s"/>
>            <nvpair id="symmetric_cluster" name="symmetric-cluster" 
> value="true"/>
>            <nvpair id="no_quorum_policy" name="no-quorum-policy" 
> value="stop"/>
>            <nvpair id="stop-orphan-resources" name="stop-orphan-resources" 
> value="true"/>
>            <nvpair id="stop-orphan-actions" name="stop-orphan-actions" 
> value="true"/>
>          </attributes>
>        </cluster_property_set>
>        <cluster_property_set id="cib-bootstrap-options">
>          <attributes>
>            <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" 
> value="2.1.3-node: 552305612591183b1628baa5bc6e903e0f1e26a3"/>
>            <nvpair id="stonith-action" name="stonith-action" 
> value="poweroff"/>
>            <nvpair id="cib-bootstrap-options-default-resource-stickiness" 
> name="default-resource-stickiness" value="50"/>
>            <nvpair 
> id="cib-bootstrap-options-default-resource-failure-stickiness" 
> name="default-resource-failure-stickiness" value="-INFINITY"/>
>          </attributes>
>        </cluster_property_set>
>      </crm_config>
>      <nodes>
>        <node id="2658f3c0-66a9-4348-b598-b9ff49769be7" uname="semsor10" 
> type="normal"/>
>        <node id="965ec846-da89-468a-b31e-9eea5af911e5" uname="debian" 
> type="normal"/>
>      </nodes>
>      <resources>
>        <group id="IP_ADDR" restart_type="restart">
>          <primitive class="ocf" provider="heartbeat" type="IPaddr" 
> id="IPaddr">
>            <operations>
>              <op id="4" interval="5s" name="monitor" timeout="5s"/>
>              <op id="5" name="start" timeout="5s"/>
>              <op id="6" name="stop" timeout="5s"/>
>            </operations>
>            <instance_attributes id="ia1">
>              <attributes>
>                <nvpair id="ip5" name="ip" value="192.168.18.1"/>
>                <nvpair id="ip6" name="netmask" value="24"/>
>                <nvpair id="ip7" name="gw" value="192.168.18.254"/>
>                <nvpair id="ip8" name="nic" value="ha1"/>
>              </attributes>
>            </instance_attributes>
>            <meta_attributes id="IPaddr:0_meta_attrs">
>              <attributes>
>                <nvpair name="target_role" id="IPaddr:0_metaattr_target_role" 
> value="started"/>
>              </attributes>
>            </meta_attributes>
>          </primitive>
>        </group>
>        <master_slave id="MySQL_Server">
>          <instance_attributes id="mysql_server_1">
>            <attributes>
>              <nvpair id="mysql_server_nv1" name="clone_max" value="2"/>
>              <nvpair id="mysql_server_nv2" name="clone_node_max" value="1"/>
>              <nvpair id="mysql_server_nv3" name="master_max" value="1"/>
>              <nvpair id="mysql_server_nv4" name="master_node_max" value="1"/>
>            </attributes>
>          </instance_attributes>
>          <primitive id="mysqld-child" class="ocf" type="mysql_slave_master" 
> provider="heartbeat">
>            <operations>
>              <op id="mysqld-child-monitor" name="monitor" interval="20s" 
> timeout="40s" prereq="nothing"/>
>              <op id="mysqld-child-start" name="start" prereq="nothing"/>
>              <op name="monitor" id="mysql-child-start-Slave" interval="10s" 
> timeout="10s" role="Slave"/>
>              <op name="monitor" id="mysql-child-start-Master" interval="5s" 
> timeout="10s" role="Master"/>
>            </operations>
>          </primitive>
>        </master_slave>
>        <clone id="pingd">
>          <instance_attributes id="pingd">
>            <attributes>
>              <nvpair id="pingd-clone_node_max" name="clone_node_max" 
> value="1"/>
>            </attributes>
>          </instance_attributes>
>          <primitive id="pingd-child" provider="heartbeat" class="ocf" 
> type="pingd">
>            <operations>
>              <op id="pingd-child-monitor" name="monitor" interval="20s" 
> timeout="40s" prereq="nothing"/>
>              <op id="pingd-child-start" name="start" prereq="nothing"/>
>            </operations>
>            <instance_attributes id="pingd_inst_attr">
>              <attributes>
>                <nvpair id="pingd-dampen" name="dampen" value="5s"/>
>                <nvpair id="pingd-multiplier" name="multiplier" value="100"/>
>              </attributes>
>            </instance_attributes>
>          </primitive>
>        </clone>
>      </resources>
>      <constraints>
>        <rsc_location id="my_resource:loc:debian" rsc="IP_ADDR">
>          <rule id="my_resource:connected:rule" score="-INFINITY" 
> boolean_op="or">
>            <expression id="my_resource:connected:expr:undefined" 
> attribute="pingd" operation="not_defined"/>
>            <expression id="my_resource:connected:expr:zero" attribute="pingd" 
> operation="lte" value="0"/>
>            <expression id="my_resource:connected:expr:mysql_is_running" 
> attribute="mysql_is_running" operation="ne" value="1"/>
>          </rule>
>        </rsc_location>
>        <rsc_location id="loc:mysql_slave_master" rsc="MySQL_Server">
>          <rule id="loc:mysql_slave_master:semsor10" role="master" score="100">
>            <expression id="loca:mysql_slave_master:semsor10" 
> attribute="#uname" operation="eq" value="semsor10"/>
>          </rule>
>          <rule id="loc:mysql_slave_master:debian" role="master" score="50">
>            <expression id="loca:mysql_slave_master:debian" attribute="#uname" 
> operation="eq" value="debian"/>
>          </rule>
>        </rsc_location>
>      </constraints>
>    </configuration>
>  </cib>

> _______________________________________________
> Linux-HA mailing list
> [email protected]
> http://lists.linux-ha.org/mailman/listinfo/linux-ha
> See also: http://linux-ha.org/ReportingProblems
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems

Reply via email to