Hi all. Please help.
I have node1 & node2.
Node1 has network interface named bond0.
Node2 has network interface named eth1.
When Node1 is the only node started, everything is OK. But when I start Node2
after that, it performs a resource takeover.
What is strange is that the Heartbeat logs of Node2 contain these strings:
IPaddr2[4478]: 2008/11/21_18:03:23 ERROR: Failed: /usr/lib/heartbeat/findif
192.168.0.14/30/bond0/ . Parameter error.
crmd[4430]: 2008/11/21_18:03:23 WARN: process_lrm_event:lrm.c LRM operation (4)
stop_0 on IPaddr2_1 Error: (1) unknown error
IPaddr2[4486]: 2008/11/21_18:03:23 INFO: /sbin/ip -f inet addr add
192.168.0.14/30 brd 192.168.0.15 dev eth1 label eth1:0
IPaddr2[4486]: 2008/11/21_18:03:23 INFO: /sbin/ip link set eth1 up
IPaddr2[4486]: 2008/11/21_18:03:23 INFO: /usr/lib/heartbeat/send_arp -i 200 -r
5 -p /var/run/heartbeat/rsctmp/send_arp/send_arp-192.168.0.14 eth1 192.168.0.14
auto 192.168.0.14 ffffffffffff
So, Node2 tries to take over bond0 (which does not exist on Node2; it has eth1
instead). And then it brings up eth1 - that is ok.
So strange things are:
1. Why does Node2 make a takeover after heartbeat starts on it, while Node1 is
absolutely ok?
2. Why does Node2 monitor bond0, while it is the interface for Node1?
Here is my config:
<cib have_quorum="true" cib_feature_revision="1.3" generated="true"
num_peers="2" admin_epoch="0" epoch="29" num_updates="604"
cib-last-written="Fri Nov 21 18:03:25 2008">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<attributes>
<nvpair value="true" id="cib-bootstrap-options-symmetric_cluster"
name="symmetric_cluster"/>
<nvpair value="stop" id="cib-bootstrap-options-no_quorum_policy"
name="no_quorum_policy"/>
<nvpair value="0"
id="cib-bootstrap-options-default_resource_stickiness"
name="default_resource_stickiness"/>
<nvpair value="0"
id="cib-bootstrap-options-default_resource_failure_stickiness"
name="default_resource_failure_stickiness"/>
<nvpair value="false" id="cib-bootstrap-options-stonith_enabled"
name="stonith_enabled"/>
<nvpair value="reboot" id="cib-bootstrap-options-stonith_action"
name="stonith_action"/>
<nvpair value="true"
id="cib-bootstrap-options-stop_orphan_resources" name="stop_orphan_resources"/>
<nvpair value="true" id="cib-bootstrap-options-stop_orphan_actions"
name="stop_orphan_actions"/>
<nvpair value="false" id="cib-bootstrap-options-remove_after_stop"
name="remove_after_stop"/>
<nvpair value="true" id="cib-bootstrap-options-short_resource_names"
name="short_resource_names"/>
<nvpair value="5min"
id="cib-bootstrap-options-transition_idle_timeout"
name="transition_idle_timeout"/>
<nvpair value="5s" id="cib-bootstrap-options-default_action_timeout"
name="default_action_timeout"/>
<nvpair value="true" id="cib-bootstrap-options-is_managed_default"
name="is_managed_default"/>
</attributes>
</cluster_property_set>
</crm_config>
<nodes>
<node type="normal" uname="deb-1"
id="fa6315c1-b57a-4666-84eb-9c7756cb5f8c"/>
<node type="normal" uname="deb-2"
id="8a874e02-7190-4cd9-83c6-56155339ece9"/>
</nodes>
<resources>
<group id="group_1">
<primitive class="ocf" provider="heartbeat" type="IPaddr2"
id="IPaddr2_1">
<operations>
<op timeout="5s" id="IPaddr2_1_mon" name="monitor" interval="5s"/>
</operations>
<instance_attributes id="IPaddr2_1_inst_attr">
<attributes>
<nvpair value="192.168.0.14" id="IPaddr2_1_attr_0" name="ip"/>
<nvpair value="30" id="IPaddr2_1_attr_1" name="netmask"/>
<nvpair value="bond0" id="IPaddr2_1_attr_2" name="nic"/>
<nvpair value="0" id="IPaddr2_1_attr_3" name="iflabel"/>
</attributes>
</instance_attributes>
</primitive>
</group>
<group id="group_2">
<primitive class="ocf" provider="heartbeat" type="IPaddr2"
id="IPaddr2_2">
<operations>
<op timeout="5s" id="IPaddr2_2_mon" name="monitor" interval="5s"/>
</operations>
<instance_attributes id="IPaddr2_2_inst_attr">
<attributes>
<nvpair value="192.168.0.14" id="IPaddr2_2_attr_0" name="ip"/>
<nvpair value="30" id="IPaddr2_2_attr_1" name="netmask"/>
<nvpair value="eth1" id="IPaddr2_2_attr_2" name="nic"/>
<nvpair value="0" id="IPaddr2_2_attr_3" name="iflabel"/>
</attributes>
</instance_attributes>
</primitive>
</group>
</resources>
<constraints>
<rsc_location id="rsc_location_group_1" rsc="group_1">
<rule score="INFINITY" id="allow_location_group_1">
<expression operation="eq" value="deb-1" attribute="#uname"
id="allow_location_group_1_expr"/>
</rule>
<rule score="-INFINITY" id="deny_location_group_1">
<expression operation="ne" value="deb-1" attribute="#uname"
id="deny_location_group_1_expr"/>
</rule>
</rsc_location>
<rsc_location id="rsc_location_group_2" rsc="group_2">
<rule score="INFINITY" id="allow_location_group_2">
<expression operation="eq" value="deb-2" attribute="#uname"
id="allow_location_group_2_expr"/>
</rule>
<rule score="-INFINITY" id="deny_location_group_2">
<expression operation="ne" value="deb-2" attribute="#uname"
id="deny_location_group_2_expr"/>
</rule>
</rsc_location>
<rsc_location id="rsc_master_group_1" rsc="group_1">
<rule score="INFINITY" id="master_rule_group_1" role="master">
<expression operation="eq" value="deb-1" attribute="#uname"
id="master_rule_group_1_expr"/>
</rule>
</rsc_location>
<rsc_location id="rsc_slave_group_2" rsc="group_2">
<rule score="INFINITY" id="slave_rule_group_2" role="slave">
<expression operation="eq" value="deb-2" attribute="#uname"
id="slave_rule_group_2_expr"/>
</rule>
</rsc_location>
</constraints>
</configuration>
</cib>
Thank you.
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems