Hi all! I am having some trouble with HA V2.1.3 on SLES10 SP1, a two-node cluster with one resource group containing two resources. The intent is a forced failover of the group on the third failure of any resource in the group; one node is preferred over the other (see attached configuration). After startup the resources are running on the preferred node (demo), as expected, but with a failcount of 1 and the following scores (from the "showscores" script):

Resource             Score      Node   Stick.  Failcount  Fail.-Stickiness
IPaddr_193_27_40_57  0          dbora  2       0          -3
IPaddr_193_27_40_57  2          demo   2       0          -3
ubis_udbmain_13      -INFINITY  dbora  2       0          -3
ubis_udbmain_13      INFINITY   demo   2       1          -3
The score of the first resource (IPaddr_193_27_40_57) is 2, as expected (group resource_stickiness=1), but the second resource has a score of INFINITY — why? Is it because of the added colocation constraint for the group? Nikita Michalko AIP
<cib admin_epoch="0" epoch="2" generated="false" have_quorum="false"
ignore_dtd="false" num_peers="1" cib_feature_revision="2.0" num_updates="1"
cib-last-written="Thu Mar 6 15:21:33 2008">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<attributes>
<nvpair id="cib-bootstrap-options-symmetric-cluster"
name="symmetric-cluster" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy"
name="no-quorum-policy" value="stop"/>
<nvpair id="cib-bootstrap-options-default-resource-stickiness"
name="default-resource-stickiness" value="2"/>
<nvpair
id="cib-bootstrap-options-default-resource-failure-stickiness"
name="default-resource-failure-stickiness" value="-3"/>
<nvpair id="cib-bootstrap-options-stonith-enabled"
name="stonith-enabled" value="false"/>
<nvpair id="cib-bootstrap-options-stonith-action"
name="stonith-action" value="reboot"/>
<nvpair id="cib-bootstrap-options-startup-fencing"
name="startup-fencing" value="true"/>
<nvpair id="cib-bootstrap-options-stop-orphan-resources"
name="stop-orphan-resources" value="true"/>
<nvpair id="cib-bootstrap-options-stop-orphan-actions"
name="stop-orphan-actions" value="true"/>
<nvpair id="cib-bootstrap-options-remove-after-stop"
name="remove-after-stop" value="false"/>
<nvpair id="cib-bootstrap-options-short-resource-names"
name="short-resource-names" value="true"/>
<nvpair id="cib-bootstrap-options-transition-idle-timeout"
name="transition-idle-timeout" value="5min"/>
<nvpair id="cib-bootstrap-options-default-action-timeout"
name="default-action-timeout" value="110s"/>
<nvpair id="cib-bootstrap-options-is-managed-default"
name="is-managed-default" value="true"/>
<nvpair id="cib-bootstrap-options-cluster-delay"
name="cluster-delay" value="60s"/>
<nvpair id="cib-bootstrap-options-pe-error-series-max"
name="pe-error-series-max" value="-1"/>
<nvpair id="cib-bootstrap-options-pe-warn-series-max"
name="pe-warn-series-max" value="-1"/>
<nvpair id="cib-bootstrap-options-pe-input-series-max"
name="pe-input-series-max" value="-1"/>
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version"
value="2.1.3-node: 552305612591183b1628baa5bc6e903e0f1e26a3"/>
<nvpair id="cib-bootstrap-options-last-lrm-refresh"
name="last-lrm-refresh" value="1204812151"/>
</attributes>
</cluster_property_set>
</crm_config>
<nodes>
<node id="7ce70870-4126-4bb7-b263-221a9e7efc7e" uname="dbora"
type="normal"/>
<node id="aa15721d-a88a-4ec8-9e01-cc7eeb780f79" uname="demo"
type="normal"/>
</nodes>
<resources>
<group id="group_1">
<meta_attributes id="ma-group1">
<attributes>
<nvpair name="target_role" id="ma-group1-1" value="started"/>
<nvpair name="resource_stickiness" id="ma-group1-2" value="1"/>
<nvpair name="resource_failure_stickiness" id="ma-group1-3"
value="-1"/>
</attributes>
</meta_attributes>
<primitive class="ocf" id="IPaddr_193_27_40_57" provider="heartbeat"
type="IPaddr">
<operations>
<op id="IPaddr_193_27_40_57_mon" interval="60s" name="monitor"
timeout="60s"/>
</operations>
<instance_attributes id="IPaddr_193_27_40_57_inst_attr">
<attributes>
<nvpair id="IPaddr_193_27_40_57_attr_0" name="ip"
value="193.27.40.57"/>
<nvpair id="IPaddr_193_27_40_57_attr_1" name="cidr_netmask"
value="26"/>
<nvpair id="IPaddr_193_27_40_57_attr_3" name="broadcast"
value="193.27.40.63"/>
</attributes>
</instance_attributes>
</primitive>
<primitive class="lsb" id="ubis_udbmain_13" provider="heartbeat"
type="ubis_udbmain">
<operations>
<op id="ubis_udbmain_13_mon" interval="120s" name="monitor"
timeout="110s"/>
</operations>
</primitive>
</group>
</resources>
<constraints>
<rsc_location id="rsc_location_group_1" rsc="group_1">
<rule id="prefered_location_group_1" score="1">
<expression attribute="#uname" id="prefered_location_group_1_expr"
operation="eq" value="demo"/>
</rule>
</rsc_location>
</constraints>
</configuration>
</cib>
logfile /var/log/ha-log
debugfile /var/log/ha-debug
debug 0
logfacility local1
cluster AIP708
#use_logd yes
#udpport 694
udpport 708
bcast eth0
bcast eth1
coredumps true
auto_failback on
keepalive 3
warntime 10
deadtime 15
initdead 120
node demo
node dbora
compression bz2
traditional_compression false
crm yes
#autojoin any
#autojoin any|other
#stonith external/aipst /etc/ha.d/stonith.ssh
#stonith {stonith-device-type} {stonith-configuration-file}
##apiauth stonithd uid=root
#apiauth crmd uid=hacluster
#apiauth cib uid=hacluster
#respawn hacluster ccm
#respawn hacluster cib
#respawn root stonithd
#respawn root lrmd
#respawn hacluster crmd
#respawn hacluster /usr/lib64/heartbeat/ccm
#respawn hacluster /usr/lib64/heartbeat/ipfail
#respawn root /usr/lib64/heartbeat/hbagent
# uuidfrom file
# watchdog /dev/watchdog
# HA-Services dbora 193.27.40.56/26/193.27.40.63 192.168.163.56/24/192.168.163.255 ubis_up_mkctab ubis_nserv ubis_mserv demo 193.27.40.57/26/193.27.40.63 192.168.163.57/24/192.168.163.255 ubis_udbmain # Mit privatem NW - derzeit nicht möglich im Zi.17 ! #dbfix 193.27.40.56/26/193.27.40.63 192.168.163.56/26/193.27.40.63 ubis_up_mkctab ubis_nserv ubis_mserv #demo 193.27.40.57/26/193.27.40.63 192.168.163.57/26/192.168.163.63 ubis_udbmain # aipdemo 193.27.40.52/26/193.27.40.63 192.168.163.52/26/193.27.40.63 ubis_applmain -> das führt zu 4-fachen IP-Zuteilung : eth0,eth0:1,eth0:2,eth0:3 !! #dbfix 193.27.40.55/26/193.27.40.63 ubis_applmain #server1 193.27.40.181 192.168.163.181 ubis_up_mkctab ubis_nserv ubis_mserv ubis_fax #server1 193.27.40.53/26/193.27.40.63 192.168.163.53/26/193.27.40.63 ubis_up_mkctab ubis_nserv ubis_mserv ubis_fax #demo 193.27.40.53/26/193.27.40.63 aip_haservice #opteron 193.27.40.182/26/193.27.40.63 192.168.163.182/26/192.168.163.63 ubis_udbmain #opteron 193.27.40.54/26/193.27.40.63 192.168.163.54/26/192.168.163.63 ubis_udbmain
ha-log-test9.gz
Description: GNU Zip compressed data
_______________________________________________ Linux-HA mailing list [email protected] http://lists.linux-ha.org/mailman/listinfo/linux-ha See also: http://linux-ha.org/ReportingProblems
