Hi Andrew,
OK, smartd was not a good choice of test init script to demonstrate the
group problem, sorry. So I wrote two simple scripts, /pingalain1/
(resource respingal1) and /pingalain2/ (resource respingal2), whose
status is guaranteed to return 0, and gathered them in a group
/groupresping/ with:
    crm configure group groupresping respingal1 respingal2
(a rough sketch of the scripts and of the configuration is given right
after the list below). My problem is that:
1/ when both respingal1 and respingal2 are successfully started/running,
if I stop respingal1, both resources are stopped. (Fine for me)
2/ when both respingal1 and respingal2 are successfully started/running,
if I stop respingal2, there is no impact on respingal1. (Not fine
for me ...)
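
For reference, here is a rough sketch of the kind of script I mean (the
body below is only an illustration, not the exact content of
/pingalain1/; the important point is that start/stop always succeed and
status returns 0 while the resource is considered running):

    #!/bin/sh
    # /etc/init.d/pingalain1 -- illustrative sketch only, not the real script
    case "$1" in
      start)  touch /var/run/pingalain1.running; exit 0 ;;
      stop)   rm -f /var/run/pingalain1.running; exit 0 ;;
      status) # LSB status convention: 0 = running, 3 = not running
              [ -f /var/run/pingalain1.running ] && exit 0 || exit 3 ;;
      *)      echo "Usage: $0 {start|stop|status}"; exit 2 ;;
    esac

The resources were defined and grouped roughly as follows (only the
group command is quoted exactly above; the primitive lines are simply
how I would normally define them):

    crm configure primitive respingal1 lsb:pingalain1
    crm configure primitive respingal2 lsb:pingalain2
    crm configure group groupresping respingal1 respingal2

And by "stop" I mean stopping through the crm shell, which sets
target-role=Stopped (as can be seen for respingal2 in the attached
cibadmin output):

    crm resource stop respingal1    # case 1: respingal2 is stopped as well
    crm resource stop respingal2    # case 2: respingal1 keeps running
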
As I told you, this seems to be the behavior described on p. 28 of your
documentation, but if I understood your previous email correctly, that
should no longer be the behavior: both resources should be stopped,
whichever one we choose to stop.
Attached is the new cibadmin -Ql output, taken just after case 2.
Thanks
Regards
Alain
smartd2 can't run on node1 because it returned rc=5 for the monitor op:
    EXECRA_NOT_INSTALLED = 5,
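
rc=5 is the LRM's "not installed" code: when a status/monitor exits with
it, the cluster concludes the resource cannot run on that node and will
not place it there. A status check typically returns 5 when the script
or the binary it manages is missing, along these lines (the paths below
are placeholders, not taken from your smartd2 script):

    #!/bin/sh
    # Hypothetical status check; DAEMON and PIDFILE are placeholder paths.
    DAEMON=/usr/sbin/smartd2
    PIDFILE=/var/run/smartd2.pid
    if [ ! -x "$DAEMON" ]; then
        exit 5    # program not installed -> EXECRA_NOT_INSTALLED
    elif [ -f "$PIDFILE" ] && kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
        exit 0    # running
    else
        exit 3    # not running
    fi
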
On Mon, Feb 22, 2010 at 3:34 PM, Alain.Moulle <[email protected]> wrote:
> Hi Andrew,
>
> sorry for the delay, but I have reproduced my problem with groups using
> only two resources, resal1 and resal2, gathered in a group groupal1.
> The behavior is:
>
> 1/ when both resources are started and I stop resal1, the whole group is stopped.
> 2/ when both resources are started and I stop resal2, there is no impact on
> resal1.
> crm_mon gives:
>    Resource Group: groupal1
>        resal1     (lsb:smartd):   Started node1
>        resal2     (lsb:smartd2):  Stopped
>
> Attached is the cibadmin -Ql output after case 1.
>
> Thanks
> Regards
> Alain
>
>> Hi Andrew,
>> > the releases are those officially delivered with fc12:
>> > pacemaker-1.0.5-4.fc12
>> > and :
>> > cluster-glue-1.0-0.11.b79635605337.hg.fc12
>> > corosync-1.1.2-1.fc12
>> > heartbeat-3.0.0-0.5.0daab7da36a8.hg.fc12
>> > openais-1.1.0-1.fc12
>> > all in x86_64.
>> >
>>
>> When the cluster is in that state (4th resource stopped), can you run
>> cibadmin -Ql and attach the result?
<cib epoch="75" num_updates="2" admin_epoch="0" validate-with="pacemaker-1.0"
crm_feature_set="3.0.1" have-quorum="1" cib-last-written="Tue Feb 23 14:47:33
2010" dc-uuid="node1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version"
value="1.0.5-ee19d8e83c2a5d45988f1cee36d334a631d84fc7"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure"
name="cluster-infrastructure" value="openais"/>
<nvpair id="cib-bootstrap-options-expected-quorum-votes"
name="expected-quorum-votes" value="2"/>
<nvpair id="cib-bootstrap-options-symmetric-cluster"
name="symmetric-cluster" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy"
name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled"
name="stonith-enabled" value="true"/>
<nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay"
value="60"/>
<nvpair id="cib-bootstrap-options-DC-deadtime" name="DC-deadtime"
value="60"/>
<nvpair id="cib-bootstrap-options-last-lrm-refresh"
name="last-lrm-refresh" value="1266932264"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node1" uname="node1" type="normal"/>
<node id="node3" uname="node3" type="normal"/>
</nodes>
<resources>
<primitive class="stonith" id="restofencenode1" type="external/ipmi">
<instance_attributes id="restofencenode1-instance_attributes">
<nvpair id="restofencenode1-instance_attributes-hostname"
name="hostname" value="node1"/>
<nvpair id="restofencenode1-instance_attributes-ipaddr" name="ipaddr"
value="12.1.1.121"/>
<nvpair id="restofencenode1-instance_attributes-userid" name="userid"
value="mylogin"/>
<nvpair id="restofencenode1-instance_attributes-passwd" name="passwd"
value="mypass"/>
<nvpair id="restofencenode1-instance_attributes-interface"
name="interface" value="lan"/>
</instance_attributes>
<meta_attributes id="restofencenode1-meta_attributes">
<nvpair id="restofencenode1-meta_attributes-target-role"
name="target-role" value="Started"/>
</meta_attributes>
</primitive>
<primitive class="stonith" id="restofencenode3" type="external/ipmi">
<instance_attributes id="restofencenode3-instance_attributes">
<nvpair id="restofencenode3-instance_attributes-hostname"
name="hostname" value="node3"/>
<nvpair id="restofencenode3-instance_attributes-ipaddr" name="ipaddr"
value="12.1.1.123"/>
<nvpair id="restofencenode3-instance_attributes-userid" name="userid"
value="mylogin"/>
<nvpair id="restofencenode3-instance_attributes-passwd" name="passwd"
value="mypass"/>
<nvpair id="restofencenode3-instance_attributes-interface"
name="interface" value="lan"/>
</instance_attributes>
<meta_attributes id="restofencenode3-meta_attributes">
<nvpair id="restofencenode3-meta_attributes-target-role"
name="target-role" value="Started"/>
</meta_attributes>
</primitive>
<group id="groupresping">
<primitive class="lsb" id="respingal1" type="pingalain1">
<meta_attributes id="respingal1-meta_attributes">
<nvpair id="respingal1-meta_attributes-target-role"
name="target-role" value="Started"/>
</meta_attributes>
</primitive>
<primitive class="lsb" id="respingal2" type="pingalain2">
<meta_attributes id="respingal2-meta_attributes">
<nvpair id="respingal2-meta_attributes-target-role"
name="target-role" value="Stopped"/>
</meta_attributes>
</primitive>
</group>
</resources>
<constraints>
<rsc_location id="loc1-restofencenode1" node="node3"
rsc="restofencenode1" score="+INFINITY"/>
<rsc_location id="neverloc-restofencenode1" node="node1"
rsc="restofencenode1" score="-INFINITY"/>
<rsc_location id="loc1-restofencenode3" node="node1"
rsc="restofencenode3" score="+INFINITY"/>
<rsc_location id="neverloc-restofencenode3" node="node3"
rsc="restofencenode3" score="-INFINITY"/>
<rsc_location id="loc1-respingal1" node="node1" rsc="groupresping"
score="INFINITY"/>
<rsc_location id="loc1-respingal2" node="node3" rsc="groupresping"
score="INFINITY"/>
</constraints>
<rsc_defaults/>
<op_defaults/>
</configuration>
<status>
<node_state id="node1" uname="node1" ha="active" in_ccm="true"
crmd="online" join="member" expected="member"
crm-debug-origin="do_update_resource" shutdown="0">
<transient_attributes id="node1">
<instance_attributes id="status-node1">
<nvpair id="status-node1-probe_complete" name="probe_complete"
value="true"/>
<nvpair id="status-node1-last-failure-respingal1"
name="last-failure-respingal1" value="1266934770"/>
<nvpair id="status-node1-last-failure-respingal2"
name="last-failure-respingal2" value="1266934575"/>
</instance_attributes>
</transient_attributes>
<lrm id="node1">
<lrm_resources>
<lrm_resource id="restofencenode3" type="external/ipmi"
class="stonith">
<lrm_rsc_op id="restofencenode3_monitor_0" operation="monitor"
crm-debug-origin="build_active_RAs" crm_feature_set="3.0.1"
transition-key="5:0:7:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6"
transition-magic="0:7;5:0:7:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6" call-id="3"
rc-code="7" op-status="0" interval="0" last-run="1266932994"
last-rc-change="1266932994" exec-time="0" queue-time="0"
op-digest="b97178514d2e3f7c7be5d1b03af40398"/>
<lrm_rsc_op id="restofencenode3_start_0" operation="start"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="5:5:0:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6"
transition-magic="0:0;5:5:0:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6" call-id="4"
rc-code="0" op-status="0" interval="0" last-run="1266933212"
last-rc-change="1266933212" exec-time="60" queue-time="0"
op-digest="b97178514d2e3f7c7be5d1b03af40398"/>
</lrm_resource>
<lrm_resource id="restofencenode1" type="external/ipmi"
class="stonith">
<lrm_rsc_op id="restofencenode1_monitor_0" operation="monitor"
crm-debug-origin="build_active_RAs" crm_feature_set="3.0.1"
transition-key="4:0:7:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6"
transition-magic="0:7;4:0:7:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6" call-id="2"
rc-code="7" op-status="0" interval="0" last-run="1266932993"
last-rc-change="1266932993" exec-time="10" queue-time="0"
op-digest="c34b3d233ae99c45e5022222f75b63b8"/>
</lrm_resource>
<lrm_resource id="respingal1" type="pingalain1" class="lsb">
<lrm_rsc_op id="respingal1_monitor_0" operation="monitor"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="4:55:7:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6"
transition-magic="0:7;4:55:7:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6" call-id="23"
rc-code="7" op-status="0" interval="0" last-run="1266935404"
last-rc-change="1266935404" exec-time="20" queue-time="0"
op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="respingal1_start_0" operation="start"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="9:73:0:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6"
transition-magic="0:0;9:73:0:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6" call-id="29"
rc-code="0" op-status="0" interval="0" last-run="1266935824"
last-rc-change="1266935824" exec-time="0" queue-time="0"
op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="respingal1_stop_0" operation="stop"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="9:72:0:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6"
transition-magic="0:0;9:72:0:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6" call-id="28"
rc-code="0" op-status="0" interval="0" last-run="1266935801"
last-rc-change="1266935801" exec-time="20" queue-time="0"
op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="respingal2" type="pingalain2" class="lsb">
<lrm_rsc_op id="respingal2_monitor_0" operation="monitor"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="4:57:7:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6"
transition-magic="0:7;4:57:7:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6" call-id="24"
rc-code="7" op-status="0" interval="0" last-run="1266935408"
last-rc-change="1266935408" exec-time="20" queue-time="0"
op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="respingal2_start_0" operation="start"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="11:74:0:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6"
transition-magic="0:0;11:74:0:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6"
call-id="30" rc-code="0" op-status="0" interval="0" last-run="1266935825"
last-rc-change="1266935825" exec-time="10" queue-time="0"
op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="respingal2_stop_0" operation="stop"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="11:75:0:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6"
transition-magic="0:0;11:75:0:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6"
call-id="31" rc-code="0" op-status="0" interval="0" last-run="1266935839"
last-rc-change="1266935839" exec-time="20" queue-time="0"
op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="node3" uname="node3" ha="active" in_ccm="true"
crmd="online" join="member" expected="member"
crm-debug-origin="do_update_resource" shutdown="0">
<lrm id="node3">
<lrm_resources>
<lrm_resource id="restofencenode1" type="external/ipmi"
class="stonith">
<lrm_rsc_op id="restofencenode1_monitor_0" operation="monitor"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="5:3:7:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6"
transition-magic="0:7;5:3:7:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6" call-id="2"
rc-code="7" op-status="0" interval="0" last-run="1266929866"
last-rc-change="1266929866" exec-time="0" queue-time="0"
op-digest="c34b3d233ae99c45e5022222f75b63b8"/>
<lrm_rsc_op id="restofencenode1_start_0" operation="start"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="5:6:0:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6"
transition-magic="0:0;5:6:0:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6" call-id="4"
rc-code="0" op-status="0" interval="0" last-run="1266930080"
last-rc-change="1266930080" exec-time="60" queue-time="0"
op-digest="c34b3d233ae99c45e5022222f75b63b8"/>
</lrm_resource>
<lrm_resource id="restofencenode3" type="external/ipmi"
class="stonith">
<lrm_rsc_op id="restofencenode3_monitor_0" operation="monitor"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="6:3:7:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6"
transition-magic="0:7;6:3:7:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6" call-id="3"
rc-code="7" op-status="0" interval="0" last-run="1266929866"
last-rc-change="1266929866" exec-time="0" queue-time="0"
op-digest="b97178514d2e3f7c7be5d1b03af40398"/>
</lrm_resource>
<lrm_resource id="respingal1" type="pingalain1" class="lsb">
<lrm_rsc_op id="respingal1_monitor_0" operation="monitor"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="5:56:7:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6"
transition-magic="0:7;5:56:7:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6" call-id="23"
rc-code="7" op-status="0" interval="0" last-run="1266932259"
last-rc-change="1266932259" exec-time="10" queue-time="0"
op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="respingal2" type="pingalain2" class="lsb">
<lrm_rsc_op id="respingal2_monitor_0" operation="monitor"
crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
transition-key="5:58:7:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6"
transition-magic="0:7;5:58:7:c4ea9901-f2e3-4ab7-a1ae-c3efb3228fc6" call-id="24"
rc-code="7" op-status="0" interval="0" last-run="1266932263"
last-rc-change="1266932263" exec-time="10" queue-time="0"
op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
</lrm_resources>
</lrm>
<transient_attributes id="node3">
<instance_attributes id="status-node3">
<nvpair id="status-node3-probe_complete" name="probe_complete"
value="true"/>
<nvpair id="status-node3-last-failure-respingal2"
name="last-failure-respingal2" value="1266934555"/>
<nvpair id="status-node3-last-failure-respingal1"
name="last-failure-respingal1" value="1266934790"/>
</instance_attributes>
</transient_attributes>
</node_state>
</status>
</cib>
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems