Well, on a 2-node cluster using Heartbeat 2.1.2, I have one cloned
resource and one group executed preferably on nodeA; but if the clone
fails or is off, the group should be migrated to nodeB.
When I stop the service, the cloned resource mysql_orb fails on nodeA, but I
don't know what to do to make group_1 go to nodeB.
I don't know what to put in the configuration file to make it work; the
following doesn't work:
<rsc_colocation from="group_1" id="web_if_mysql" score="INFINITY"
to="MySQL_ORB"/>
You can find cib.xml and the log messages attached.
Thanks,
Franck.
<configuration>
  <crm_config>
    <cluster_property_set id="cib-bootstrap-options">
      <attributes>
        <nvpair id="cib-bootstrap-options-symmetric-cluster" name="symmetric-cluster" value="true"/>
        <!-- FIX: option name was misspelled "no_quorum-policy" (underscore after "no"),
             so the CRM ignored it and fell back to the default, as the pengine log shows:
             "Using default value 'stop' for cluster option 'no-quorum-policy'". -->
        <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="stop"/>
        <nvpair id="cib-bootstrap-options-default-resource-stickiness" name="default-resource-stickiness" value="0"/>
        <!-- NOTE(review): with failure-stickiness 0 a failed clone instance is simply
             recovered in place (the logs show Stop/Start of mysql_orb1:1 on nodeA),
             so a monitor failure never pushes resources off the node. If a failure
             should drive group_1 to nodeB, a negative value (e.g. -INFINITY) is
             likely needed here - confirm the intended failover policy. -->
        <nvpair id="cib-bootstrap-options-default-resource-failure-stickiness" name="default-resource-failure-stickiness" value="0"/>
        <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
        <nvpair id="cib-bootstrap-options-stonith-action" name="stonith-action" value="reboot"/>
        <nvpair id="cib-bootstrap-options-stop-orphan-resources" name="stop-orphan-resources" value="true"/>
        <nvpair id="cib-bootstrap-options-stop-orphan-actions" name="stop-orphan-actions" value="true"/>
        <nvpair id="cib-bootstrap-options-remove-after-stop" name="remove-after-stop" value="false"/>
        <nvpair id="cib-bootstrap-options-short-resource-names" name="short-resource-names" value="true"/>
        <nvpair id="cib-bootstrap-options-transition-idle-timeout" name="transition-idle-timeout" value="5min"/>
        <nvpair id="cib-bootstrap-options-default-action-timeout" name="default-action-timeout" value="5s"/>
        <nvpair id="cib-bootstrap-options-is-managed-default" name="is-managed-default" value="true"/>
      </attributes>
    </cluster_property_set>
  </crm_config>
  <nodes>
    <node id="eeb85dbb-ed18-4fd8-b722-ab0941a196f8" type="normal" uname="nodeA"/>
    <node id="8fe02fc0-e7e2-4faf-9a9b-92c88c5c9c62" type="normal" uname="nodeB"/>
  </nodes>
  <resources>
    <clone globally_unique="false" id="MySQL_ORB" interleave="false" is_managed="true" notify="false" ordered="false">
      <instance_attributes id="MySQL_ORB_inst_attr">
        <attributes>
          <nvpair id="MySQL_ORB_attr_0" name="clone_max" value="2"/>
          <nvpair id="MySQL_ORB_attr_1" name="clone_node_max" value="1"/>
        </attributes>
      </instance_attributes>
      <primitive class="ocf" id="mysql_orb1" is_managed="true" provider="heartbeat" type="mysql_orb">
        <operations>
          <op id="mysql_orb_mon" interval="30s" name="monitor" timeout="30s"/>
        </operations>
      </primitive>
    </clone>
    <group id="group_1" restart_type="restart">
      <primitive class="ocf" id="IPaddr_Cluster" provider="heartbeat" type="IPaddr">
        <operations>
          <op id="IPaddr_Cluster_mon" interval="5s" name="monitor" timeout="5s"/>
        </operations>
        <instance_attributes id="IPaddr_Cluster_inst_attr">
          <attributes>
            <nvpair id="IPaddr_Cluster_attr_0" name="ip" value="192.168.0.100"/>
          </attributes>
        </instance_attributes>
      </primitive>
      <primitive class="ocf" id="apache_2" provider="heartbeat" type="apache">
        <operations>
          <op id="apache_2_mon" interval="30s" name="monitor" timeout="30s"/>
        </operations>
        <instance_attributes id="apache_2_inst_attr">
          <attributes>
            <nvpair id="apache_2_attr_0" name="configfile" value="/produits/apache/conf/httpd.conf"/>
          </attributes>
        </instance_attributes>
      </primitive>
      <primitive class="heartbeat" id="jboss_3" provider="heartbeat" type="jboss">
        <operations>
          <op id="jboss_3_mon" interval="30s" name="monitor" on_fail="fence" timeout="30s"/>
        </operations>
      </primitive>
      <primitive class="heartbeat" id="crontab_4" provider="heartbeat" type="crontab">
        <operations>
          <op id="crontab_4_mon" interval="120s" name="monitor" timeout="60s"/>
        </operations>
      </primitive>
    </group>
    <clone globally_unique="false" id="ORBacus" is_managed="true" ordered="false" restart_type="restart">
      <instance_attributes id="ORBacus_opt">
        <attributes>
          <nvpair id="ORBacus_attr_0" name="clone_max" value="2"/>
          <nvpair id="ORBacus_attr_1" name="clone_node_max" value="1"/>
        </attributes>
      </instance_attributes>
      <primitive class="lsb" id="ORBacus1" provider="lsb" type="obbalancer"/>
    </clone>
  </resources>
  <constraints>
    <rsc_location id="rsc_location_group_1" rsc="group_1">
      <rule id="prefered_location_group_1" score="100">
        <expression attribute="#uname" id="prefered_location_group_1_expr" operation="eq" value="nodeA"/>
      </rule>
    </rsc_location>
    <rsc_colocation from="group_1" id="web_if_mysql" score="INFINITY" to="MySQL_ORB"/>
  </constraints>
</configuration>
Feb 19 15:19:19 nodeA syslogd 1.4.1: restart.
Feb 19 15:19:23 nodeA lrmd: [8133]: WARN: G_SIG_dispatch: Dispatch function for
SIGCHLD took too long to execute: 150 ms (> 30 ms) (GSource: 0x51be08)
Feb 19 15:19:34 nodeA last message repeated 3 times
Feb 19 15:19:35 nodeA mysql_orb[27725]: [27752]: INFO: database is KO
Feb 19 15:19:35 nodeA lrmd: [8133]: WARN: G_SIG_dispatch: Dispatch function for
SIGCHLD took too long to execute: 140 ms (> 30 ms) (GSource: 0x51be08)
Feb 19 15:19:35 nodeA crmd: [8136]: info: process_lrm_event: LRM operation
mysql_orb1:1_monitor_30000 (call=232, rc=7) complete
Feb 19 15:19:35 nodeA tengine: [8140]: info: process_graph_event: Action
mysql_orb1:1_monitor_30000 arrived after a completed transition
Feb 19 15:19:35 nodeA tengine: [8140]: info: update_abort_priority: Abort
priority upgraded to 1000000
Feb 19 15:19:35 nodeA crmd: [8136]: info: do_state_transition: State transition
S_IDLE -> S_POLICY_ENGINE [ input=I_PE_CALC cause=C_IPC_MESSAGE
origin=route_message ]
Feb 19 15:19:35 nodeA tengine: [8140]: WARN: update_failcount: Updating
failcount for mysql_orb1:1 on eeb85dbb-ed18-4fd8-b722-ab0941a196f8 after failed
monitor: rc=7
Feb 19 15:19:35 nodeA crmd: [8136]: info: do_state_transition: All 2 cluster
nodes are eligible to run resources.
Feb 19 15:19:35 nodeA tengine: [8140]: info: extract_event: Aborting on
transient_attributes changes for eeb85dbb-ed18-4fd8-b722-ab0941a196f8
Feb 19 15:19:35 nodeA pengine: [8141]: notice: cluster_option: Using default
value 'stop' for cluster option 'no-quorum-policy'
Feb 19 15:19:35 nodeA pengine: [8141]: notice: cluster_option: Using default
value '60s' for cluster option 'cluster-delay'
Feb 19 15:19:35 nodeA pengine: [8141]: notice: cluster_option: Using default
value '-1' for cluster option 'pe-error-series-max'
Feb 19 15:19:35 nodeA pengine: [8141]: notice: cluster_option: Using default
value '-1' for cluster option 'pe-warn-series-max'
Feb 19 15:19:35 nodeA pengine: [8141]: notice: cluster_option: Using default
value '-1' for cluster option 'pe-input-series-max'
Feb 19 15:19:35 nodeA pengine: [8141]: notice: cluster_option: Using default
value 'true' for cluster option 'startup-fencing'
Feb 19 15:19:35 nodeA pengine: [8141]: info: determine_online_status: Node
nodeA is online
Feb 19 15:19:35 nodeA pengine: [8141]: WARN: unpack_rsc_op: Processing failed
op (mysql_orb1:1_monitor_30000) on nodeA
Feb 19 15:19:35 nodeA pengine: [8141]: info: determine_online_status: Node
nodeB is online
Feb 19 15:19:35 nodeA pengine: [8141]: info: log_data_element:
create_fake_resource: Orphan resource <lrm_resource id="mysql_orb1:1"
type="mysql_orb" class="ocf" provider="heartbeat">
Feb 19 15:19:35 nodeA pengine: [8141]: info: log_data_element:
create_fake_resource: Orphan resource <lrm_rsc_op id="mysql_orb1:1_stop_0"
operation="stop" crm-debug-origin="do_update_resource"
transition_key="13:87:98541f28-b149-475e-b9b0-dfd1d0ef82d3"
transition_magic="0:0;13:87:98541f28-b149-475e-b9b0-dfd1d0ef82d3" call_id="33"
crm_feature_set="1.0.9" rc_code="0" op_status="0" interval="0"
op_digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
Feb 19 15:19:35 nodeA pengine: [8141]: info: log_data_element:
create_fake_resource: Orphan resource <lrm_rsc_op id="mysql_orb1:1_start_0"
operation="start" crm-debug-origin="build_active_RAs"
transition_key="14:75:98541f28-b149-475e-b9b0-dfd1d0ef82d3"
transition_magic="0:0;14:75:98541f28-b149-475e-b9b0-dfd1d0ef82d3" call_id="30"
crm_feature_set="1.0.9" rc_code="0" op_status="0" interval="0"
op_digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
Feb 19 15:19:35 nodeA pengine: [8141]: info: log_data_element:
create_fake_resource: Orphan resource <lrm_rsc_op
id="mysql_orb1:1_monitor_30000" operation="monitor"
crm-debug-origin="build_active_RAs"
transition_key="15:75:98541f28-b149-475e-b9b0-dfd1d0ef82d3"
transition_magic="0:0;15:75:98541f28-b149-475e-b9b0-dfd1d0ef82d3" call_id="31"
crm_feature_set="1.0.9" rc_code="0" op_status="0" interval="30000"
op_digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
Feb 19 15:19:35 nodeA pengine: [8141]: info: log_data_element:
create_fake_resource: Orphan resource </lrm_resource>
Feb 19 15:19:35 nodeA pengine: [8141]: info: unpack_find_resource: Making sure
orphan mysql_orb1:1/mysql_orb1:2 is stopped on nodeB
Feb 19 15:19:36 nodeA pengine: [8141]: info: unpack_find_resource: Internally
renamed mysql_orb1:1 on nodeB to mysql_orb1:2
Feb 19 15:19:36 nodeA pengine: [8141]: info: unpack_find_resource: Internally
renamed ORBacus1:0 on nodeB to ORBacus1:1
Feb 19 15:19:36 nodeA pengine: [8141]: info: clone_print: Clone Set: MySQL_ORB
Feb 19 15:19:36 nodeA pengine: [8141]: info: native_print: mysql_orb1:0
(heartbeat::ocf:mysql_orb): Started nodeB
Feb 19 15:19:36 nodeA pengine: [8141]: info: native_print: mysql_orb1:1
(heartbeat::ocf:mysql_orb): Started nodeA FAILED
Feb 19 15:19:36 nodeA pengine: [8141]: info: group_print: Resource Group:
group_1
Feb 19 15:19:36 nodeA pengine: [8141]: info: native_print: IPaddr_Cluster
(heartbeat::ocf:IPaddr): Started nodeA
Feb 19 15:19:36 nodeA pengine: [8141]: info: native_print: apache_2
(heartbeat::ocf:apache): Started nodeA
Feb 19 15:19:36 nodeA pengine: [8141]: info: native_print: jboss_3
(heartbeat:jboss): Started nodeA
Feb 19 15:19:36 nodeA pengine: [8141]: info: native_print: crontab_4
(heartbeat:crontab): Started nodeA
Feb 19 15:19:36 nodeA pengine: [8141]: info: clone_print: Clone Set: ORBacus
Feb 19 15:19:36 nodeA pengine: [8141]: info: native_print: ORBacus1:0
(lsb:obbalancer): Started nodeA
Feb 19 15:19:36 nodeA pengine: [8141]: info: native_print: ORBacus1:1
(lsb:obbalancer): Started nodeB
Feb 19 15:19:36 nodeA obbalancer: balancer shutdown succeeded
Feb 19 15:19:36 nodeA pengine: [8141]: info: log_data_element:
check_action_definition: params:all <parameters/>
Feb 19 15:19:36 nodeA pengine: [8141]: WARN: check_action_definition:
Parameters to mysql_orb1:0_monitor_0 on nodeB changed: recorded
08b7001b97ccdaa1ca23a9f165256bc1 vs. calculated (all)
f2317cad3d54cec5d7d7aa7d0bf35cf8
Feb 19 15:19:36 nodeA pengine: [8141]: info: log_data_element:
check_action_definition: params:all <parameters/>
Feb 19 15:19:36 nodeA pengine: [8141]: WARN: check_action_definition:
Parameters to mysql_orb1:0_monitor_30000 on nodeB changed: recorded
08b7001b97ccdaa1ca23a9f165256bc1 vs. calculated (all)
f2317cad3d54cec5d7d7aa7d0bf35cf8
Feb 19 15:19:36 nodeA pengine: [8141]: notice: NoRoleChange: Leave resource
mysql_orb1:0 (nodeB)
Feb 19 15:19:36 nodeA pengine: [8141]: notice: RecurringOp: nodeB
mysql_orb1:0_monitor_30000
Feb 19 15:19:36 nodeA pengine: [8141]: notice: NoRoleChange: Recover resource
mysql_orb1:1 (nodeA)
Feb 19 15:19:36 nodeA pengine: [8141]: notice: StopRsc: nodeA Stop
mysql_orb1:1
Feb 19 15:19:36 nodeA pengine: [8141]: notice: StartRsc: nodeA Start
mysql_orb1:1
Feb 19 15:19:36 nodeA pengine: [8141]: notice: RecurringOp: nodeA
mysql_orb1:1_monitor_30000
Feb 19 15:19:36 nodeA pengine: [8141]: notice: NoRoleChange: Leave resource
IPaddr_Cluster (nodeA)
Feb 19 15:19:36 nodeA pengine: [8141]: notice: NoRoleChange: Leave resource
apache_2 (nodeA)
Feb 19 15:19:36 nodeA pengine: [8141]: notice: NoRoleChange: Leave resource
jboss_3 (nodeA)
Feb 19 15:19:37 nodeA pengine: [8141]: notice: NoRoleChange: Leave resource
crontab_4 (nodeA)
Feb 19 15:19:37 nodeA pengine: [8141]: notice: NoRoleChange: Leave resource
ORBacus1:0 (nodeA)
Feb 19 15:19:37 nodeA pengine: [8141]: notice: NoRoleChange: Leave resource
ORBacus1:1 (nodeB)
Feb 19 15:19:37 nodeA pengine: [8141]: info: native_color: Stopping orphan
resource mysql_orb1:2
Feb 19 15:19:37 nodeA pengine: [8141]: info: process_pe_message: Transition 91:
PEngine Input stored in: /var/lib/heartbeat/pengine/pe-input-103.bz2
Feb 19 15:19:37 nodeA pengine: [8141]: notice: cluster_option: Using default
value 'stop' for cluster option 'no-quorum-policy'
Feb 19 15:19:37 nodeA pengine: [8141]: notice: cluster_option: Using default
value '60s' for cluster option 'cluster-delay'
Feb 19 15:19:37 nodeA pengine: [8141]: notice: cluster_option: Using default
value '-1' for cluster option 'pe-error-series-max'
Feb 19 15:19:37 nodeA pengine: [8141]: notice: cluster_option: Using default
value '-1' for cluster option 'pe-warn-series-max'
Feb 19 15:19:37 nodeA obbalancer: daemonize startup succeeded
Feb 19 15:19:37 nodeA pengine: [8141]: notice: cluster_option: Using default
value '-1' for cluster option 'pe-input-series-max'
Feb 19 15:19:37 nodeA pengine: [8141]: notice: cluster_option: Using default
value 'true' for cluster option 'startup-fencing'
Feb 19 15:19:37 nodeA pengine: [8141]: info: determine_online_status: Node
nodeA is online
Feb 19 15:19:37 nodeA pengine: [8141]: WARN: unpack_rsc_op: Processing failed
op (mysql_orb1:1_monitor_30000) on nodeA
Feb 19 15:19:37 nodeA pengine: [8141]: info: determine_online_status: Node
nodeB is online
Feb 19 15:19:37 nodeA pengine: [8141]: info: log_data_element:
create_fake_resource: Orphan resource <lrm_resource id="mysql_orb1:1"
type="mysql_orb" class="ocf" provider="heartbeat">
Feb 19 15:19:37 nodeA pengine: [8141]: info: log_data_element:
create_fake_resource: Orphan resource <lrm_rsc_op id="mysql_orb1:1_stop_0"
operation="stop" crm-debug-origin="do_update_resource"
transition_key="13:87:98541f28-b149-475e-b9b0-dfd1d0ef82d3"
transition_magic="0:0;13:87:98541f28-b149-475e-b9b0-dfd1d0ef82d3" call_id="33"
crm_feature_set="1.0.9" rc_code="0" op_status="0" interval="0"
op_digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
Feb 19 15:19:37 nodeA pengine: [8141]: info: log_data_element:
create_fake_resource: Orphan resource <lrm_rsc_op id="mysql_orb1:1_start_0"
operation="start" crm-debug-origin="build_active_RAs"
transition_key="14:75:98541f28-b149-475e-b9b0-dfd1d0ef82d3"
transition_magic="0:0;14:75:98541f28-b149-475e-b9b0-dfd1d0ef82d3" call_id="30"
crm_feature_set="1.0.9" rc_code="0" op_status="0" interval="0"
op_digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
Feb 19 15:19:37 nodeA pengine: [8141]: info: log_data_element:
create_fake_resource: Orphan resource <lrm_rsc_op
id="mysql_orb1:1_monitor_30000" operation="monitor"
crm-debug-origin="build_active_RAs"
transition_key="15:75:98541f28-b149-475e-b9b0-dfd1d0ef82d3"
transition_magic="0:0;15:75:98541f28-b149-475e-b9b0-dfd1d0ef82d3" call_id="31"
crm_feature_set="1.0.9" rc_code="0" op_status="0" interval="30000"
op_digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
Feb 19 15:19:37 nodeA pengine: [8141]: info: log_data_element:
create_fake_resource: Orphan resource </lrm_resource>
Feb 19 15:19:37 nodeA pengine: [8141]: info: unpack_find_resource: Making sure
orphan mysql_orb1:1/mysql_orb1:2 is stopped on nodeB
Feb 19 15:19:37 nodeA pengine: [8141]: info: unpack_find_resource: Internally
renamed mysql_orb1:1 on nodeB to mysql_orb1:2
Feb 19 15:19:37 nodeA pengine: [8141]: info: unpack_find_resource: Internally
renamed ORBacus1:0 on nodeB to ORBacus1:1
Feb 19 15:19:37 nodeA pengine: [8141]: info: clone_print: Clone Set: MySQL_ORB
Feb 19 15:19:37 nodeA pengine: [8141]: info: native_print: mysql_orb1:0
(heartbeat::ocf:mysql_orb): Started nodeB
Feb 19 15:19:37 nodeA pengine: [8141]: info: native_print: mysql_orb1:1
(heartbeat::ocf:mysql_orb): Started nodeA FAILED
Feb 19 15:19:37 nodeA pengine: [8141]: info: group_print: Resource Group:
group_1
Feb 19 15:19:37 nodeA pengine: [8141]: info: native_print: IPaddr_Cluster
(heartbeat::ocf:IPaddr): Started nodeA
Feb 19 15:19:38 nodeA pengine: [8141]: info: native_print: apache_2
(heartbeat::ocf:apache): Started nodeA
Feb 19 15:19:38 nodeA pengine: [8141]: info: native_print: jboss_3
(heartbeat:jboss): Started nodeA
Feb 19 15:19:38 nodeA pengine: [8141]: info: native_print: crontab_4
(heartbeat:crontab): Started nodeA
Feb 19 15:19:38 nodeA pengine: [8141]: info: clone_print: Clone Set: ORBacus
Feb 19 15:19:38 nodeA pengine: [8141]: info: native_print: ORBacus1:0
(lsb:obbalancer): Started nodeA
Feb 19 15:19:38 nodeA pengine: [8141]: info: native_print: ORBacus1:1
(lsb:obbalancer): Started nodeB
Feb 19 15:19:38 nodeA pengine: [8141]: info: log_data_element:
check_action_definition: params:all <parameters/>
Feb 19 15:19:38 nodeA pengine: [8141]: WARN: check_action_definition:
Parameters to mysql_orb1:0_monitor_0 on nodeB changed: recorded
08b7001b97ccdaa1ca23a9f165256bc1 vs. calculated (all)
f2317cad3d54cec5d7d7aa7d0bf35cf8
Feb 19 15:19:38 nodeA pengine: [8141]: info: log_data_element:
check_action_definition: params:all <parameters/>
Feb 19 15:19:38 nodeA pengine: [8141]: WARN: check_action_definition:
Parameters to mysql_orb1:0_monitor_30000 on nodeB changed: recorded
08b7001b97ccdaa1ca23a9f165256bc1 vs. calculated (all)
f2317cad3d54cec5d7d7aa7d0bf35cf8
Feb 19 15:19:38 nodeA pengine: [8141]: notice: NoRoleChange: Leave resource
mysql_orb1:0 (nodeB)
Feb 19 15:19:38 nodeA pengine: [8141]: notice: RecurringOp: nodeB
mysql_orb1:0_monitor_30000
Feb 19 15:19:38 nodeA pengine: [8141]: notice: NoRoleChange: Recover resource
mysql_orb1:1 (nodeA)
Feb 19 15:19:38 nodeA pengine: [8141]: notice: StopRsc: nodeA Stop
mysql_orb1:1
Feb 19 15:19:38 nodeA pengine: [8141]: notice: StartRsc: nodeA Start
mysql_orb1:1
Feb 19 15:19:38 nodeA pengine: [8141]: notice: RecurringOp: nodeA
mysql_orb1:1_monitor_30000
Feb 19 15:19:38 nodeA pengine: [8141]: notice: NoRoleChange: Leave resource
IPaddr_Cluster (nodeA)
Feb 19 15:19:38 nodeA pengine: [8141]: notice: NoRoleChange: Leave resource
apache_2 (nodeA)
Feb 19 15:19:38 nodeA pengine: [8141]: notice: NoRoleChange: Leave resource
jboss_3 (nodeA)
Feb 19 15:19:38 nodeA pengine: [8141]: notice: NoRoleChange: Leave resource
crontab_4 (nodeA)
Feb 19 15:19:38 nodeA pengine: [8141]: notice: NoRoleChange: Leave resource
ORBacus1:0 (nodeA)
Feb 19 15:19:38 nodeA pengine: [8141]: notice: NoRoleChange: Leave resource
ORBacus1:1 (nodeB)
Feb 19 15:19:38 nodeA pengine: [8141]: info: native_color: Stopping orphan
resource mysql_orb1:2
Feb 19 15:19:38 nodeA crmd: [8136]: info: do_state_transition: State transition
S_POLICY_ENGINE -> S_TRANSITION_ENGINE [ input=I_PE_SUCCESS cause=C_IPC_MESSAGE
origin=route_message ]
Feb 19 15:19:39 nodeA tengine: [8140]: info: unpack_graph: Unpacked transition
92: 8 actions in 8 synapses
Feb 19 15:19:39 nodeA tengine: [8140]: info: send_rsc_command: Initiating
action 7: mysql_orb1:0_monitor_30000 on nodeB
Feb 19 15:19:39 nodeA tengine: [8140]: info: te_pseudo_action: Pseudo action 17
fired and confirmed
Feb 19 15:19:39 nodeA tengine: [8140]: info: send_rsc_command: Initiating
action 2: mysql_orb1:1_stop_0 on nodeA
Feb 19 15:19:39 nodeA pengine: [8141]: info: process_pe_message: Transition 92:
PEngine Input stored in: /var/lib/heartbeat/pengine/pe-input-104.bz2
Feb 19 15:19:39 nodeA crmd: [8136]: info: do_lrm_rsc_op: Performing
op=mysql_orb1:1_stop_0 key=2:92:98541f28-b149-475e-b9b0-dfd1d0ef82d3)
Feb 19 15:19:39 nodeA crmd: [8136]: info: process_lrm_event: LRM operation
mysql_orb1:1_monitor_30000 (call=232, rc=-2) Cancelled
Feb 19 15:19:39 nodeA lrmd: [8133]: info: RA output: (mysql_orb1:1:stop:stdout)
Shutting down ORBacus Balancer service:
Feb 19 15:19:39 nodeA tengine: [8140]: info: match_graph_event: Action
mysql_orb1:0_monitor_30000 (7) confirmed on nodeB
Feb 19 15:19:39 nodeA lrmd: [8133]: info: RA output: (mysql_orb1:1:stop:stdout)
[
Feb 19 15:19:39 nodeA lrmd: [8133]: WARN: G_SIG_dispatch: Dispatch function for
SIGCHLD took too long to execute: 70 ms (> 30 ms) (GSource: 0x51be08)
Feb 19 15:19:39 nodeA lrmd: [8133]: info: RA output: (mysql_orb1:1:stop:stdout)
OK ]
Feb 19 15:19:39 nodeA crmd: [8136]: info: process_lrm_event: LRM operation
mysql_orb1:1_stop_0 (call=233, rc=0) complete
Feb 19 15:19:39 nodeA tengine: [8140]: info: match_graph_event: Action
mysql_orb1:1_stop_0 (2) confirmed on nodeA
Feb 19 15:19:39 nodeA tengine: [8140]: info: te_pseudo_action: Pseudo action 18
fired and confirmed
Feb 19 15:19:39 nodeA tengine: [8140]: info: te_pseudo_action: Pseudo action 15
fired and confirmed
Feb 19 15:19:39 nodeA tengine: [8140]: info: send_rsc_command: Initiating
action 14: mysql_orb1:1_start_0 on nodeA
Feb 19 15:19:39 nodeA crmd: [8136]: info: do_lrm_rsc_op: Performing
op=mysql_orb1:1_start_0 key=14:92:98541f28-b149-475e-b9b0-dfd1d0ef82d3)
Feb 19 15:19:39 nodeA lrmd: [8133]: info: RA output:
(mysql_orb1:1:start:stdout) Starting ORBacus Balancer service:
Feb 19 15:19:39 nodeA lrmd: [8133]: WARN: G_SIG_dispatch: Dispatch function for
SIGCHLD took too long to execute: 80 ms (> 30 ms) (GSource: 0x51be08)
Feb 19 15:19:39 nodeA lrmd: [8133]: info: RA output:
(mysql_orb1:1:start:stdout) [ OK ]
Feb 19 15:19:39 nodeA crmd: [8136]: info: process_lrm_event: LRM operation
mysql_orb1:1_start_0 (call=234, rc=0) complete
Feb 19 15:19:39 nodeA crmd: [8136]: info: build_operation_update: Digest for
0:0;14:92:98541f28-b149-475e-b9b0-dfd1d0ef82d3 (mysql_orb1:1_start_0) was
f2317cad3d54cec5d7d7aa7d0bf35cf8
Feb 19 15:19:39 nodeA crmd: [8136]: info: log_data_element:
build_operation_update: digest:source <parameters/>
Feb 19 15:19:39 nodeA crmd: [8136]: ERROR: parse_xml: Expected: longdesc
Feb 19 15:19:39 nodeA crmd: [8136]: ERROR: parse_xml: Error parsing token:
Mismatching close tag
Feb 19 15:19:39 nodeA crmd: [8136]: ERROR: parse_xml: Error at or before:
</resource-agent>
Feb 19 15:19:39 nodeA crmd: [8136]: ERROR: parse_xml: Error parsing token:
error parsing child
Feb 19 15:19:39 nodeA crmd: [8136]: ERROR: parse_xml: Error at or before: >
<longdesc lang="e
Feb 19 15:19:40 nodeA crmd: [8136]: ERROR: get_rsc_restart_list: Metadata for
heartbeat::ocf:mysql_orb is not valid XML
Feb 19 15:19:40 nodeA tengine: [8140]: info: match_graph_event: Action
mysql_orb1:1_start_0 (14) confirmed on nodeA
Feb 19 15:19:40 nodeA tengine: [8140]: info: send_rsc_command: Initiating
action 1: mysql_orb1:1_monitor_30000 on nodeA
Feb 19 15:19:40 nodeA tengine: [8140]: info: te_pseudo_action: Pseudo action 16
fired and confirmed
Feb 19 15:19:40 nodeA crmd: [8136]: info: do_lrm_rsc_op: Performing
op=mysql_orb1:1_monitor_30000 key=1:92:98541f28-b149-475e-b9b0-dfd1d0ef82d3)
Feb 19 15:19:40 nodeA mysql_orb[27862]: [27890]: INFO: database is KO
Feb 19 15:19:40 nodeA lrmd: [8133]: WARN: G_SIG_dispatch: Dispatch function for
SIGCHLD took too long to execute: 150 ms (> 30 ms) (GSource: 0x51be08)
Feb 19 15:19:40 nodeA crmd: [8136]: info: process_lrm_event: LRM operation
mysql_orb1:1_monitor_30000 (call=235, rc=7) complete
Feb 19 15:19:40 nodeA tengine: [8140]: info: match_graph_event: Action
mysql_orb1:1_monitor_30000 (1) confirmed on nodeA
Feb 19 15:19:40 nodeA tengine: [8140]: info: run_graph: Transition 92:
(Complete=8, Pending=0, Fired=0, Skipped=0, Incomplete=0)
Feb 19 15:19:40 nodeA tengine: [8140]: info: notify_crmd: Transition 92 status:
te_complete - <null>
Feb 19 15:19:40 nodeA crmd: [8136]: info: do_state_transition: State transition
S_TRANSITION_ENGINE -> S_IDLE [ input=I_TE_SUCCESS cause=C_IPC_MESSAGE
origin=route_message ]
Feb 19 15:19:40 nodeA lrmd: [8133]: WARN: G_SIG_dispatch: Dispatch function for
SIGCHLD took too long to execute: 150 ms (> 30 ms) (GSource: 0x51be08)
Feb 19 15:19:44 nodeA lrmd: [8133]: WARN: G_SIG_dispatch: Dispatch function for
SIGCHLD took too long to execute: 150 ms (> 30 ms) (GSource: 0x51be08)
Feb 19 15:19:45 nodeA apache[27920]: [27965]: INFO: 15:19:45
URL:http://192.168.87.100:80/server-status [1634/1634] -> "-" [1]
Feb 19 15:19:45 nodeA lrmd: [8133]: WARN: G_SIG_dispatch: Dispatch function for
SIGCHLD took too long to execute: 150 ms (> 30 ms) (GSource: 0x51be08)
Feb 19 15:19:47 nodeA jboss[27966]: [28014]: INFO: Running OK
Feb 19 15:19:47 nodeA lrmd: [8133]: WARN: G_SIG_dispatch: Dispatch function for
SIGCHLD took too long to execute: 150 ms (> 30 ms) (GSource: 0x51be08)
Feb 19 15:19:50 nodeA lrmd: [8133]: WARN: G_SIG_dispatch: Dispatch function for
SIGCHLD took too long to execute: 140 ms (> 30 ms) (GSource: 0x51be08)
Feb 19 15:19:55 nodeA lrmd: [8133]: WARN: G_SIG_dispatch: Dispatch function for
SIGCHLD took too long to execute: 140 ms (> 30 ms) (GSource: 0x51be08)
Feb 19 15:20:00 nodeA lrmd: [8133]: WARN: G_SIG_dispatch: Dispatch function for
SIGCHLD took too long to execute: 150 ms (> 30 ms) (GSource: 0x51be08)
Feb 19 15:20:06 nodeA lrmd: [8133]: WARN: G_SIG_dispatch: Dispatch function for
SIGCHLD took too long to execute: 140 ms (> 30 ms) (GSource: 0x51be08)
Feb 19 15:20:08 nodeA mysql_orb[28080]: [28107]: INFO: database is KO
Feb 19 15:20:08 nodeA lrmd: [8133]: WARN: G_SIG_dispatch: Dispatch function for
SIGCHLD took too long to execute: 140 ms (> 30 ms) (GSource: 0x51be08)
Feb 19 15:20:11 nodeA lrmd: [8133]: WARN: G_SIG_dispatch: Dispatch function for
SIGCHLD took too long to execute: 150 ms (> 30 ms) (GSource: 0x51be08)
Feb 19 15:20:15 nodeA apache[28122]: [28167]: INFO: 15:20:15
URL:http://192.168.87.100:80/server-status [1634/1634] -> "-" [1]
Feb 19 15:20:15 nodeA lrmd: [8133]: WARN: G_SIG_dispatch: Dispatch function for
SIGCHLD took too long to execute: 150 ms (> 30 ms) (GSource: 0x51be08)
Feb 19 15:20:17 nodeA lrmd: [8133]: WARN: G_SIG_dispatch: Dispatch function for
SIGCHLD took too long to execute: 150 ms (> 30 ms) (GSource: 0x51be08)
Feb 19 15:20:17 nodeA jboss[28182]: [28231]: INFO: Running OK
Feb 19 15:20:18 nodeA lrmd: [8133]: WARN: G_SIG_dispatch: Dispatch function for
SIGCHLD took too long to execute: 150 ms (> 30 ms) (GSource: 0x51be08)
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems