Dejan Muhamedagic schrieb:
Hi,
On Fri, May 23, 2008 at 08:45:18AM +0200, Florin wrote:
Hi,
I have the following 3 node configuration:
Node1: Service drbd0
Node2: Service drbd0 and drbd1
Node3: Service drbd1
The services drbd0 and drbd1 should never run on node2 at the same time.
My scenario looks like this:
Node1:up | Node2:up | Node3:up = it's all fine; drbd0 runs on node1 and drbd1
runs on node2
Node1:up | Node2:up | Node3:down = it's OK
Node1:up | Node2:down | Node3:up = drbd0 still runs on node1 and drbd1 will
be started on node3
Node1:down | Node2:up | Node3:up = this is the part where I am stuck.
drbd0 will be started on node2, but drbd1 is still running, so how can it
be done that drbd1 will automatically migrate to node3 before starting
drbd0?
Add a colocation constraint which prevents the two resources
running on the same node. Then express preferences for
resources' locations using the location constraints, e.g. drbd0
has a higher score than drbd1 on node2.
Thanks,
Dejan
this is my cib.xml:
<cib admin_epoch="0" have_quorum="false" num_peers="0"
cib_feature_revision="1.3" ignore_dtd="false" generated="false" epoch="51"
num_updates="12" cib-last-written="Fri Feb 22 10:57:18 2008"
ccm_transition="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<attributes>
<nvpair id="cib-bootstrap-options-symmetric-cluster"
name="symmetric-cluster" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy"
name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-default-resource-stickiness"
name="default-resource-stickiness" value="0"/>
<nvpair
id="cib-bootstrap-options-default-resource-failure-stickiness"
name="default-resource-failure-stickiness" value="0"/>
<nvpair id="cib-bootstrap-options-stonith-enabled"
name="stonith-enabled" value="false"/>
<nvpair id="cib-bootstrap-options-stonith-action"
name="stonith-action" value="reboot"/>
<nvpair id="cib-bootstrap-options-stop-orphan-resources"
name="stop-orphan-resources" value="true"/>
<nvpair id="cib-bootstrap-options-stop-orphan-actions"
name="stop-orphan-actions" value="true"/>
<nvpair id="cib-bootstrap-options-remove-after-stop"
name="remove-after-stop" value="false"/>
<nvpair id="cib-bootstrap-options-cluster-delay"
name="cluster-delay" value="5min"/>
<nvpair id="cib-bootstrap-options-default-action-timeout"
name="default-action-timeout" value="5m"/>
<nvpair id="cib-bootstrap-options-is-managed-default"
name="is-managed-default" value="true"/>
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version"
value="2.1.3-node: 552305612591183b1628baa5bc6e903e0f1e26a3"/>
</attributes>
</cluster_property_set>
</crm_config>
<nodes>
<node id="2743d23e-4a65-4eca-8cca-1304c2dd6e32" uname="ioc-node1"
type="normal"/>
<node id="138a7f68-711f-48ea-a347-aaccdc9ae88c" uname="ioc-node2"
type="normal"/>
<node id="f8526e47-b2dc-4f71-aa9f-05f021808ee8" uname="ioc-node3"
type="normal"/>
</nodes>
<resources>
<group id="group_ve0">
<primitive class="heartbeat" id="drbddisk_ve0" provider="heartbeat"
type="drbddisk">
<instance_attributes id="drbddisk_ve0_inst_attr">
<attributes>
<nvpair id="drbddisk_ve0_attr_1" name="1" value="r0"/>
</attributes>
</instance_attributes>
</primitive>
<primitive class="ocf" id="Filesystem_ve0" provider="heartbeat"
type="Filesystem">
<instance_attributes id="Filesystem_ve0_inst_attr">
<attributes>
<nvpair id="Filesystem_ve0_attr_0" name="device"
value="/dev/drbd0"/>
<nvpair id="Filesystem_ve0_attr_1" name="directory"
value="/cluster/ve0"/>
<nvpair id="Filesystem_ve0_attr_2" name="fstype"
value="ext3"/>
</attributes>
</instance_attributes>
</primitive>
<primitive class="lsb" id="run_ve0" provider="heartbeat"
type="ve0"/>
</group>
<group id="group_ve1">
<primitive class="heartbeat" id="drbddisk_ve1" provider="heartbeat"
type="drbddisk">
<instance_attributes id="drbddisk_ve1_inst_attr">
<attributes>
<nvpair id="drbddisk_ve1_attr_1" name="1" value="r1"/>
</attributes>
</instance_attributes>
</primitive>
<primitive class="ocf" id="Filesystem_ve1" provider="heartbeat"
type="Filesystem">
<instance_attributes id="Filesystem_ve1_inst_attr">
<attributes>
<nvpair id="Filesystem_ve1_attr_0" name="device"
value="/dev/drbd1"/>
<nvpair id="Filesystem_ve1_attr_1" name="directory"
value="/cluster/ve1"/>
<nvpair id="Filesystem_ve1_attr_2" name="fstype"
value="ext3"/>
</attributes>
</instance_attributes>
</primitive>
<primitive class="lsb" id="run_ve1" provider="heartbeat"
type="ve1"/> </group>
</resources>
<constraints> <rsc_location id="rsc_location_ve0"
rsc="group_ve0">
<rule id="not_prefered_location_ve0" score="-INFINITY">
<expression attribute="#uname"
id="not_prefered_location_ve0_node3" operation="eq" value="ioc-node3"/>
</rule>
<rule id="prefered_location_ve0_node1" score="INFINITY">
<expression attribute="#uname"
id="prefered_location_ve0_node1_exp" operation="eq" value="ioc-node1"/>
</rule>
</rsc_location>
<rsc_location id="rsc_location_ve1" rsc="group_ve1">
<rule id="not_prefered_location_ve1" score="-INFINITY">
<expression attribute="#uname"
id="not_prefered_location_ve1_node1" operation="eq" value="ioc-node1"/>
</rule>
<rule id="prefered_location_ve1_node2" score="INFINITY">
<expression attribute="#uname"
id="prefered_location_ve1_node2_exp" operation="eq" value="ioc-node2"/>
</rule>
</rsc_location>
</constraints>
</configuration>
</cib>
Thanks,
Florin
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems
Hi,
I changed my constraints but still have no luck. It seems that there is
something wrong in my configuration.
This is my constraints config:
--------------------------------
<constraints>
<rsc_colocation id="not_same" from="group_ve0" to="group_ve1"
score="-INFINITY"/>
<rsc_location id="rsc_location_ve0" rsc="group_ve0">
<rule id="prefered_location_ve0_node1" score="400">
<expression attribute="#uname"
id="prefered_location_ve0_node1_exp" operation="eq" value="node1"/>
</rule>
<rule id="prefered_location_ve0_node2" score="300">
<expression attribute="#uname"
id="prefered_location_ve0_node2_exp" operation="eq" value="node2"/>
</rule>
<rule id="not_prefered_location_ve0" score="-INFINITY">
<expression attribute="#uname"
id="not_prefered_location_ve0_node3" operation="eq" value="node3"/>
</rule>
</rsc_location>
<rsc_location id="rsc_location_ve1" rsc="group_ve1">
<rule id="prefered_location_ve1_node2" score="200">
<expression attribute="#uname"
id="prefered_location_ve1_node2_exp" operation="eq" value="node2"/>
</rule>
<rule id="prefered_location_ve1_node3" score="100">
<expression attribute="#uname"
id="prefered_location_ve1_node3_exp" operation="eq" value="node3"/>
</rule>
<rule id="not_prefered_location_ve1" score="-INFINITY">
<expression attribute="#uname"
id="not_prefered_location_ve1_node1" operation="eq" value="node1"/>
</rule>
</rsc_location>
</constraints>
--------------------------------
This is the log from node2:
--------------------------------
heartbeat[14354]: 2008/05/23_04:43:03 WARN: node node1: is dead
heartbeat[14354]: 2008/05/23_04:43:03 info: Link node1:vmbr0 dead.
crmd[14376]: 2008/05/23_04:43:03 notice: crmd_ha_status_callback: Status
update: Node node1 now has status [dead]
crmd[14376]: 2008/05/23_04:43:03 info: mem_handle_event: Got an event
OC_EV_MS_NOT_PRIMARY from ccm
crmd[14376]: 2008/05/23_04:43:03 info: mem_handle_event: instance=3,
nodes=3, new=1, lost=0, n_idx=0, new_idx=3, old_idx=6
crmd[14376]: 2008/05/23_04:43:03 info: crmd_ccm_msg_callback: Quorum
lost after event=NOT PRIMARY (id=3)
cib[14372]: 2008/05/23_04:43:03 info: mem_handle_event: Got an event
OC_EV_MS_NOT_PRIMARY from ccm
cib[14372]: 2008/05/23_04:43:03 info: mem_handle_event: instance=3,
nodes=3, new=1, lost=0, n_idx=0, new_idx=3, old_idx=6
crmd[14376]: 2008/05/23_04:43:04 info: mem_handle_event: Got an event
OC_EV_MS_INVALID from ccm
crmd[14376]: 2008/05/23_04:43:04 info: mem_handle_event: no mbr_track info
crmd[14376]: 2008/05/23_04:43:04 info: mem_handle_event: Got an event
OC_EV_MS_NEW_MEMBERSHIP from ccm
crmd[14376]: 2008/05/23_04:43:04 info: mem_handle_event: instance=4,
nodes=2, new=0, lost=1, n_idx=0, new_idx=2, old_idx=5
crmd[14376]: 2008/05/23_04:43:04 info: crmd_ccm_msg_callback: Quorum
(re)attained after event=NEW MEMBERSHIP (id=4)
crmd[14376]: 2008/05/23_04:43:04 WARN: check_dead_member: Our DC node
(node1) left the cluster
crmd[14376]: 2008/05/23_04:43:04 info: ccm_event_detail: NEW MEMBERSHIP:
trans=4, nodes=2, new=0, lost=1 n_idx=0, new_idx=2, old_idx=5
crmd[14376]: 2008/05/23_04:43:04 info: ccm_event_detail: CURRENT:
node2 [nodeid=1, born=2]
crmd[14376]: 2008/05/23_04:43:04 info: ccm_event_detail: CURRENT:
node3 [nodeid=2, born=3]
crmd[14376]: 2008/05/23_04:43:04 info: ccm_event_detail: LOST:
node1 [nodeid=0, born=1]
crmd[14376]: 2008/05/23_04:43:04 info: do_state_transition: State
transition S_NOT_DC -> S_ELECTION [ input=I_ELECTION
cause=C_FSA_INTERNAL origin=check_dead_member ]
crmd[14376]: 2008/05/23_04:43:04 info: update_dc: Unset DC node1
cib[14372]: 2008/05/23_04:43:04 info: mem_handle_event: Got an event
OC_EV_MS_INVALID from ccm
cib[14372]: 2008/05/23_04:43:04 info: mem_handle_event: no mbr_track info
cib[14372]: 2008/05/23_04:43:04 info: mem_handle_event: Got an event
OC_EV_MS_NEW_MEMBERSHIP from ccm
cib[14372]: 2008/05/23_04:43:04 info: mem_handle_event: instance=4,
nodes=2, new=0, lost=1, n_idx=0, new_idx=2, old_idx=5
cib[14372]: 2008/05/23_04:43:04 info: cib_ccm_msg_callback: LOST: node1
cib[14372]: 2008/05/23_04:43:04 info: cib_ccm_msg_callback: PEER: node2
cib[14372]: 2008/05/23_04:43:04 info: cib_ccm_msg_callback: PEER: node3
crmd[14376]: 2008/05/23_04:43:04 info: do_election_count_vote: Updated
voted hash for node2 to vote
crmd[14376]: 2008/05/23_04:43:04 info: do_election_count_vote: Election
ignore: our vote (node2)
crmd[14376]: 2008/05/23_04:43:04 info: do_election_check: Still waiting
on 1 non-votes (2 total)
crmd[14376]: 2008/05/23_04:43:05 info: do_election_count_vote: Updated
voted hash for node3 to no-vote
crmd[14376]: 2008/05/23_04:43:05 info: do_election_count_vote: Election
ignore: no-vote from node3
crmd[14376]: 2008/05/23_04:43:05 info: do_state_transition: State
transition S_ELECTION -> S_INTEGRATION [ input=I_ELECTION_DC
cause=C_FSA_INTERNAL origin=do_election_check ]
crmd[14376]: 2008/05/23_04:43:05 info: start_subsystem: Starting
sub-system "tengine"
tengine[15627]: 2008/05/23_04:43:05 info: G_main_add_SignalHandler:
Added signal handler for signal 15
tengine[15627]: 2008/05/23_04:43:05 info: G_main_add_TriggerHandler:
Added signal manual handler
tengine[15627]: 2008/05/23_04:43:05 info: G_main_add_TriggerHandler:
Added signal manual handler
cib[14372]: 2008/05/23_04:43:05 info: cib_null_callback: Setting
cib_diff_notify callbacks for tengine: on
tengine[15627]: 2008/05/23_04:43:05 info: te_init: Registering TE UUID:
698b6e08-f606-45fa-98c2-2c5b330d36f6
tengine[15627]: 2008/05/23_04:43:05 info: set_graph_functions: Setting
custom graph functions
tengine[15627]: 2008/05/23_04:43:05 info: unpack_graph: Unpacked
transition -1: 0 actions in 0 synapses
tengine[15627]: 2008/05/23_04:43:05 info: te_init: Starting tengine
tengine[15627]: 2008/05/23_04:43:05 info: te_connect_stonith: Attempting
connection to fencing daemon...
crmd[14376]: 2008/05/23_04:43:05 info: start_subsystem: Starting
sub-system "pengine"
pengine[15628]: 2008/05/23_04:43:05 info: G_main_add_SignalHandler:
Added signal handler for signal 15
pengine[15628]: 2008/05/23_04:43:05 info: pe_init: Starting pengine
crmd[14376]: 2008/05/23_04:43:05 info: do_dc_takeover: Taking over DC
status for this partition
cib[14372]: 2008/05/23_04:43:05 info: cib_process_readwrite: We are now
in R/W mode
crmd[14376]: 2008/05/23_04:43:05 info: join_make_offer: Making join
offers based on membership 4
crmd[14376]: 2008/05/23_04:43:05 info: do_dc_join_offer_all: join-1:
Waiting on 2 outstanding join acks
crmd[14376]: 2008/05/23_04:43:05 info: update_dc: Set DC to node2 (2.0)
tengine[15627]: 2008/05/23_04:43:06 info: te_connect_stonith: Connected
crmd[14376]: 2008/05/23_04:43:06 info: do_state_transition: State
transition S_INTEGRATION -> S_FINALIZE_JOIN [ input=I_INTEGRATED
cause=C_FSA_INTERNAL origin=check_join_state ]
crmd[14376]: 2008/05/23_04:43:06 info: do_state_transition: All 2
cluster nodes responded to the join offer.
cib[14372]: 2008/05/23_04:43:06 info: sync_our_cib: Syncing CIB to all peers
crmd[14376]: 2008/05/23_04:43:06 info: update_attrd: Connecting to attrd...
attrd[14375]: 2008/05/23_04:43:06 info: attrd_local_callback: Sending
full refresh
crmd[14376]: 2008/05/23_04:43:06 info: update_dc: Set DC to node2 (2.0)
crmd[14376]: 2008/05/23_04:43:06 info: do_dc_join_ack: join-1: Updating
node state to member for node2
crmd[14376]: 2008/05/23_04:43:07 info: do_dc_join_ack: join-1: Updating
node state to member for node3
crmd[14376]: 2008/05/23_04:43:07 info: do_state_transition: State
transition S_FINALIZE_JOIN -> S_POLICY_ENGINE [ input=I_FINALIZED
cause=C_FSA_INTERNAL origin=check_join_state ]
crmd[14376]: 2008/05/23_04:43:07 info: do_state_transition: All 2
cluster nodes are eligible to run resources.
tengine[15627]: 2008/05/23_04:43:07 info: update_abort_priority: Abort
priority upgraded to 1000000
tengine[15627]: 2008/05/23_04:43:07 info: update_abort_priority: 'DC
Takeover' abort superceeded
pengine[15628]: 2008/05/23_04:43:07 notice: unpack_config: On loss of
CCM Quorum: Ignore
pengine[15628]: 2008/05/23_04:43:07 info: determine_online_status: Node
node2 is online
pengine[15628]: 2008/05/23_04:43:07 info: determine_online_status: Node
node3 is online
pengine[15628]: 2008/05/23_04:43:07 notice: group_print: Resource Group:
group_ve0
pengine[15628]: 2008/05/23_04:43:07 notice: native_print:
drbddisk_ve0 (heartbeat:drbddisk): Stopped
pengine[15628]: 2008/05/23_04:43:07 notice: native_print:
Filesystem_ve0 (heartbeat::ocf:Filesystem): Stopped
pengine[15628]: 2008/05/23_04:43:07 notice: native_print: run_ve0
(lsb:ve0): Stopped
pengine[15628]: 2008/05/23_04:43:07 notice: group_print: Resource Group:
group_ve1
pengine[15628]: 2008/05/23_04:43:07 notice: native_print:
drbddisk_ve1 (heartbeat:drbddisk): Started node2
pengine[15628]: 2008/05/23_04:43:07 notice: native_print:
Filesystem_ve1 (heartbeat::ocf:Filesystem): Started node2
pengine[15628]: 2008/05/23_04:43:07 notice: native_print: run_ve1
(lsb:ve1): Started node2
pengine[15628]: 2008/05/23_04:43:07 WARN: native_color: Resource
drbddisk_ve0 cannot run anywhere
pengine[15628]: 2008/05/23_04:43:07 WARN: native_color: Resource
Filesystem_ve0 cannot run anywhere
pengine[15628]: 2008/05/23_04:43:07 WARN: native_color: Resource run_ve0
cannot run anywhere
pengine[15628]: 2008/05/23_04:43:07 notice: NoRoleChange: Leave resource
drbddisk_ve1 (node2)
pengine[15628]: 2008/05/23_04:43:07 notice: NoRoleChange: Leave resource
Filesystem_ve1 (node2)
pengine[15628]: 2008/05/23_04:43:07 notice: NoRoleChange: Leave resource
run_ve1 (node2)
crmd[14376]: 2008/05/23_04:43:07 info: do_state_transition: State
transition S_POLICY_ENGINE -> S_TRANSITION_ENGINE [ input=I_PE_SUCCESS
cause=C_IPC_MESSAGE origin=route_message ]
tengine[15627]: 2008/05/23_04:43:07 info: unpack_graph: Unpacked
transition 0: 0 actions in 0 synapses
tengine[15627]: 2008/05/23_04:43:07 info: run_graph: Transition 0:
(Complete=0, Pending=0, Fired=0, Skipped=0, Incomplete=0)
tengine[15627]: 2008/05/23_04:43:07 info: notify_crmd: Transition 0
status: te_complete - <null>
crmd[14376]: 2008/05/23_04:43:07 info: do_state_transition: State
transition S_TRANSITION_ENGINE -> S_IDLE [ input=I_TE_SUCCESS
cause=C_IPC_MESSAGE origin=route_message ]
pengine[15628]: 2008/05/23_04:43:07 WARN: process_pe_message: Transition
0: WARNINGs found during PE processing. PEngine Input stored in:
/var/lib/heartbeat/pengine/pe-warn-24.bz2
pengine[15628]: 2008/05/23_04:43:07 info: process_pe_message:
Configuration WARNINGs found during PE processing. Please run
"crm_verify -L" to identify issues.
cib[14372]: 2008/05/23_04:43:08 info: sync_our_cib: Syncing CIB to node3
--------------------------------
This is the log from node3:
--------------------------------
heartbeat[21876]: 2008/05/23_05:47:08 WARN: node node1: is dead
heartbeat[21876]: 2008/05/23_05:47:08 info: Link node1:vmbr0 dead.
crmd[21893]: 2008/05/23_05:47:08 notice: crmd_ha_status_callback: Status
update: Node node1 now has status [dead]
crmd[21893]: 2008/05/23_05:47:08 info: mem_handle_event: Got an event
OC_EV_MS_NOT_PRIMARY from ccm
crmd[21893]: 2008/05/23_05:47:08 info: mem_handle_event: instance=3,
nodes=3, new=3, lost=0, n_idx=0, new_idx=0, old_idx=6
crmd[21893]: 2008/05/23_05:47:08 info: crmd_ccm_msg_callback: Quorum
lost after event=NOT PRIMARY (id=3)
cib[21889]: 2008/05/23_05:47:08 info: mem_handle_event: Got an event
OC_EV_MS_NOT_PRIMARY from ccm
cib[21889]: 2008/05/23_05:47:08 info: mem_handle_event: instance=3,
nodes=3, new=3, lost=0, n_idx=0, new_idx=0, old_idx=6
crmd[21893]: 2008/05/23_05:47:10 info: do_election_count_vote: Election
check: vote from node2
crmd[21893]: 2008/05/23_05:47:10 info: update_dc: Unset DC node1
crmd[21893]: 2008/05/23_05:47:10 info: do_state_transition: State
transition S_NOT_DC -> S_PENDING [ input=I_PENDING cause=C_FSA_INTERNAL
origin=do_election_count_vote ]
crmd[21893]: 2008/05/23_05:47:10 info: mem_handle_event: Got an event
OC_EV_MS_INVALID from ccm
crmd[21893]: 2008/05/23_05:47:10 info: mem_handle_event: no mbr_track info
crmd[21893]: 2008/05/23_05:47:10 info: mem_handle_event: Got an event
OC_EV_MS_NEW_MEMBERSHIP from ccm
crmd[21893]: 2008/05/23_05:47:10 info: mem_handle_event: instance=4,
nodes=2, new=0, lost=1, n_idx=0, new_idx=2, old_idx=5
crmd[21893]: 2008/05/23_05:47:10 info: crmd_ccm_msg_callback: Quorum
(re)attained after event=NEW MEMBERSHIP (id=4)
crmd[21893]: 2008/05/23_05:47:10 info: ccm_event_detail: NEW MEMBERSHIP:
trans=4, nodes=2, new=0, lost=1 n_idx=0, new_idx=2, old_idx=5
crmd[21893]: 2008/05/23_05:47:10 info: ccm_event_detail: CURRENT:
node2 [nodeid=1, born=2]
crmd[21893]: 2008/05/23_05:47:10 info: ccm_event_detail: CURRENT:
node3 [nodeid=2, born=3]
crmd[21893]: 2008/05/23_05:47:10 info: ccm_event_detail: LOST:
node1 [nodeid=0, born=1]
cib[21889]: 2008/05/23_05:47:10 info: mem_handle_event: Got an event
OC_EV_MS_INVALID from ccm
cib[21889]: 2008/05/23_05:47:10 info: mem_handle_event: no mbr_track info
cib[21889]: 2008/05/23_05:47:10 info: mem_handle_event: Got an event
OC_EV_MS_NEW_MEMBERSHIP from ccm
cib[21889]: 2008/05/23_05:47:10 info: mem_handle_event: instance=4,
nodes=2, new=0, lost=1, n_idx=0, new_idx=2, old_idx=5
cib[21889]: 2008/05/23_05:47:10 info: cib_ccm_msg_callback: LOST: node1
cib[21889]: 2008/05/23_05:47:10 info: cib_ccm_msg_callback: PEER: node2
cib[21889]: 2008/05/23_05:47:10 info: cib_ccm_msg_callback: PEER: node3
crmd[21893]: 2008/05/23_05:47:11 info: update_dc: Set DC to node2 (2.0)
crmd[21893]: 2008/05/23_05:47:12 info: update_dc: Set DC to node2 (2.0)
crmd[21893]: 2008/05/23_05:47:12 info: do_state_transition: State
transition S_PENDING -> S_NOT_DC [ input=I_NOT_DC cause=C_HA_MESSAGE
origin=do_cl_join_finalize_respond ]
cib[21889]: 2008/05/23_05:47:14 info: apply_xml_diff: Digest mis-match:
expected 24cde001da832e01ab2b45a6f34927e9, calculated
7078043222ff90d8455734d19f719168
cib[21889]: 2008/05/23_05:47:14 info: cib_process_diff: Diff 0.53.22 ->
0.53.23 not applied to 0.53.22: Failed application of a global update.
Requesting full refresh.
cib[21889]: 2008/05/23_05:47:14 info: cib_process_diff: Requesting
re-sync from peer: Failed application of a global update. Requesting
full refresh.
cib[21889]: 2008/05/23_05:47:14 WARN: do_cib_notify: cib_apply_diff of
<diff > FAILED: Application of an update diff failed, requesting a full
refresh
cib[21889]: 2008/05/23_05:47:14 WARN: cib_process_request:
cib_apply_diff operation failed: Application of an update diff failed,
requesting a full refresh
cib[21889]: 2008/05/23_05:47:14 info: cib_replace_notify: Replaced:
0.53.22 -> 0.53.23 from <null>
crmd[21893]: 2008/05/23_05:47:14 info: populate_cib_nodes: Requesting
the list of configured nodes
crmd[21893]: 2008/05/23_05:47:15 notice: populate_cib_nodes: Node: node3
(uuid: e30ae69f-42c8-4464-96c7-4ff9ad32fe96)
crmd[21893]: 2008/05/23_05:47:15 notice: populate_cib_nodes: Node: node2
(uuid: bf4f1d62-b04c-4ca9-89e0-f8a5ccbca608)
crmd[21893]: 2008/05/23_05:47:15 notice: populate_cib_nodes: Node: node1
(uuid: 682802e4-504d-4aa6-b418-49ff6a7b1780)
--------------------------------
I hope someone can help me out.
Thanks
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems