I have two servers which are both Dell 990's. Each server has two 1tb hard
drives configured in RAID0. I have installed CentOS on both and they have the
same partition sizes. I am using /dev/KNTCLFS00X/Storage as a drbd partition
and attaching it to /dev/drbd0. DRBD syncing appears to working fine.
I am trying to setup an Active/Active cluster. I have set up CMAN. I want to
use this cluster just for NFS storage.
I want to have these services running on both nodes at the same time:
* IP Address
* DRBD
* Filesystem (gcfs2)
Through a combination of official documentation and LCMC, I have this setup.
However, I am getting this:
============
Last updated: Fri Jun 8 23:11:58 2012
Last change: Fri Jun 8 23:11:37 2012 via crmd on KNTCLFS002
Stack: cman
Current DC: KNTCLFS002 - partition with quorum
Version: 1.1.6-3.el6-a02c0f19a00c1eb2527ad38f146ebc0834814558
2 Nodes configured, 2 expected votes
6 Resources configured.
============
Online: [ KNTCLFS001 KNTCLFS002 ]
Clone Set: cl_IPaddr2_1 [res_IPaddr2_1]
Started: [ KNTCLFS002 KNTCLFS001 ]
Master/Slave Set: ms_drbd_2 [res_drbd_2]
Masters: [ KNTCLFS002 ]
Stopped: [ res_drbd_2:0 ]
Clone Set: cl_Filesystem_1 [res_Filesystem_1] (unique)
res_Filesystem_1:0 (ocf::heartbeat:Filesystem): Stopped
res_Filesystem_1:1 (ocf::heartbeat:Filesystem): Started KNTCLFS002
Failed actions:
res_Filesystem_1:0_start_0 (node=KNTCLFS002, call=42, rc=1,
status=complete): unknown error
res_Filesystem_1:0_start_0 (node=KNTCLFS001, call=87, rc=1,
status=complete): unknown error
I have attached my nfs.res, cluster.conf and corosync.conf files. Please let
me know if I can provide any other information to help resolve this.
Thanks,
William Yount | Systems Analyst | Menlo Worldwide | Cell: 901-654-9933
Safety | Leadership | Integrity | Commitment | Excellence
Please consider the environment before printing this e-mail
<?xml version="1.0"?>
<cluster config_version="1" name="Storage">
<logging debug="off"/>
<clusternodes>
<clusternode name="KNTCLFS001" nodeid="1">
<fence>
<method name="pcmk-redirect">
<device name="pcmk" port="KNTCLFS001"/>
</method>
</fence>
</clusternode>
<clusternode name="KNTCLFS002" nodeid="2">
<fence>
<method name="pcmk-redirect">
<device name="pcmk" port="KNTCLFS002"/>
</method>
</fence>
</clusternode>
</clusternodes>
<fencedevices>
<fencedevice name="pcmk" agent="fence_pcmk"/>
</fencedevices>
</cluster>
## generated by drbd-gui 1.3.14
aisexec {
user: root
## generated by drbd-gui 1.3.14
aisexec {
user: root
group: root
}
corosync {
user: root
group: root
}
amf {
mode: disabled
}
logging {
to_stderr: yes
debug: off
timestamp: on
to_file: no
to_syslog: yes
syslog_facility: daemon
}
totem {
version: 2
token: 3000
token_retransmits_before_loss_const: 10
join: 60
consensus: 4000
vsftype: none
max_messages: 20
clear_node_high_bit: yes
heartbeat_failures_allowed=100
secauth: on
threads: 0
# nodeid: 1234
rrp_mode: active
interface {
ringnumber: 0
bindnetaddr: 192.168.1.0
mcastaddr: 226.94.1.1
mcastport: 5405
}
interface {
ringnumber: 1
bindnetaddr: 10.89.96.0
mcastaddr: 226.94.1.1
mcastport: 5405
}
}
service {
ver: 0
name: pacemaker
use_mgmtd: yes
clustername: Storage
}
resource nfs {
protocol C;
handlers {
fence-peer /usr/lib/drbd/crm-fence-peer.sh;
split-brain "/usr/lib/drbd/notify-split-brain.sh root";
after-resync-target /usr/lib/drbd/crm-unfence-peer.sh;
}
on KNTCLFS001 {
device /dev/drbd0;
disk /dev/VG1/Storage;
meta-disk /dev/VG1/Metadata[0];
address 192.168.1.5:7788;
}
on KNTCLFS002 {
device /dev/drbd0;
disk /dev/VG1/Storage;
meta-disk /dev/VG1/Metadata[0];
address 192.168.1.10:7788;
}
}
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems