I know this isn't the right group for this, but I am hoping someone can point
me in the right direction.
I have set up a DRBD cluster for replicating a drbd volume across two servers.
Each server has its own IP address (10.89.99.31 and 10.89.99.32). I also have a
shared (floating) cluster IP address (10.89.99.30) used for mounting the volume
on other computers. I am using cman and pacemaker.
I can mount the volume with no issues using 10.89.99.30. When I unplug the
network cable from my second storage server, the client loses connectivity to
the NFS share, even though the cluster IP address is still assigned. I can
still ping 10.89.99.30, but the share no longer appears in the output of
showmount -e 10.89.99.30
I have attached the cib.xml and cluster.conf files from my drbd cluster. I know
this is more of a pacemaker issue, but maybe someone here has experience with
this?
William
<!-- Pacemaker CIB for the two-node "Storage" cluster (SERVER1/SERVER2),
     running on the cman stack (pacemaker 1.1.7). -->
<cib epoch="109" num_updates="0" admin_epoch="0" validate-with="pacemaker-1.2"
crm_feature_set="3.0.6" update-origin="SERVER1" update-client="cibadmin"
cib-last-written="Fri Jul 20 01:33:05 2012" have-quorum="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<!-- Failed nodes are powered off rather than rebooted. -->
<nvpair id="cib-bootstrap-options-stonith-action" name="stonith-action"
value="poweroff"/>
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version"
value="1.1.7-6.el6-148fccfd5985c5590cc601123c6c16e966b85d14"/>
<!-- Quorum is ignored: required for a 2-node cluster to keep running
     when one node is lost, but it makes working fencing essential. -->
<nvpair id="cib-bootstrap-options-no-quorum-policy"
name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-cluster-recheck-interval"
name="cluster-recheck-interval" value="5min"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure"
name="cluster-infrastructure" value="cman"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="SERVER1" type="normal" uname="SERVER1"/>
<node id="SERVER2" type="normal" uname="SERVER2"/>
</nodes>
<resources>
<!-- DRBD resource "nfs" in dual-primary mode: master-max=2 promotes a
     Master on both nodes. NOTE(review): dual-primary DRBD normally
     requires working fencing and matching fencing policy in drbd.conf
     (not visible here) - confirm before relying on it. -->
<master id="StorageDataClone">
<meta_attributes id="StorageDataClone-meta_attributes">
<nvpair id="StorageDataClone-meta_attributes-master-max"
name="master-max" value="2"/>
<nvpair id="StorageDataClone-meta_attributes-master-node-max"
name="master-node-max" value="1"/>
<nvpair id="StorageDataClone-meta_attributes-clone-max"
name="clone-max" value="2"/>
<nvpair id="StorageDataClone-meta_attributes-clone-node-max"
name="clone-node-max" value="1"/>
<nvpair id="StorageDataClone-meta_attributes-notify" name="notify"
value="true"/>
</meta_attributes>
<primitive class="ocf" id="StorageData" provider="linbit" type="drbd">
<instance_attributes id="StorageData-instance_attributes">
<nvpair id="StorageData-instance_attributes-drbd_resource"
name="drbd_resource" value="nfs"/>
</instance_attributes>
<operations>
<op id="StorageData-monitor-60s" interval="60s" name="monitor"/>
</operations>
</primitive>
</master>
<!-- Service IP 10.89.99.30 as a globally-unique IPaddr2 clone with
     clusterip_hash: this is iptables CLUSTERIP load-sharing mode, with
     clone-node-max=2 allowing both instances to land on one node after
     failover. NOTE(review): CLUSTERIP answers ARP with a multicast MAC
     and filters by source-IP hash; this is a likely suspect for clients
     that can ping 10.89.99.30 but lose the NFS/showmount connection
     after a node failure - verify the surviving node picks up both hash
     buckets. -->
<clone id="StorageIP">
<meta_attributes id="StorageIP-meta_attributes">
<nvpair id="StorageIP-meta_attributes-globally-unique"
name="globally-unique" value="true"/>
<nvpair id="StorageIP-meta_attributes-clone-max" name="clone-max"
value="2"/>
<nvpair id="StorageIP-meta_attributes-clone-node-max"
name="clone-node-max" value="2"/>
</meta_attributes>
<primitive class="ocf" id="ClusterIP" provider="heartbeat"
type="IPaddr2">
<instance_attributes id="ClusterIP-instance_attributes">
<nvpair id="ClusterIP-instance_attributes-ip" name="ip"
value="10.89.99.30"/>
<nvpair id="ClusterIP-instance_attributes-cidr_netmask"
name="cidr_netmask" value="22"/>
<nvpair id="ClusterIP-instance_attributes-clusterip_hash"
name="clusterip_hash" value="sourceip"/>
</instance_attributes>
<operations id="ClusterIP-operations">
<op id="ClusterIP-monitor-30s" interval="30" name="monitor"
start-delay="0"/>
</operations>
</primitive>
</clone>
<!-- NOTE(review): fence_pcmk is the agent cluster.conf normally uses to
     redirect CMAN fence requests INTO pacemaker; configuring it here as
     a pacemaker stonith resource gives pacemaker no real device to
     fence with (the actual fence_ipmilan device lives, unreferenced, in
     cluster.conf). Confirm fencing actually works with this layout. -->
<clone id="cl_fence_pcmk_1">
<meta_attributes id="cl_fence_pcmk_1-meta_attributes">
<nvpair id="cl_fence_pcmk_1-meta_attributes-clone-max"
name="clone-max" value="2"/>
<nvpair id="cl_fence_pcmk_1-meta_attributes-notify" name="notify"
value="true"/>
<nvpair id="cl_fence_pcmk_1-meta_attributes-interleave"
name="interleave" value="true"/>
</meta_attributes>
<primitive id="stonith_fence_pcmk_1" class="stonith" type="fence_pcmk">
<meta_attributes id="stonith_fence_pcmk_1-meta_attributes"/>
</primitive>
</clone>
<!-- NOTE(review): no Filesystem/exportfs/nfs-server resources are
     managed by pacemaker; the gfs2 mount and an <ip> for the same
     address appear only in cluster.conf's <rm> section instead. -->
</resources>
<constraints>
<rsc_location id="loc_ClusterIP_SERVER1" node="SERVER1" rsc="StorageIP"
score="INFINITY"/>
<rsc_location id="loc_ClusterIP_SERVER2" node="SERVER2" rsc="StorageIP"
score="INFINITY"/>
<!-- Prefers nodes where the "pingd" node attribute is defined.
     NOTE(review): no ocf:pacemaker:ping(d) resource is defined anywhere
     in this CIB, so nothing ever sets that attribute and this rule is
     inert - either add a ping clone or drop the rule. -->
<rsc_location id="loc_ClusterIP-ping-prefer" rsc="StorageIP">
<rule id="loc_ClusterIP-ping-prefer-rule" score-attribute="pingd">
<expression attribute="pingd"
id="loc_ClusterIP-ping-prefer-expression" operation="defined"/>
</rule>
</rsc_location>
<rsc_location id="loc_cl_fence_pcmk_1_SERVER1" rsc="cl_fence_pcmk_1"
node="SERVER1" score="INFINITY"/>
<rsc_location id="loc_cl_fence_pcmk_1_SERVER2" rsc="cl_fence_pcmk_1"
node="SERVER2" score="INFINITY"/>
</constraints>
<rsc_defaults>
<!-- Defaults applied to every resource: start by default, and allow
     live migration where the agent supports it. -->
<meta_attributes id="rsc-options">
<nvpair id="rsc-options-target-role" name="target-role"
value="started"/>
<nvpair id="rsc-options-allow-migrate" name="allow-migrate"
value="true"/>
</meta_attributes>
</rsc_defaults>
</configuration>
</cib>
<!-- CMAN configuration for the two-node "Storage" cluster.
     Fix: the original file left both <fence> blocks empty and declared an
     "ilo" fence device that no node referenced, so CMAN fencing was
     entirely unconfigured. With the cman+pacemaker stack, the documented
     pattern is to redirect every CMAN fence request to pacemaker via the
     fence_pcmk agent; the real power device (IPMI/iLO) then belongs in
     pacemaker as a stonith resource. config_version bumped 2 -> 3 for the
     edit. -->
<cluster name="Storage" config_version="3">
<logging syslog_facility="local3"/>
<!-- Standard two-node quorum override: the cluster stays quorate with a
     single surviving node. -->
<cman two_node="1" expected_votes="1" broadcast="no"/>
<clusternodes>
<clusternode name="SERVER1" nodeid="1">
<fence>
<!-- Hand fencing of this node off to pacemaker. -->
<method name="pcmk-redirect">
<device name="pcmk" port="SERVER1"/>
</method>
</fence>
</clusternode>
<clusternode name="SERVER2" nodeid="2">
<fence>
<method name="pcmk-redirect">
<device name="pcmk" port="SERVER2"/>
</method>
</fence>
</clusternode>
</clusternodes>
<fencedevices>
<!-- fence_pcmk forwards CMAN fence requests to pacemaker's stonith layer. -->
<fencedevice name="pcmk" agent="fence_pcmk"/>
<!-- NOTE(review): "ilo" is kept for reference but has no ipaddr/login/
     passwd parameters, so it cannot fence anything as written; configure
     the real IPMI device as a pacemaker stonith resource instead. -->
<fencedevice name="ilo" agent="fence_ipmilan"/>
</fencedevices>
<rm>
<!-- NOTE(review): pacemaker already manages 10.89.99.30 (ClusterIP in the
     CIB); an rgmanager <ip> for the same address risks double management
     if rgmanager is ever started. If rgmanager is unused, this whole
     section is inert and can be emptied. -->
<failoverdomains>
<failoverdomain name="Failover" nofailback="1" ordered="1"
restricted="0">
<failoverdomainnode name="SERVER1" priority="1"/>
<failoverdomainnode name="SERVER2" priority="2"/>
</failoverdomain>
</failoverdomains>
<resources>
<fs name="Storage" device="/dev/drbd0" mountpoint="/Storage"
fstype="gfs2"/>
<ip address="10.89.99.30" monitor_link="yes" sleeptime="10"/>
</resources>
</rm>
</cluster>
_______________________________________________
drbd-user mailing list
[email protected]
http://lists.linbit.com/mailman/listinfo/drbd-user