I have two servers: KNTCLFS001 and KNTCLFS002
I have a drbd partition named nfs, on each server. They are mirrored. The
mirroring works perfectly.
What I want is to serve this drbd partition up and have it so that if one
server goes down, the drbd partition is still available on the other server. I
am trying to do this in an active/active cluster. I have a cloned IP address
that is supposed to be running on both machines at the same time.
I can get the resources set up according to the "Clusters from Scratch" guide.
The issue is that as soon as I take KNTCLFS001 down, all my resources go down
as well. They won't even stay running on KNTCLFS002.
Someone suggested adding constraints, but that doesn't really make sense to me. I
want the services running on all systems at once. Constraints are preconditions
for bringing a service up on another machine, but the service should already be
running everywhere, so in theory constraints shouldn't be necessary.
Any help will be greatly appreciated. I have attached my cib.xml to give anyone
a better idea of what I am trying to do. I have been going through the
"Pacemaker 1.1 Configuration Explained" but so far I haven't found anything.
William
<cib epoch="157" num_updates="0" admin_epoch="12" validate-with="pacemaker-1.2" crm_feature_set="3.0.6" update-origin="KNTCLFS001" update-client="cibadmin" cib-last-written="Tue Jul 24 02:53:32 2012" have-quorum="1" dc-uuid="KNTCLFS001">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.7-6.el6-148fccfd5985c5590cc601123c6c16e966b85d14"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-cluster-recheck-interval" name="cluster-recheck-interval" value="5min"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="cman"/>
<nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1343113626"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="KNTCLFS001" type="normal" uname="KNTCLFS001"/>
<node id="KNTCLFS002" type="normal" uname="KNTCLFS002"/>
</nodes>
<resources>
<clone id="ClusterIPClone">
<meta_attributes id="ClusterIPClone-meta_attributes">
<nvpair id="ClusterIPClone-meta_attributes-globally-unique" name="globally-unique" value="true"/>
<nvpair id="ClusterIPClone-meta_attributes-clone-max" name="clone-max" value="2"/>
<nvpair id="ClusterIPClone-meta_attributes-clone-node-max" name="clone-node-max" value="2"/>
</meta_attributes>
<primitive class="ocf" id="ClusterIP" provider="heartbeat" type="IPaddr2">
<instance_attributes id="ClusterIP-instance_attributes">
<nvpair id="ClusterIP-instance_attributes-ip" name="ip" value="10.89.99.30"/>
<nvpair id="ClusterIP-instance_attributes-cidr_netmask" name="cidr_netmask" value="22"/>
<nvpair id="ClusterIP-instance_attributes-clusterip_hash" name="clusterip_hash" value="sourceip"/>
</instance_attributes>
<operations>
<op id="ClusterIP-monitor-30s" interval="30s" name="monitor"/>
</operations>
</primitive>
</clone>
<clone id="ClusterFSClone">
<meta_attributes id="ClusterFSClone-meta_attributes">
<nvpair id="ClusterFSClone-meta_attributes-master-max" name="master-max" value="1"/>
<nvpair id="ClusterFSClone-meta_attributes-master-node-max" name="master-node-max" value="1"/>
<nvpair id="ClusterFSClone-meta_attributes-clone-max" name="clone-max" value="2"/>
<nvpair id="ClusterFSClone-meta_attributes-clone-node-max" name="clone-node-max" value="1"/>
<nvpair id="ClusterFSClone-meta_attributes-notify" name="notify" value="true"/>
</meta_attributes>
<primitive class="ocf" id="ClusterFS" provider="heartbeat" type="Filesystem">
<instance_attributes id="ClusterFS-instance_attributes">
<nvpair id="ClusterFS-instance_attributes-device" name="device" value="/dev/drbd/by-res/nfs"/>
<nvpair id="ClusterFS-instance_attributes-directory" name="directory" value="/Storage"/>
<nvpair id="ClusterFS-instance_attributes-fstype" name="fstype" value="gfs2"/>
</instance_attributes>
</primitive>
</clone>
<clone id="ClusterDataClone">
<meta_attributes id="ClusterDataClone-meta_attributes">
<nvpair id="ClusterDataClone-meta_attributes-master-max" name="master-max" value="1"/>
<nvpair id="ClusterDataClone-meta_attributes-master-node-max" name="master-node-max" value="1"/>
<nvpair id="ClusterDataClone-meta_attributes-clone-max" name="clone-max" value="2"/>
<nvpair id="ClusterDataClone-meta_attributes-clone-node-max" name="clone-node-max" value="1"/>
<nvpair id="ClusterDataClone-meta_attributes-notify" name="notify" value="true"/>
</meta_attributes>
<primitive class="ocf" id="ClusterData" provider="linbit" type="drbd">
<instance_attributes id="ClusterData-instance_attributes">
<nvpair id="ClusterData-instance_attributes-drbd_resource" name="drbd_resource" value="nfs"/>
</instance_attributes>
<operations>
<op id="ClusterData-monitor-60s" interval="60s" name="monitor" role="Master"/>
</operations>
</primitive>
</clone>
</resources>
<constraints>
<rsc_location id="loc_ClusterIPClone_KNTCLFS001" node="KNTCLFS001" rsc="ClusterIPClone" score="INFINITY"/>
<rsc_location id="loc_ClusterIPClone_KNTCLFS002" node="KNTCLFS002" rsc="ClusterIPClone" score="INFINITY"/>
</constraints>
<rsc_defaults>
<meta_attributes id="rsc-options">
<nvpair id="rsc-options-target-role" name="target-role" value="started"/>
<nvpair id="rsc-options-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="rsc-options-resource-stickiness" name="resource-stickiness" value="100"/>
</meta_attributes>
</rsc_defaults>
<op_defaults>
<meta_attributes id="op-options">
<nvpair id="op-options-timeout" name="timeout" value="240s"/>
</meta_attributes>
</op_defaults>
</configuration>
</cib>
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems