Right about mysql.
I moved all mysql data (except the .sock file) onto the drbd partition and I
created a symlink pointing to the new directory. I'm not sure about this
approach but it seems to work. Then I added these lines (created with
haresources2cib.py) to my cib.xml file in order to add mysqld as a resource.
<primitive class="heartbeat" id="mysqld_4" provider="heartbeat"
type="mysqld">
<operations>
<op id="mysqld_4_mon" interval="120s" name="monitor" timeout="60s"/>
</operations>
</primitive>
It doesn't work. The resource is seen as "unmanaged".
So, looking for help on the web I found something which I tried on my
machines. I simply modified the cib.xml again, which now looks like the one
below. Unfortunately I still have the same problem, which I can't get past.
I've also noticed that there's no mysql script in
/etc/ha.d/resources.d. Is that normal?
<cib admin_epoch="0" have_quorum="true" num_peers="2"
cib_feature_revision="1.3" ccm_transition="2" generated="true"
dc_uuid="6bb5c4f4-1e87-4315-a8ec-903093c1909b" epoch="23" num_updates="444"
cib-last-written="Fri Apr 27 18:21:14 2007">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<attributes>
<nvpair id="cib-bootstrap-options-symmetric-cluster"
name="symmetric-cluster" value="true"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy"
name="no-quorum-policy" value="stop"/>
<nvpair
id="cib-bootstrap-options-default-resource-stickiness"
name="default-resource-stickiness" value="0"/>
<nvpair
id="cib-bootstrap-options-default-resource-failure-stickiness"
name="default-resource-failure-stickiness" value="0"/>
<nvpair id="cib-bootstrap-options-stonith-enabled"
name="stonith-enabled" value="false"/>
<nvpair id="cib-bootstrap-options-stonith-action"
name="stonith-action" value="reboot"/>
<nvpair id="cib-bootstrap-options-stop-orphan-resources"
name="stop-orphan-resources" value="true"/>
<nvpair id="cib-bootstrap-options-stop-orphan-actions"
name="stop-orphan-actions" value="true"/>
<nvpair id="cib-bootstrap-options-remove-after-stop"
name="remove-after-stop" value="false"/>
<nvpair id="cib-bootstrap-options-short-resource-names"
name="short-resource-names" value="true"/>
<nvpair id="cib-bootstrap-options-transition-idle-timeout"
name="transition-idle-timeout" value="5min"/>
<nvpair id="cib-bootstrap-options-default-action-timeout"
name="default-action-timeout" value="5s"/>
<nvpair id="cib-bootstrap-options-is-managed-default"
name="is-managed-default" value="true"/>
<nvpair id="cib-bootstrap-options-last-lrm-refresh"
name="last-lrm-refresh" value="1177605190"/>
</attributes>
</cluster_property_set>
</crm_config>
<nodes>
<node id="00df8efc-5bf9-4918-9cd7-c3efca8143f1" uname="nodo1"
type="normal"/>
<node id="6bb5c4f4-1e87-4315-a8ec-903093c1909b" uname="nodo2"
type="normal"/>
</nodes>
<resources>
<group id="group_1">
<primitive class="ocf" id="IPaddr_192_168_1_93"
provider="heartbeat" type="IPaddr">
<instance_attributes id="IPaddr_192_168_1_93_inst_attr">
<attributes>
<nvpair id="IPaddr_192_168_1_93_attr_0" name="ip"
value="192.168.1.93"/>
<nvpair id="IPaddr_192_168_1_93_target_role"
name="target_role" value="started"/>
</attributes>
</instance_attributes>
</primitive>
<primitive class="heartbeat" id="drbddisk_2"
provider="heartbeat" type="drbddisk">
<instance_attributes id="drbddisk_2_instance_attrs">
<attributes>
<nvpair id="drbddisk_2_target_role" name="target_role"
value="started"/>
</attributes>
</instance_attributes>
</primitive>
<primitive class="ocf" id="apache_3" provider="heartbeat"
type="apache">
<instance_attributes id="apache_3_instance_attrs">
<attributes>
<nvpair id="apache_3_target_role" name="target_role"
value="started"/>
</attributes>
</instance_attributes>
</primitive>
</group>
<primitive class="heartbeat" id="mysqld_4" provider="heartbeat"
type="mysqld">
<operations>
<op id="mysqld_4_mon" interval="30s" name="monitor"
timeout="20s"/>
<op id="mysqld_4_start" name="start" timeout="10s"/>
<op id="mysqld_4_stop" name="stop" timeout="10s"/>
</operations>
</primitive>
</resources>
<constraints>
<rsc_location id="rsc_location_group_1" rsc="group_1">
<rule id="prefered_location_group_1" score="100">
<expression attribute="#uname"
id="prefered_location_group_1_expr" operation="eq" value="nodo1"/>
</rule>
</rsc_location>
</constraints>
</configuration>
</cib>
Eddie C wrote:
> Also, you can now do multi-master mysql replication by specifying an
> auto_increment seed. It might be better than a futureware alternative like
> NDB cluster.
>
> On 4/27/07, Jan Kalcic <[EMAIL PROTECTED]> wrote:
>>
>>
>> >> You should never, ever modify the cib while heartbeat is running.
>> >> Also, if you do that while heartbeat is stopped, you have to
>> >> first remove the signature files (they're in the same directory).
>> >>
>> >>
>> Sorry I didn't know this. Now, I stopped heartbeat, removed the
>> cib.xml.sig file on both the nodes, put the cib.xml you sent and here we
>> are. It works great!
>>
>> Really thanks,
>> Jan
>> _______________________________________________
>> Linux-HA mailing list
>> [email protected]
>> http://lists.linux-ha.org/mailman/listinfo/linux-ha
>> See also: http://linux-ha.org/ReportingProblems
>>
> _______________________________________________
> Linux-HA mailing list
> [email protected]
> http://lists.linux-ha.org/mailman/listinfo/linux-ha
> See also: http://linux-ha.org/ReportingProblems
>
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems