Hi Ian,

On Thu, Aug 23, 2007 at 11:51:32AM +0800, Ian Jiang wrote:
> On 8/22/07, Ian Jiang <[EMAIL PROTECTED]> wrote:
> > Two nodes:
> > Primary hostname=IanVM-4, ipaddr=192.168.1.224
> > Slave    hostname=IanVM-5, ipaddr= 192.168.1.225
> >
> > What I want to make high-available: my_server - a program that
> > repeatedly accepts TCP connection requests (listening at 192.168.1.230:4567)
> > and disconnects in a few seconds.
> >
> 
> The following configuration was set up on both nodes:
> LSB scripts for my_server, ha.cf, authkeys, haresource, cib.xml.
> After starting heartbeat, however, crm_mon found no configured resources.
> Here are the details. Could anyone tell where the problem is? Thanks a lot.
> 
> 
> ====================
> LSB script for my_server
> ====================
> 
> [EMAIL PROTECTED] scripts]# cat /etc/init.d/my_server
> #!/bin/bash
> #
> 
> 
> case "$1" in
>         start)
>                 echo $"$0 started"
This
>                 ;;
>         stop)
>                 echo $"$0 stopped"
and this are very wrong, but that's about shell programming, i.e.
you should find help elsewhere. And
>                 ;;
>         *)
>                 echo "$0 $1 not supported!"
>                 ;;
> esac

the whole script cannot work. You have to support the status
action too. Why don't you just use one of the existing agents?

> =========
> /etc/ha.d
> =========
> 
> [EMAIL PROTECTED] scripts]# ll /etc/ha.d/
> total 72
> -rw------- 1 root root   643 Aug 22 21:56 authkeys
> -rw-r--r-- 1 root root 10855 Aug 22 21:56 ha.cf
> -rwxr-xr-x 1 root root   745 Aug 22 17:25 harc
> lrwxrwxrwx 1 root root    56 Aug 23 10:42 haresource ->
> /home/ian/work/linux-ha/scripts/fc6-haresource.my_server
> drwxr-xr-x 2 root root  4096 Aug 22 17:45 rc.d
> -rw-r--r-- 1 root root   692 Aug 22 17:25 README.config
> drwxr-xr-x 2 root root  4096 Aug 23 11:08 resource.d
> -rw-r--r-- 1 root root  7184 Aug 22 17:25 shellfuncs
> 
> 
> ========
> authkeys
> ========
> 
> [EMAIL PROTECTED] scripts]# cat /etc/ha.d/authkeys
> auth 1
> 1 crc
> 
> =====
> ha.cf
> =====
> 
> [EMAIL PROTECTED] scripts]# cat /etc/ha.d/ha.cf
> #       Facility to use for syslog()/logger
> #
> logfacility     local0
> #       What interfaces to broadcast heartbeats over?
> #
> bcast   eth0        # Linux
> #       The default value for auto_failback is "legacy", which
> #       will issue a warning at startup.  So, make sure you put
> #       an auto_failback directive in your ha.cf file.
> #       (note: auto_failback can be any boolean or "legacy")
> #
> auto_failback on
> #       Tell what machines are in the cluster
> #       node    nodename ...    -- must match uname -n
> node    IanVM-4 IanVM-5
> 
> #       Do we use logging daemon?
> #       If logging daemon is used, logfile/debugfile/logfacility in this file
> #       are not meaningful any longer. You should check the config
> file for logging
> #       daemon (the default is /etc/logd.cf)
> #       more information can be found at
> http://www.linux-ha.org/ha_2ecf_2fUseLogdDirective
> #       Setting use_logd to "yes" is recommended
> #
> use_logd yes
> 
> crm on
> 
> 
> ==========
> haresource
> ==========
> 
> [EMAIL PROTECTED] scripts]# cat /etc/ha.d/haresource
> IanVM-4 192.168.1.230 my_server
> 
> =======
> cib.xml
> =======
> 
> [EMAIL PROTECTED] scripts]# cat /usr/local/var/lib/heartbeat/crm/cib.xml
> <?xml version="1.0" ?>
> <cib admin_epoch="0" epoch="0" num_updates="0">
>     <configuration>
>         <crm_config>
>             <cluster_property_set id="cib-bootstrap-options">
>                 <attributes>
>                     <nvpair
> id="cib-bootstrap-options-symmetric-cluster" name="symmetric-cluster"
> value="true"/>
>                     <nvpair
> id="cib-bootstrap-options-no_quorum-policy" name="no_quorum-policy"
> value="stop"/>
>                     <nvpair
> id="cib-bootstrap-options-default-resource-stickiness"
> name="default-resource-stickiness" value="0"/>
>                     <nvpair
> id="cib-bootstrap-options-default-resource-failure-stickiness"
> name="default-resource-failure-stickiness" value="0"/>
>                     <nvpair id="cib-bootstrap-options-stonith-enabled"
> name="stonith-enabled" value="false"/>
>                     <nvpair id="cib-bootstrap-options-stonith-action"
> name="stonith-action" value="reboot"/>
>                     <nvpair
> id="cib-bootstrap-options-stop-orphan-resources"
> name="stop-orphan-resources" value="true"/>
>                     <nvpair
> id="cib-bootstrap-options-stop-orphan-actions"
> name="stop-orphan-actions" value="true"/>
>                     <nvpair
> id="cib-bootstrap-options-remove-after-stop" name="remove-after-stop"
> value="false"/>
>                     <nvpair
> id="cib-bootstrap-options-short-resource-names"
> name="short-resource-names" value="true"/>
>                     <nvpair
> id="cib-bootstrap-options-transition-idle-timeout"
> name="transition-idle-timeout" value="5min"/>
>                     <nvpair
> id="cib-bootstrap-options-default-action-timeout"
> name="default-action-timeout" value="5s"/>
>                     <nvpair
> id="cib-bootstrap-options-is-managed-default"
> name="is-managed-default" value="true"/>
>                 </attributes>
>             </cluster_property_set>
>         </crm_config>
>         <nodes/>
>         <resources>
>             <group id="group_1">
>                 <primitive class="ocf" id="IPaddr_192_168_1_230"
> provider="heartbeat" type="IPaddr">
>                     <operations>
>                         <op id="IPaddr_192_168_1_230_mon"
> interval="5s" name="monitor" timeout="5s"/>
>                     </operations>
>                     <instance_attributes id="IPaddr_192_168_1_230_inst_attr">
>                         <attributes>
>                             <nvpair id="IPaddr_192_168_1_230_attr_0"
> name="ip" value="192.168.1.230"/>
>                         </attributes>
>                     </instance_attributes>
>                 </primitive>
>                 <primitive class="lsb" id="my_server_2"
> provider="heartbeat" type="my_server">
>                     <operations>
>                         <op id="my_server_2_mon" interval="120s"
> name="monitor" timeout="60s"/>
>                     </operations>
>                 </primitive>
>             </group>
>         </resources>
>         <constraints>
>             <rsc_location id="rsc_location_group_1" rsc="group_1">
>                 <rule id="prefered_location_group_1" score="100">
>                     <expression attribute="#uname"
> id="prefered_location_group_1_expr" operation="eq" value="IanVM-4"/>
>                 </rule>
>             </rsc_location>
>         </constraints>
>     </configuration>
>     <status/>
> </cib>
> 
> 
> ===============
> Start heartbeat
> ===============
> [EMAIL PROTECTED] scripts]# /etc/init.d/heartbeat start
> Starting High-Availability services:
>                                                            [  OK  ]
> [EMAIL PROTECTED] scripts]# /etc/init.d/heartbeat start
> Starting High-Availability services:
>                                                            [  OK  ]
> 
> ===============
> Resource status
> ===============
> 
> [EMAIL PROTECTED] scripts]# /usr/sbin/crm_mon
> Refresh in 9s...
> 
> ============
> Last updated: Thu Aug 23 11:49:54 2007
> Current DC: ianvm-5 (7b2440d8-b23a-4192-b538-a8168a5414c9)
> 2 Nodes configured.
> 0 Resources configured.
> ============
> 
> Node: ianvm-4 (7299b236-da3e-4748-b284-be59b5d09ee7): online
> Node: ianvm-5 (7b2440d8-b23a-4192-b538-a8168a5414c9): online
> 

The configuration looks OK, given that you applied it properly.
The crm_mon output looks like one for an empty config.

Otherwise, there's nothing one can say without seeing the logs.
Apart from the fact that your resource agent is no good.

At the bottom of every post to this list and on the linux-ha.org
pages you can find a procedure on how to report problems. Did you
read it? Also, my impression from your posts is that you should
try and make more effort in learning a bit about Heartbeat.

> 
> -- 
> Ian Jiang
> _______________________________________________
> Linux-HA mailing list
> [email protected]
> http://lists.linux-ha.org/mailman/listinfo/linux-ha
> See also: http://linux-ha.org/ReportingProblems
_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems

Reply via email to