Hi all,

 I have installed two KVM guests (CentOS 6.3) to do some tests with
RHCS on a CentOS 6.3 KVM host. As the fence device I am trying to use
fence_virt, but it doesn't work for me.

 fence_virt.conf on the KVM host is:

fence_virtd {
        listener = "multicast";
        backend = "libvirt";
}

listeners {
        multicast {
                key_file = "/etc/fence_virt.key";
                interface = "siemif";
                address = "225.0.0.12";
                family = "ipv4";
        }
}

backends {
        libvirt {
                uri = "qemu:///system";
        }
}

fence_virt.key is located in the /etc directory:

-r-------- 1 root root 18 Jul 12 09:48 /etc/fence_virt.key
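
For reference, this is roughly how I restart the daemon and check its
multicast listener on the host after changing the config (1229 is the
fence_virt default port as far as I know, and the -F/-d foreground/debug
flags may differ in other builds):

# on the KVM host
service fence_virtd restart
chkconfig fence_virtd on
netstat -ulnp | grep 1229    # the fence_virtd multicast listener should be bound here
# for more detail, stop the service and run it in the foreground with debugging:
# fence_virtd -F -d99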

cluster.conf on both KVM guest nodes is:

<?xml version="1.0"?>
<cluster config_version="1" name="TestCluster">
        <cman expected_votes="1" two_node="1"/>
        <clusternodes>
                <clusternode name="cosnode01.domain.local" nodeid="1">
                        <fence>
                                <method name="kvm">
                                        <device action="reboot" port="cosnode01" name="kvm_cosnode01"/>
                                </method>
                        </fence>
                </clusternode>
                <clusternode name="cosnode02.domain.local" nodeid="2">
                        <fence>
                                <method name="kvm">
                                        <device action="reboot" port="cosnode02" name="kvm_cosnode02"/>
                                </method>
                        </fence>
                </clusternode>
        </clusternodes>
        <fencedevices>
                <fencedevice agent="fence_virt" ip_family="ipv4" multicast_address="225.0.0.12" key_file="/etc/cluster/fence_virt.key" name="kvm_cosnode01"/>
                <fencedevice agent="fence_virt" ip_family="ipv4" multicast_address="225.0.0.12" key_file="/etc/cluster/fence_virt.key" name="kvm_cosnode02"/>
        </fencedevices>
        <fence_daemon post_join_delay="30"/>
        <totem rrp_mode="none" secauth="off"/>
        <rm log_level="5">
                <failoverdomains>
                        <failoverdomain name="only_node01" nofailback="1" 
ordered="0" restricted="1">
                                <failoverdomainnode 
name="cosnode01.domain.local"/>
                        </failoverdomain>
                        <failoverdomain name="only_node02" nofailback="1" 
ordered="0" restricted="1">
                                <failoverdomainnode 
name="cosnode02.domain.local"/>
                        </failoverdomain>
                        <failoverdomain name="primary_clu01" nofailback="1" 
ordered="1"
restricted="1">
                                <failoverdomainnode 
name="cosnode01.domain.local" priority="1"/>
                                <failoverdomainnode 
name="cosnode02.domain.local" priority="2"/>
                        </failoverdomain>
                        <failoverdomain name="primary_clu02" nofailback="1" 
ordered="1"
restricted="1">
                                <failoverdomainnode 
name="cosnode01.domain.local" priority="2"/>
                                <failoverdomainnode 
name="cosnode02.domain.local" priority="1"/>
                        </failoverdomain>
                </failoverdomains>
        </rm>
</cluster>

Of course, fence_virt.key is copied to the /etc/cluster directory on both nodes.
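
To check the multicast path from inside a guest, my understanding is that
fence_xvm (the multicast counterpart of fence_virt) can ask the host to
list its domains; roughly, assuming the default port and hash settings:

# on either guest node
fence_xvm -a 225.0.0.12 -k /etc/cluster/fence_virt.key -o list
# should print the libvirt domain names (cosnode01, cosnode02) with their UUIDs and state

When that works, the key and the multicast/return path between guest and
host should be fine.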

On cosnode01 I see this error:

fenced[4074]: fence cosnode02.domain.local dev 0.0 agent fence_virt
result: error from agent
fenced[4074]: fence cosnode02.domain.loca failed
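
To get more than "error from agent", I can try the same fence operation by
hand from cosnode01, either with fence_node cosnode02.domain.local or
directly with the agent (again just a sketch with default settings):

# on cosnode01, ask the host to reboot the cosnode02 domain
fence_xvm -a 225.0.0.12 -k /etc/cluster/fence_virt.key -H cosnode02 -o reboot
echo $?    # 0 should mean the host acknowledged and performed the fence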

What am I doing wrong? Do I need to modify libvirtd.conf so that it
listens on the siemif interface?

Thanks.
