Hi Ranjeet,

Thank you for the pointer on the compute nodes. We aren't actually doing
any OpenStack / virtual machine work, so the only reason I even provisioned
compute was to turn up TOR/TSN once the basics are working. I'll go ahead
and set up a separate host for compute.

* Your testbed.py

Attached (appended at the end of this mail).

* keepalived.conf in /etc/keepalived


vrrp_script chk_haproxy_INTERNAL_10_10_10_10 {
    script "killall -0 haproxy"   # verify the haproxy pid exists
    interval 1
    timeout 3
    rise 2
    fall 2
}

vrrp_script chk_ctrldatanet_INTERNAL_10_10_10_10 {
    script "/opt/contrail/bin/chk_ctrldata.sh"
    interval 1
    timeout 3
    rise 1
    fall 1
}

vrrp_instance INTERNAL_10_10_10_10 {
    interface eth0
    state MASTER
    preempt_delay 7
    garp_master_delay 5
    garp_master_repeat 3
    garp_master_refresh 1
    advert_int 1
    virtual_router_id 100
    vmac_xmit_base
    priority 100
    virtual_ipaddress {
        10.10.10.10/28 dev eth0
    }
    track_script {
        chk_haproxy_INTERNAL_10_10_10_10
        chk_ctrldatanet_INTERNAL_10_10_10_10
    }
    track_interface {
        eth0
    }
}
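
If it helps, I can also run the two track scripts by hand on each controller
and confirm they exit 0 (a failing tracked script should fault the instance
out of MASTER). Commands taken straight from the config above:

root@contrail-ctrl1:~# killall -0 haproxy; echo $?
root@contrail-ctrl1:~# /opt/contrail/bin/chk_ctrldata.sh; echo $?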



* “ip a s” in all three controllers


root@contrail-ctrl1:~# ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:0c:29:3c:78:40 brd ff:ff:ff:ff:ff:ff
    inet 10.10.10.10/28 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe3c:7840/64 scope link
       valid_lft forever preferred_lft forever
3: pkt1: <> mtu 65535 qdisc noop state DOWN group default
    link/void 02:8c:c5:60:50:a4 brd 00:00:00:00:00:00
4: pkt3: <> mtu 65535 qdisc noop state DOWN group default
    link/void 2a:0f:6c:45:3b:2d brd 00:00:00:00:00:00
5: pkt2: <> mtu 65535 qdisc noop state DOWN group default
    link/void 36:48:d4:53:62:e1 brd 00:00:00:00:00:00
6: vhost0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN group default qlen 1000
    link/ether 00:0c:29:3c:78:40 brd ff:ff:ff:ff:ff:ff
    inet 10.10.10.11/28 brd 10.10.10.15 scope global vhost0
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe3c:7840/64 scope link
       valid_lft forever preferred_lft forever
7: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
    link/ether fe:8b:4f:4d:ea:07 brd ff:ff:ff:ff:ff:ff
    inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
       valid_lft forever preferred_lft forever
8: pkt0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN group default qlen 500
    link/ether a2:fe:56:9d:31:d3 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::a0fe:56ff:fe9d:31d3/64 scope link
       valid_lft forever preferred_lft forever

root@contrail-ctrl2:~# ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:0c:29:a8:f8:75 brd ff:ff:ff:ff:ff:ff
    inet 10.10.10.10/28 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fea8:f875/64 scope link
       valid_lft forever preferred_lft forever
3: pkt1: <> mtu 65535 qdisc noop state DOWN group default
    link/void ee:ee:d5:be:b2:da brd 00:00:00:00:00:00
4: pkt3: <> mtu 65535 qdisc noop state DOWN group default
    link/void b2:3c:0c:35:3a:ec brd 00:00:00:00:00:00
5: pkt2: <> mtu 65535 qdisc noop state DOWN group default
    link/void 12:1f:47:41:fe:76 brd 00:00:00:00:00:00
6: vhost0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN group default qlen 1000
    link/ether 00:0c:29:a8:f8:75 brd ff:ff:ff:ff:ff:ff
    inet 10.10.10.12/28 brd 10.10.10.15 scope global vhost0
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fea8:f875/64 scope link
       valid_lft forever preferred_lft forever
7: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
    link/ether 92:2b:3d:68:2f:06 brd ff:ff:ff:ff:ff:ff
    inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
       valid_lft forever preferred_lft forever
8: pkt0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN group default qlen 500
    link/ether be:23:b2:35:77:d4 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::bc23:b2ff:fe35:77d4/64 scope link
       valid_lft forever preferred_lft forever

root@contrail-ctrl3:~# ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:0c:29:31:bc:2a brd ff:ff:ff:ff:ff:ff
    inet 10.10.10.10/28 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe31:bc2a/64 scope link
       valid_lft forever preferred_lft forever
3: pkt1: <> mtu 65535 qdisc noop state DOWN group default
    link/void 4e:6e:2b:09:36:b8 brd 00:00:00:00:00:00
4: pkt3: <> mtu 65535 qdisc noop state DOWN group default
    link/void ea:7b:f6:70:e0:85 brd 00:00:00:00:00:00
5: pkt2: <> mtu 65535 qdisc noop state DOWN group default
    link/void ea:72:86:5e:92:6e brd 00:00:00:00:00:00
6: vhost0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN group default qlen 1000
    link/ether 00:0c:29:31:bc:2a brd ff:ff:ff:ff:ff:ff
    inet 10.10.10.13/28 brd 10.10.10.15 scope global vhost0
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe31:bc2a/64 scope link
       valid_lft forever preferred_lft forever
7: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
    link/ether 52:80:7d:6d:1b:76 brd ff:ff:ff:ff:ff:ff
    inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
       valid_lft forever preferred_lft forever
8: pkt0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN group default qlen 500
    link/ether 0a:9c:03:a7:0a:e5 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::89c:3ff:fea7:ae5/64 scope link
       valid_lft forever preferred_lft forever


* /etc/network/interfaces in all three controllers


root@contrail-ctrl1:~# cat /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).

# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet manual
    pre-up ifconfig eth0 up
    post-down ifconfig eth0 down
    pre-up ethtool --offload eth0 rx off
    pre-up ethtool --offload eth0 tx off


auto vhost0
iface vhost0 inet static
    pre-up /opt/contrail/bin/if-vhost0
    netmask 255.255.255.240
    network_name application
    address 10.10.10.11
    gateway 10.10.10.1
    dns-search zone1.ord6.hannibal.scnet.net
    dns-nameservers 172.16.18.1


root@contrail-ctrl2:~# cat /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).

# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet manual
    pre-up ifconfig eth0 up
    post-down ifconfig eth0 down
    pre-up ethtool --offload eth0 rx off
    pre-up ethtool --offload eth0 tx off


auto vhost0
iface vhost0 inet static
    pre-up /opt/contrail/bin/if-vhost0
    netmask 255.255.255.240
    network_name application
    address 10.10.10.12
    gateway 10.10.10.1
    dns-search zone1.ord6.hannibal.scnet.net
    dns-nameservers 172.16.18.1


root@contrail-ctrl3:~# cat /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).

# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eth0
iface eth0 inet manual
    pre-up ifconfig eth0 up
    post-down ifconfig eth0 down
    pre-up ethtool --offload eth0 rx off
    pre-up ethtool --offload eth0 tx off


auto vhost0
iface vhost0 inet static
    pre-up /opt/contrail/bin/if-vhost0
    netmask 255.255.255.240
    network_name application
    address 10.10.10.13
    gateway 10.10.10.1
    dns-search zone1.ord6.hannibal.scnet.net
    dns-nameservers 172.16.18.1
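
One more data point I can grab if it would help: assuming our keepalived
build supports the signal-triggered state dump, and that the pid file is in
the usual /var/run location (both are assumptions on my part), this should
show what each node currently thinks its VRRP state is:

root@contrail-ctrl1:~# kill -USR1 $(cat /var/run/keepalived.pid)
root@contrail-ctrl1:~# grep -i state /tmp/keepalived.data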

On Thu, Aug 20, 2015 at 12:50 PM, Ranjeet R <[email protected]> wrote:

> Resending to make sure it's sent to the list. Had some issues with list
> subscription.
>
>
>
> Ranjeet
>
>
>
> *From:* Ranjeet R
> *Sent:* Thursday, August 20, 2015 10:39 AM
> *To:* Sunil Bakhru <[email protected]>; Dan Houtz <[email protected]>;
> [email protected]
> *Cc:* Suresh Kumar Vinapamula Venkata <[email protected]>; Sanju
> Abraham <[email protected]>
> *Subject:* RE: [Users] Problems with VRRP/Keepalived on HA install
>
>
>
> Dan
>
>
>
> VRRP advertisements are sourced from the local IP address in keepalived as
> well.
>
>
>
> tcpdump: verbose output suppressed, use -v or -vv for full protocol decode
>
> listening on p1p1, link-type EN10MB (Ethernet), capture size 65535 bytes
>
> 10:29:38.438090 IP 10.84.24.32 > 224.0.0.18: VRRPv2, Advertisement, vrid 112, prio 99, authtype none, intvl 1s, length 20
>
>
>
> The VIP address does not show up in ifconfig, hence I am not sure how you have
> configured your network interfaces. The following will help us debug this
> further -
>
>
>
> * Your testbed.py
>
> *  keepalived.conf in /etc/keepalived
>
> * “ip a s” in all three controllers
>
> * /etc/network/interfaces in all three controllers
>
>
>
> I also see that you are using the controller as a compute node as well,
> which is not supported in HA mode because of an existing bug in vrouter in
> handling multicast packets. I would recommend using a separate compute
> node.
>
>
>
> Ranjeet
>
>
>
>
>
> *From:* Sunil Bakhru
> *Sent:* Thursday, August 20, 2015 10:25 AM
> *To:* Dan Houtz <[email protected]>; [email protected]
> *Cc:* Ranjeet R <[email protected]>; Suresh Kumar Vinapamula Venkata <
> [email protected]>; Sanju Abraham <[email protected]>
> *Subject:* Re: [Users] Problems with VRRP/Keepalived on HA install
>
>
>
> Dan,
>
>
>
>    Something is not adding up in your setup. All three controllers should
> not be the master.
>
> I am cc’ing the HA team members to follow up with you regarding this
> issue.
>
>
>
> Thanks,
>
> Sunil
>
>
>
> *From: *Dan Houtz <[email protected]>
> *Date: *Thursday, August 20, 2015 at 9:05 AM
> *To: *"[email protected]" <[email protected]>
> *Subject: *[Users] Problems with VRRP/Keepalived on HA install
>
>
>
> Hi everyone,
>
> We are currently working on our first HA build of Contrail as up until now
> we've only tested on single-node installs.
>
> At this point the install seems to work well but we are seeing an issue with
> keepalived and the VIP address. For some odd reason all 3 Contrail nodes
> are master for the VIP despite tcpdump showing each box receiving vrrp
> multicast packets from each other and showing each with a different
> priority.
>
> Should the mcast packets be sourced from the VIP? I see our Juniper boxes
> source their VRRP packets from the interface's local address.
>
> I've done plenty of VRRP on hardware routers but don't have any previous
> experience with keepalived, so if anyone has any pointers it would be
> greatly appreciated. If there is any other data that would help to diagnose
> I am happy to provide it.
>
> Thanks!
>
> Dan
>
>
>
> root@contrail-ctrl1:~# tcpdump -n vrrp
> tcpdump: verbose output suppressed, use -v or -vv for full protocol decode
> listening on eth0, link-type EN10MB (Ethernet), capture size 65535 bytes
> 10:58:41.487505 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, vrid 100, prio 99, authtype none, intvl 1s, length 20
> 10:58:41.564731 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, (ttl 254), vrid 100, prio 99, authtype none, intvl 1s, length 20
> 10:58:42.156237 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, vrid 100, prio 100, authtype none, intvl 1s, length 20
> 10:58:42.178271 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, vrid 100, prio 98, authtype none, intvl 1s, length 20
> 10:58:42.198405 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, (ttl 254), vrid 100, prio 98, authtype none, intvl 1s, length 20
> 10:58:42.567434 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, vrid 100, prio 99, authtype none, intvl 1s, length 20
> 10:58:42.567685 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, (ttl 254), vrid 100, prio 99, authtype none, intvl 1s, length 20
> 10:58:43.157471 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, vrid 100, prio 100, authtype none, intvl 1s, length 20
> 10:58:43.178394 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, vrid 100, prio 98, authtype none, intvl 1s, length 20
> 10:58:43.178440 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, (ttl 254), vrid 100, prio 98, authtype none, intvl 1s, length 20
>
>
> root@contrail-ctrl2:~# tcpdump -n vrrp
> tcpdump: verbose output suppressed, use -v or -vv for full protocol decode
> listening on eth0, link-type EN10MB (Ethernet), capture size 65535 bytes
> 10:57:46.197718 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, vrid 100, prio 98, authtype none, intvl 1s, length 20
> 10:57:46.197844 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, (ttl 254), vrid 100, prio 98, authtype none, intvl 1s, length 20
> 10:57:46.411012 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, vrid 100, prio 99, authtype none, intvl 1s, length 20
> 10:57:46.749765 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, vrid 100, prio 100, authtype none, intvl 1s, length 20
> 10:57:46.749907 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, (ttl 254), vrid 100, prio 100, authtype none, intvl 1s, length 20
> 10:57:47.209753 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, vrid 100, prio 98, authtype none, intvl 1s, length 20
>
>
> root@contrail-ctrl3:~# tcpdump -n vrrp
> tcpdump: verbose output suppressed, use -v or -vv for full protocol decode
> listening on eth0, link-type EN10MB (Ethernet), capture size 65535 bytes
> 10:59:19.946640 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, vrid 100, prio 99, authtype none, intvl 1s, length 20
> 10:59:19.949357 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, (ttl 254), vrid 100, prio 99, authtype none, intvl 1s, length 20
> 10:59:20.248377 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, vrid 100, prio 98, authtype none, intvl 1s, length 20
> 10:59:20.631787 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, vrid 100, prio 100, authtype none, intvl 1s, length 20
> 10:59:20.631853 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, (ttl 254), vrid 100, prio 100, authtype none, intvl 1s, length 20
> 10:59:21.067412 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, vrid 100, prio 99, authtype none, intvl 1s, length 20
> 10:59:21.067490 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, (ttl 254), vrid 100, prio 99, authtype none, intvl 1s, length 20
> 10:59:21.332021 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, vrid 100, prio 98, authtype none, intvl 1s, length 20
> 10:59:21.642771 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, vrid 100, prio 100, authtype none, intvl 1s, length 20
> 10:59:21.645999 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, (ttl 254), vrid 100, prio 100, authtype none, intvl 1s, length 20
> 10:59:22.053416 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, vrid 100, prio 99, authtype none, intvl 1s, length 20
> 10:59:22.059508 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, (ttl 254), vrid 100, prio 99, authtype none, intvl 1s, length 20
> 10:59:22.404645 IP 10.10.10.10 > 224.0.0.18: VRRPv2, Advertisement, vrid 100, prio 98, authtype none, intvl 1s, length 20
>
> root@contrail-ctrl1:~# ifconfig eth0
> eth0      Link encap:Ethernet  HWaddr 00:0c:29:3c:78:40
>           inet addr:10.10.10.10  Bcast:0.0.0.0  Mask:255.255.255.240
>           inet6 addr: fe80::20c:29ff:fe3c:7840/64 Scope:Link
>           UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
>           RX packets:2957429 errors:0 dropped:0 overruns:0 frame:0
>           TX packets:2970106 errors:0 dropped:0 overruns:0 carrier:0
>           collisions:0 txqueuelen:1000
>           RX bytes:693687361 (693.6 MB)  TX bytes:857618328 (857.6 MB)
>
> root@contrail-ctrl1:~# ifconfig vhost0
> vhost0    Link encap:Ethernet  HWaddr 00:0c:29:3c:78:40
>           inet addr:10.10.10.11  Bcast:10.10.10.15  Mask:255.255.255.240
>           inet6 addr: fe80::20c:29ff:fe3c:7840/64 Scope:Link
>           UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
>           RX packets:2278044 errors:0 dropped:0 overruns:0 frame:0
>           TX packets:2221017 errors:0 dropped:0 overruns:0 carrier:0
>           collisions:0 txqueuelen:1000
>           RX bytes:644167184 (644.1 MB)  TX bytes:5245411872 (5.2 GB)
>
>
> root@contrail-ctrl2:~# ifconfig eth0
> eth0      Link encap:Ethernet  HWaddr 00:0c:29:a8:f8:75
>           inet addr:10.10.10.10  Bcast:0.0.0.0  Mask:255.255.255.240
>           inet6 addr: fe80::20c:29ff:fea8:f875/64 Scope:Link
>           UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
>           RX packets:4370215 errors:0 dropped:0 overruns:0 frame:0
>           TX packets:4785945 errors:0 dropped:0 overruns:0 carrier:0
>           collisions:0 txqueuelen:1000
>           RX bytes:929798623 (929.7 MB)  TX bytes:907980495 (907.9 MB)
>
> root@contrail-ctrl2:~# ifconfig vhost0
> vhost0    Link encap:Ethernet  HWaddr 00:0c:29:a8:f8:75
>           inet addr:10.10.10.12  Bcast:10.10.10.15  Mask:255.255.255.240
>           inet6 addr: fe80::20c:29ff:fea8:f875/64 Scope:Link
>           UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
>           RX packets:3664174 errors:0 dropped:0 overruns:0 frame:0
>           TX packets:4159902 errors:0 dropped:0 overruns:0 carrier:0
>           collisions:0 txqueuelen:1000
>           RX bytes:878825685 (878.8 MB)  TX bytes:2547421114 (2.5 GB)
>
> root@contrail-ctrl3:~# ifconfig eth0
> eth0      Link encap:Ethernet  HWaddr 00:0c:29:31:bc:2a
>           inet addr:10.10.10.10  Bcast:0.0.0.0  Mask:255.255.255.240
>           inet6 addr: fe80::20c:29ff:fe31:bc2a/64 Scope:Link
>           UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
>           RX packets:539492 errors:0 dropped:0 overruns:0 frame:0
>           TX packets:411385 errors:0 dropped:0 overruns:0 carrier:0
>           collisions:0 txqueuelen:1000
>           RX bytes:266613501 (266.6 MB)  TX bytes:73900674 (73.9 MB)
>
>
> root@contrail-ctrl3:~# ifconfig vhost0
> vhost0    Link encap:Ethernet  HWaddr 00:0c:29:31:bc:2a
>           inet addr:10.10.10.13  Bcast:10.10.10.15  Mask:255.255.255.240
>           inet6 addr: fe80::20c:29ff:fe31:bc2a/64 Scope:Link
>           UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
>           RX packets:406152 errors:0 dropped:0 overruns:0 frame:0
>           TX packets:395289 errors:0 dropped:0 overruns:0 carrier:0
>           collisions:0 txqueuelen:1000
>           RX bytes:258079952 (258.0 MB)  TX bytes:73573553 (73.5 MB)
>
>
--- testbed.py (attached) ---

from fabric.api import env

#Management ip addresses of hosts in the cluster
host1 = '[email protected]'
host2 = '[email protected]'
host3 = '[email protected]'
#host4 = '[email protected]'
#host5 = '[email protected]'


#External routers if any
#for eg. 
#ext_routers = [('mx1', '10.204.216.253')]
ext_routers = [('gw1z0', '192.168.1.70'),
                ('gw2z0', '192.168.1.71')]

#Autonomous system number
router_asn = 65412

#Host from which the fab commands are triggered to install and provision
host_build = '[email protected]'


#Role definition of the hosts.
env.roledefs = {
    'all': [host1, host2, host3],
    'cfgm': [host1, host2, host3],
    'openstack': [host1, host2, host3],
    'control': [host1, host2, host3],
    'compute': [host1, host2, host3],
    'collector': [host1, host2, host3],
    'webui': [host1, host2, host3],
    'database': [host1, host2, host3],
    'build': [host_build],
    #'storage-master': [host1],
    #'storage-compute': [host4, host5, host6, host7, host8, host9, host10],
    # 'vgw': [host4, host5], # Optional, Only to enable VGW. Only compute can support vgw
    #'tsn': [host4, host5], # Optional, Only to enable TSN. Only compute can support TSN
    #'toragent': [host4, host5], #Optional, Only to enable Tor Agent. Only compute can
    # support Tor Agent
    #   'backup':[backup_node],  # only if the backup_node is defined
}
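
# NOTE (per the vrouter multicast bug mentioned on the list -- running compute
# on the controllers is unsupported in HA): the plan is to move the 'compute'
# role to a dedicated node, e.g. by uncommenting host4 above and changing the
# role to something like:
#    'compute': [host4],
# (the host4 address above is only the commented-out sample; the real compute
# node's address may differ)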

env.hostnames = {
    #'all': ['contrail-ctrl1.zone1.ord6', 'contrail-ctrl2.zone1.ord6', 'contrail-ctrl3.zone1.ord6','contrail-tsn1.zone1.ord6', 'contrail-tsn2.zone1.ord6']
    'all': ['contrail-ctrl1.zone1.ord6', 'contrail-ctrl2.zone1.ord6', 'contrail-ctrl3.zone1.ord6']
}

#Openstack admin password
env.openstack_admin_password = 'password'

# Passwords of each host
# for passwordless logins, no need to set env.passwords;
# instead populate env.key_filename in testbed.py with the public key.
env.passwords = {
    host1: 'password',
    host2: 'password',
    host3: 'password',
#    host4: 'password',
#    host5: 'password',
#    host6: 'secret',
#    host7: 'secret',
#    host8: 'secret',
#    host9: 'secret',
#    host10: 'secret',
    #  backup_node: 'secret',
#    host_build: 'secret',
}

# SSH Public key file path for passwordless logins
# if env.passwords is not specified.
#env.key_filename = '/root/.ssh/id_rsa.pub'

#For reimage purpose
env.ostypes = {
    host1: 'ubuntu',
    host2: 'ubuntu',
    host3: 'ubuntu',
#    host4: 'ubuntu',
#    host5: 'ubuntu',
}
#env.orchestrator = 'openstack' #other values are 'vcenter', 'none' default:openstack

#ntp server the servers should point to
env.ntp_server = 'time.domain.net'

# OPTIONAL COMPUTE HYPERVISOR CHOICE:
#======================================
# Compute Hypervisor
#env.hypervisor = {
#    host5: 'docker',
#    host6: 'libvirt',
#    host10: 'docker',
#}
#  Specify the hypervisor to be provisioned in the compute node.(Default=libvirt)


# INFORMATION FOR DB BACKUP/RESTORE ..
#=======================================================
#Optional, backup host configuration; if it is not defined, backups will be put on localhost
#backup_node = '[email protected]'

# Optional, Local/Remote location of backup_data path
# if it is not passed then it will use default path
#backup_db_path= ['/home/','/root/']
#cassandra backup can be defined as either "full" or "custom"
#full -> take a complete snapshot of the cassandra DB
#custom -> take a snapshot of everything except the keyspaces defined in skip_keyspace
#cassandra_backup='custom'  [ MUST OPTION]
#skip_keyspace=["ContrailAnalytics"]  IF cassandra_backup is selected as custom
#a service token needs to be defined to restore the backup data
#service_token = '53468cf7552bbdc3b94f'


#OPTIONAL ANALYTICS CONFIGURATION
#================================
# database_dir is the directory where cassandra data is stored
#
# If it is not passed, we will use cassandra's default
# /var/lib/cassandra/data
#
#database_dir = '<separate-partition>/cassandra'
#
# analytics_data_dir is the directory where cassandra data for analytics
# is stored. This is used to separate cassandra's main data storage [internal
# use and config data] from analytics data. That way cassandra's critical
# system data and config data are not overrun by analytics data
#
# If it is not passed, we will use cassandra's default
# /var/lib/cassandra/data
#
#analytics_data_dir = '<separate-partition>/analytics_data'
#
# ssd_data_dir is the directory where cassandra can store fast retrievable
# temporary files (commit_logs). Giving cassandra an ssd disk for this
# purpose improves cassandra performance
#
# If it is not passed, we will use cassandra's default
# /var/lib/cassandra/commit_logs
#
#ssd_data_dir = '<separate-partition>/commit_logs_data'

#the following variables allow analytics data to have different TTLs in the cassandra database
#analytics_config_audit_ttl controls TTL for config audit logs
#analytics_statistics_ttl controls TTL for stats
#analytics_flow_ttl controls TTL for flow data
#database_ttl controls TTL for rest of the data
#
#database_ttl = 48
#analytics_config_audit_ttl = 48
#analytics_statistics_ttl = 48
#analytics_flow_ttl = 48

#the following parameter specifies the minimum amount of disk space required in the analytics
#database partition; if that much space is not present, provisioning will fail
#minimum_diskGB = 10

#OPTIONAL BONDING CONFIGURATION
#==============================
#Interface Bonding
#bond= {
#    host1 : { 'name': 'bond0', 'member': ['p1p1','p1p2'], 'mode': '802.3ad', 'xmit_hash_policy': 'layer3+4' },
#    host2 : { 'name': 'bond0', 'member': ['p1p1','p1p2'], 'mode': '802.3ad', 'xmit_hash_policy': 'layer3+4' },
#    host3 : { 'name': 'bond0', 'member': ['p1p1','p1p2'], 'mode': '802.3ad', 'xmit_hash_policy': 'layer3+4' },
#    host4 : { 'name': 'bond0', 'member': ['p2p1','p2p2'], 'mode': '802.3ad', 'xmit_hash_policy': 'layer3+4' },
#    host5 : { 'name': 'bond0', 'member': ['p2p1','p2p2'], 'mode': '802.3ad', 'xmit_hash_policy': 'layer3+4' },
#}

#OPTIONAL SEPARATION OF MANAGEMENT AND CONTROL + DATA and OPTIONAL VLAN INFORMATION
#==================================================================================
#control_data = {
#    host1 : { 'ip': '10.10.10.4/28', 'gw' : '10.10.10.1', 'device':'bond0' },
#    host2 : { 'ip': '10.10.10.5/28', 'gw' : '10.10.10.1', 'device':'bond0' },
#    host3 : { 'ip': '10.10.10.6/28', 'gw' : '10.10.10.1', 'device':'bond0' },
#    host4 : { 'ip': '10.10.10.7/28', 'gw' : '10.10.10.1', 'device':'bond0' },
#    host5 : { 'ip': '10.10.10.8/28', 'gw' : '10.10.10.1', 'device':'bond0' },
#}

#OPTIONAL STATIC ROUTE CONFIGURATION
#===================================
#static_route  = {
#    host1 : [{ 'ip': '172.18.0.0', 'netmask' : '255.255.0.0', 'gw':'172.18.11.1', 'intf': 'em1' },
#             { 'ip': '10.30.55.0', 'netmask' : '255.255.255.0', 'gw':'172.18.11.1', 'intf': 'em1' }],
#    host2 : [{ 'ip': '172.18.0.0', 'netmask' : '255.255.0.0', 'gw':'172.18.11.1', 'intf': 'em1' },
#             { 'ip': '10.30.55.0', 'netmask' : '255.255.255.0', 'gw':'172.18.11.1', 'intf': 'em1' }],
#    host3 : [{ 'ip': '172.18.0.0', 'netmask' : '255.255.0.0', 'gw':'172.18.11.1', 'intf': 'em1' },
#             { 'ip': '10.30.55.0', 'netmask' : '255.255.255.0', 'gw':'172.18.11.1', 'intf': 'em1' }],
#    host4 : [{ 'ip': '172.18.0.0', 'netmask' : '255.255.0.0', 'gw':'172.18.11.1', 'intf': 'em1' },
#             { 'ip': '10.30.55.0', 'netmask' : '255.255.255.0', 'gw':'172.18.11.1', 'intf': 'em1' }],
#    host5 : [{ 'ip': '172.18.0.0', 'netmask' : '255.255.0.0', 'gw':'172.18.11.1', 'intf': 'em1' },
#             { 'ip': '10.30.55.0', 'netmask' : '255.255.255.0', 'gw':'172.18.11.1', 'intf': 'em1' }],
#}

#storage compute disk config
#storage_node_config = {
#    host4 : { 'disks' : ['/dev/sdc', '/dev/sdd'], 'journal' : ['/dev/sde', '/dev/sdf'] },
#    host5 : { 'disks' : ['/dev/sdc:/dev/sde', '/dev/sdd:/dev/sde'], 'ssd-disks' : ['/dev/sdf', '/dev/sdg'] },
#    host6 : { 'disks' : ['/dev/sdc', '/dev/sdd'], 'local-disks' : ['/dev/sde'], 'local-ssd-disks' : ['/dev/sdf'] },
#    host7 : { 'nfs' : ['10.10.10.10:/nfs', '11.11.11.11:/nfs']},
#}
#
#Set Storage replica
#storage_replica_size = 3

#Base Openstack live migration configuration.
#live_migration = True
#Fix uid/gid for nova/libvirt-qemu so the ids are same across all nodes.
#nova_uid_fix = True

#Following are NFS based live migration configuration
#Enable this for External NFS server based live migration
#ext_nfs_livem = True
#ext_nfs_livem_mount = '11.1.0.1:/nfsvol'

#Enable this for Ceph based NFS VM server based live migration
#ceph_nfs_livem = True
#ceph_nfs_livem_subnet = '192.168.10.253/24'
#ceph_nfs_livem_image = '/ubuntu/livemnfs.qcow2'
#ceph_nfs_livem_host = host4

#To disable installing contrail interface rename package
#env.interface_rename = False


#Path where the CA certificate file is stored on the node where fab is run.
#Fab copies the file to node where TOR agent is run.
#This is optional and is required only when tor_ovs_protocol is pssl.
#The certificates on the TOR are based on this CA cert.
#env.ca_cert_file = '/root/file.pem'

#In environments where keystone is deployed outside of Contrail provisioning
#scripts, you can use the below options
#
# Note : 
# "insecure" is applicable only when protocol is https
# The entries in env.keystone overrides the below options which used
# to be supported earlier :
#  service_token
#  keystone_ip
#  keystone_admin_user
#  keystone_admin_password
#  region_name
#
#env.keystone = {
#    'keystone_ip'     : 'x.y.z.a',
#    'auth_protocol'   : 'http',                  #Default is http
#    'auth_port'       : '35357',                 #Default is 35357
#    'admin_token'     : '33c57636fbc2c5552fd2',  #admin_token in keystone.conf
#    'admin_user'      : 'admin',                 #Default is admin
#    'admin_password'  : 'contrail123',           #Default is contrail123
#    'nova_password'   : 'contrail123',           #Default is the password set in admin_password
#    'neutron_password': 'contrail123',           #Default is the password set in admin_password
#    'service_tenant'  : 'service',               #Default is service
#    'admin_tenant'    : 'admin',                 #Default is admin
#    'region_name'     : 'RegionOne',             #Default is RegionOne
#    'insecure'        : 'True',                  #Default = False
#    'manage_neutron'  : 'no',                    #Default = 'yes'. Whether the neutron user/role needs to be configured in keystone.
#}
#

#env.nova = {
#    'cpu_mode': 'host-passthrough', # Possible options: none, host-passthrough, host-model, and custom
#                                    # if cpu_mode is 'custom' specify cpu_model option too
#    'cpu_model': 'Nehalem',         # relevant only if cpu_mode is 'custom'
#}

# In Openstack or Contrail High Availability setups.
# internal_vip          : Virtual IP of the openstack HA Nodes in the data/control(internal) network,
#                         all the Openstack services behind this VIP are accessed using this VIP.
# external_vip          : Virtual IP of the Openstack HA Nodes in the management(external) network,
#                         Openstack dashboard and novncproxy services behind this VIP are accessed using this VIP.
# contrail_internal_vip : Virtual IP of the Contrail HA Nodes in the data/control(internal) network,
#                         all the Contrail services behind this VIP are accessed using this VIP.
# contrail_external_vip : Virtual IP of the Contrail HA Nodes in the management(external) network,
#                         Contrail introspects are accessed using this VIP.
# nfs_server            : NFS server to be used to store the glance images.
# nfs_glance_path       : NFS server image path, which will be mounted on the Openstack Nodes and
#                         the glance images will be placed/accessed in/from this location.
env.ha = {
    'internal_vip'   : '10.10.10.10',               #Internal Virtual IP of the openstack HA Nodes.
#    'external_vip'   : '172.18.11.21',               #External Virtual IP of the openstack HA Nodes.
#    'contrail_internal_vip'   : '1.1.1.10',       #Internal Virtual IP of the contrail HA Nodes.
#    'contrail_external_vip'   : '2.2.2.20',       #External Virtual IP of the contrail HA Nodes.
#    'nfs_server'      : '3.3.3.3',                #IP address of the NFS Server which will be mounted to /var/lib/glance/images of openstack Node, Defaults to env.roledefs['compute'][0]
#    'nfs_glance_path' : '/var/tmp/images/',       #NFS Server path to save images, Defaults to /var/tmp/glance-images/
}

# In environments where openstack services are deployed independently
# from contrail, you can use the below options
# service_token : Common service token for all services like nova,
#                 neutron, glance, cinder etc
# amqp_host     : IP of AMQP Server to be used in openstack
# manage_amqp   : Default = 'no', if set to 'yes' provisions amqp in openstack nodes and
#                 openstack services use the amqp in openstack nodes instead of config nodes.
#                 amqp_host is ignored if manage_amqp is set
#
#env.openstack = {
#    'service_token' : '33c57636fbc2c5552fd2', #Common service token for all openstack services
#    'amqp_host' : '10.204.217.19',            #IP of AMQP Server to be used in openstack
#    'manage_amqp' : 'yes',                    #Default no, Manage separate AMQP for openstack services in openstack nodes.
#    'osapi_compute_workers' : 40,             #Default 40, For low-memory systems reduce the osapi compute worker threads.
#    'conductor_workers' : 40,                 #Default 40, For low-memory systems reduce the conductor worker threads.
#}

# Link-Local Metadata Service
# By default fab scripts will retrieve metadata secret from openstack node.
# To override, Specify Metadata proxy secret from Openstack node
#neutron_metadata_proxy_shared_secret = <secret>

#To enable multi-tenancy feature
multi_tenancy = True

#To enable haproxy feature
haproxy = True

#To enable parallel execution of tasks on multiple nodes
#do_parallel = True

# To configure the encapsulation priority. Default: MPLSoGRE 
env.encap_priority =  "'VXLAN'"

# Optional proxy settings.
# env.http_proxy = os.environ.get('http_proxy')

#To enable LBaaS feature
# Default Value: False
#env.enable_lbaas = True

# Ceilometer enable/disable installation and provisioning
# Default Value: False
#enable_ceilometer = True

#OPTIONAL REMOTE SYSLOG CONFIGURATION
#===================================
#For R1.10 this needs to be specified to enable rsyslog.
#For Later releases this would be enabled as part of provisioning,
#with following default values.
#
#port = 19876
#protocol = tcp
#collector = dynamic i.e. rsyslog clients will connect to servers in a round
#                         robin fashion. For a static collector all clients will
#                         connect to a single collector. static - is a test
#                         only option.
#status = enable
#
#env.rsyslog_params = {'port':19876, 'proto':'tcp', 'collector':'dynamic', 'status':'enable'}

#OPTIONAL Virtual gateway CONFIGURATION
#=======================================

#The vgw section is only relevant when you want to use the virtual gateway feature.
#You can use one of your compute nodes as a gateway.

#Definition for the Key used
#-------------------------------------
#vn: Fully qualified name of the Virtual Network to be used by the VGW.
#ipam-subnets: Subnets used by the vn; one or more can be given.
#gateway-routes: If any routes are given, only those routes will be published
#by the VGW; otherwise the default route (0.0.0.0) will be published.


#env.vgw = {host4: {'vgw1':{'vn':'default-domain:admin:public:public', 'ipam-subnets': ['10.204.220.128/29', '10.204.220.136/29'], 'gateway-routes': ['8.8.8.0/24', '1.1.1.0/24']},
#                   'vgw2':{'vn':'default-domain:admin:public1:public1', 'ipam-subnets': ['10.204.220.144/29']}},
#           host5: {'vgw2':{'vn':'default-domain:admin:public1:public1', 'ipam-subnets': ['10.204.220.144/29']}}
#          }

#OPTIONAL TOR AGENT AND TSN CONFIGURATION
#==================================================
#The tor agent section is only relevant when you want to use the TOR Agent feature.
#You can use one of your compute nodes as a TOR Agent. The same or a different
#compute node should be enabled as a TSN.

#Definition for the Key used
#-------------------------------------
# tor_ip: IP of the tor switch
# tor_agent_id: Unique Id of the tor switch to identify. Typically a numeric value.
# tor_agent_name: Unique name for TOR Agent. This is an optional field. If this is
#                 not specified, name used will be <hostname>-<tor_agent_id>
# tor_type: Always ovs
# tor_ovs_port: Port number to be used by ovs. If any redundant TOR Agent is
#               specified for this tor-agent, it should have the same 'tor_ovs_port'
# tor_ovs_protocol: Connection protocol between TOR Agent and TOR (tcp / pssl)
# tor_tsn_ip: TSN node ip 
# tor_tsn_name: Name of the TSN node
# tor_name: Name of the tor switch. If any redundant TOR Agent is specified for
#           this tor-agent, it should have the same 'tor_name'
# tor_tunnel_ip: Data plane IP for the tor switch
# tor_vendor_name: Vendor type for TOR switch
# tor_product_name: Product name of TOR switch. This is an optional field.
# tor_agent_http_server_port: HTTP server port. Same will be used by tor agent for introspect
#
#env.tor_agent = {host10:[{
#                    'tor_ip':'10.204.217.39',
#                    'tor_agent_id':'1',
#                    'tor_agent_name':'nodexx-1',
#                    'tor_type':'ovs',
#                    'tor_ovs_port':'9999',
#                    'tor_ovs_protocol':'tcp',
#                    'tor_tsn_ip':'10.204.221.35',
#                    'tor_tsn_name':'nodec45',
#                    'tor_name':'bng-contrail-qfx51-2',
#                    'tor_tunnel_ip':'34.34.34.34',
#                    'tor_vendor_name':'Juniper',
#                    'tor_product_name':'QFX5100',
#                    'tor_agent_http_server_port': '9010',
#                       }]
#                }
#######################################
#vcenter provisioning
#server is the vcenter server ip
#port is the port on which vcenter is listening for connection
#username is the vcenter username credentials
#password is the vcenter password credentials
#auth is the authentication type used to talk to vcenter, http or https
#datacenter is the datacenter name we are operating on
#cluster is the list of clusters we are operating on
#dvswitch section contains distributed switch related params
#       dv_switch_name
#dvportgroup section contains the distributed port group info
#       dv_portgroup_name and the number of ports the group has
######################################
#env.vcenter = {
#        'server':'127.0.0.1',
#        'port': '443',
#        'username': '[email protected]',
#        'password': 'Contrail123!',
#        'auth': 'https',
#        'datacenter': 'kd_dc',
#        'cluster': ['kd_cluster_1','kd_cluster_2'],
#        'dv_switch': { 'dv_switch_name': 'kd_dvswitch',
#                     },
#        'dv_port_group': { 'dv_portgroup_name': 'kd_dvportgroup',
#                           'number_of_ports': '3',
#                     },
#}
#
####################################################################################
# The compute vm provisioning on ESXI host
# This section is used to copy a vmdk onto the ESXi box and bring it up.
# The ContrailVM which comes up will be set up as a compute node with only
# vrouter running on it. Each host has an esxi host associated with it.
#
# esxi_host information:
#    ip: the esxi ip on which the contrailvm(host/compute) runs
#    username: username used to login to esxi
#    password: password for esxi
#    fabric_vswitch: the name of the underlay vswitch that runs on esxi 
#                    optional, defaults to 'vswitch0'
#    fabric_port_group: the name of the underlay port group for esxi
#                       optional, defaults to 'contrail-fab-pg'
#    uplink_nic: the nic used for underlay
#                 optional, defaults to None
#    data_store: the datastore on esxi where the vmdk is copied to
#    cluster: name of the cluster to which this esxi is added
#    contrail_vm information:
#        uplink: The SRIOV or Passthrough PCI Id(04:10.1). If not provided
#                will default to vmxnet3 based fabric uplink
#        mac: the virtual mac address for the contrail vm
#        host: the contrail_vm ip in the form of 'user@contrailvm_ip'
#        vmdk: the absolute path of the contrail-vmdk used to spawn vm
#              optional, if vmdk_download_path is specified
#        vmdk_download_path: download path of the contrail-vmdk.vmdk used to spawn vm  
#                            optional, if vmdk is specified
######################################################################################
#esxi_hosts = {
#       'esxi': {
#             'ip': '1.1.1.1',
#             'username': 'root',
#             'password': 'c0ntrail123',
#             'datastore': "/vmfs/volumes/ds1",
#             'cluster': "kd_cluster_1",
#             'contrail_vm': {
#                   'mac': "00:50:56:05:ba:ba",
#                   'host': "[email protected]",
#                   'vmdk_download_path': "http://10.84.5.100/vmware/vmdk/ContrailVM-disk1.vmdk",
#             }
#       }
#}

# OPTIONAL DPDK CONFIGURATION
# ===========================
# If some compute nodes should use DPDK vRouter version it has to be put in
# env.dpdk dictionary. The format is:
# env.dpdk = {
#     host1: { 'huge_pages' : '50', 'coremask' : '0xf' },
#     host2: { 'huge_pages' : '50', 'coremask' : '0,3-7' },
# }
# huge_pages - Specify what percentage of host memory should be reserved
#              for access with huge pages
# coremask   - Specify CPU affinity mask to run vRouter with. Supported formats:
#              hexadecimal, comma-separated list of CPUs, dash-separated range
#              of CPUs.
# OPTIONAL vrouter limit parameter
# ==================================
#env.vrouter_module_params = {
#     host4:{'mpls_labels':'131072', 'nexthops':'131072', 'vrfs':'65536', 'macs':'262144'},
#     host5:{'mpls_labels':'131072', 'nexthops':'131072', 'vrfs':'65536', 'macs':'262144'}
#}
#
# OPTIONAL md5 key enabling
# There are 2 ways of enabling a BGP md5 key on a node apart from the webui.
# 1. Before provisioning the node, include an env dict in testbed.py as shown below, specifying the desired
#    key value on the node. The key should be of type "string" only.
# 2. If md5 is not included in testbed.py and the node is already provisioned, you can run the
#    contrail-controller/src/config/utils/provision_control.py script with a newly added argument for md5
# The below env dict is for the first method, where you include a dict in testbed.py as shown below:
#  env.md5 = {
#     host1: 'juniper',
#     host2: 'juniper',
#     host3: 'juniper',
#  }
# 'juniper' is the md5 key that will be configured on the nodes.