On Tue, Feb 28, 2023 at 10:36 AM Enrique Llorente Pastora <[email protected]> wrote:
> > > On Tue, Feb 28, 2023 at 10:00 AM Ales Musil <[email protected]> wrote: > >> Hi, >> >> Thank you for working on this, >> I have a couple of style related comments down below. >> >> >> On Mon, Feb 27, 2023 at 1:46 PM Enrique Llorente <[email protected]> >> wrote: >> >>> Configure mac address >>> The mac address returned by ARP/NDP can be configured similar to LSP >>> addresses where the mac is the first entry on the list >>> >>> IPv6 >>> Support NDP IPv6 protocol >>> >>> Use CIDRs >>> Allow to specify subnets for ipv4 and ipv6, they will match whatever >>> address is received from ARP/NDP >>> >>> Signed-off-by: Enrique Llorente <[email protected]> >>> --- >>> northd/northd.c | 141 +++++++++++++++--- >>> northd/ovn-northd.8.xml | 12 +- >>> ovn-nb.xml | 18 ++- >>> tests/ovn.at | 183 +++++++++++++++++++---- >>> tests/system-common-macros.at | 5 +- >>> tests/system-ovn.at | 271 ++++++++++++++++++++++++++++++++++ >>> 6 files changed, 571 insertions(+), 59 deletions(-) >>> >>> diff --git a/northd/northd.c b/northd/northd.c >>> index 770a5b50e..3fc48e71d 100644 >>> --- a/northd/northd.c >>> +++ b/northd/northd.c >>> @@ -8644,29 +8644,43 @@ build_lswitch_arp_nd_responder_known_ips(struct >>> ovn_port *op, >>> } >>> } >>> } >>> - >>> - if (op->peer) { >>> - const char *arp_proxy = >>> smap_get(&op->nbsp->options,"arp_proxy"); >>> - >>> + const char *arp_proxy = >>> smap_get(&op->nbsp->options,"arp_proxy"); >>> + if (arp_proxy) { >>> struct lport_addresses proxy_arp_addrs; >>> - int i = 0; >>> + int i, ofs = 0; >>> + /* Either takes "MAC IP1 IP2" or "IP1 IP2" */ >>> + if (!extract_addresses(arp_proxy, &proxy_arp_addrs, &ofs) && >>> + !extract_ip_addresses(arp_proxy, &proxy_arp_addrs)) { >>> + static struct vlog_rate_limit rl = >>> VLOG_RATE_LIMIT_INIT(1, 5); >>> + VLOG_WARN_RL(&rl, "Invalid arp_proxy option: '%s' at >>> lsp '%s'", >>> + arp_proxy, op->nbsp->name); >>> + return; >>> + } >>> + >>> + /* Select the mac address to answer the proxy ARP/NDP */ >>> + char *ea_s = NULL; >>> + if (!eth_addr_is_zero(proxy_arp_addrs.ea)) { >>> + ea_s = proxy_arp_addrs.ea_s; >>> + } else if (op->peer) { >>> + ea_s = op->peer->lrp_networks.ea_s; >>> + } else { >>> + return; >>> + } >>> >>> - /* Add responses for ARP proxies. */ >>> - if (arp_proxy && extract_ip_addresses(arp_proxy, >>> - &proxy_arp_addrs) && >>> - proxy_arp_addrs.n_ipv4_addrs) { >>> + /* Add IPv4 responses for ARP proxies. */ >>> + if (proxy_arp_addrs.n_ipv4_addrs) { >>> /* Match rule on all proxy ARP IPs. */ >>> ds_clear(match); >>> ds_put_cstr(match, "arp.op == 1 && arp.tpa == {"); >>> >>> for (i = 0; i < proxy_arp_addrs.n_ipv4_addrs; i++) { >>> - ds_put_format(match, "%s,", >>> - proxy_arp_addrs.ipv4_addrs[i].addr_s); >>> + ds_put_format(match, "%s/%u,", >>> + proxy_arp_addrs.ipv4_addrs[i].addr_s, >>> + proxy_arp_addrs.ipv4_addrs[i].plen); >>> } >>> >>> ds_chomp(match, ','); >>> ds_put_cstr(match, "}"); >>> - destroy_lport_addresses(&proxy_arp_addrs); >>> >>> ds_clear(actions); >>> ds_put_format(actions, >>> @@ -8679,12 +8693,69 @@ build_lswitch_arp_nd_responder_known_ips(struct >>> ovn_port *op, >>> "outport = inport; " >>> "flags.loopback = 1; " >>> "output;", >>> - op->peer->lrp_networks.ea_s, >>> - op->peer->lrp_networks.ea_s); >>> + ea_s, >>> + ea_s); >>> >>> ovn_lflow_add_with_hint(lflows, op->od, >>> S_SWITCH_IN_ARP_ND_RSP, >>> 50, ds_cstr(match), ds_cstr(actions), >>> &op->nbsp->header_); >>> } >>> + >>> + /* Add IPv6 NDP responses. 
>>> + * For ND solicitations, we need to listen for both the >>> + * unicast IPv6 address and its all-nodes multicast address, >>> + * but always respond with the unicast IPv6 address. */ >>> + if (proxy_arp_addrs.n_ipv6_addrs) { >>> + struct ds ip6_dst_match = DS_EMPTY_INITIALIZER; >>> + struct ds nd_target_match = DS_EMPTY_INITIALIZER; >>> + for (size_t j = 0; j < proxy_arp_addrs.n_ipv6_addrs; >>> j++) { >>> + ds_put_format(&ip6_dst_match, "%s/%u, %s/%u", >>> + proxy_arp_addrs.ipv6_addrs[j].addr_s, >>> + proxy_arp_addrs.ipv6_addrs[j].plen, >>> + proxy_arp_addrs.ipv6_addrs[j].sn_addr_s, >>> + proxy_arp_addrs.ipv6_addrs[j].plen); >>> + ds_put_format(&nd_target_match, >>> + "%s/%u", >>> >> >> nit: To be aligned with the previous one this shouldn't be on a new line. >> >> >>> + proxy_arp_addrs.ipv6_addrs[j].addr_s, >>> + proxy_arp_addrs.ipv6_addrs[j].plen); >>> + if (j+1 < proxy_arp_addrs.n_ipv6_addrs) { >>> + ds_put_cstr(&ip6_dst_match, ", "); >>> + ds_put_cstr(&nd_target_match, ", "); >>> + } >>> >> >> I would probably prefer to do something like "ds_truncate(&ip6_dst_match, >> ip6_dst_match.length - 2);" >> instead of "if" in every loop iteration. >> >> >>> + } >>> + ds_clear(match); >>> + ds_put_format(match, >>> + "nd_ns " >>> + "&& ip6.dst == { %s } " >>> + "&& nd.target == { %s }", >>> + ds_cstr(&ip6_dst_match), >>> + ds_cstr(&nd_target_match)); >>> + ds_clear(actions); >>> + ds_put_format(actions, >>> + "%s { " >>> + "eth.src = %s; " >>> + "ip6.src = nd.target; " >>> + "nd.target = nd.target; " >>> + "nd.tll = %s; " >>> + "outport = inport; " >>> + "flags.loopback = 1; " >>> + "output; " >>> + "};", >>> + lsp_is_router(op->nbsp) ? "nd_na_router" : >>> "nd_na", >>> + ea_s, >>> + ea_s); >>> + ovn_lflow_add_with_hint__(lflows, op->od, >>> + S_SWITCH_IN_ARP_ND_RSP, 50, >>> + ds_cstr(match), >>> + ds_cstr(actions), >>> + NULL, >>> + copp_meter_get(COPP_ND_NA, >>> + op->od->nbs->copp, >>> + meter_groups), >>> + &op->nbsp->header_); >>> + ds_destroy(&ip6_dst_match); >>> + ds_destroy(&nd_target_match); >>> + } >>> + destroy_lport_addresses(&proxy_arp_addrs); >>> } >>> } >>> } >>> @@ -9063,7 +9134,6 @@ build_lswitch_ip_unicast_lookup(struct ovn_port >>> *op, >>> struct ds *match) >>> { >>> if (op->nbsp && (!lsp_is_external(op->nbsp))) { >>> - >>> >> >> nit: Unrelated change. >> >> >>> /* For ports connected to logical routers add flows to bypass >>> the >>> * broadcast flooding of ARP/ND requests in table 19. We direct >>> the >>> * requests only to the router port that owns the IP address. >>> @@ -9122,9 +9192,24 @@ build_lswitch_ip_unicast_lookup(struct ovn_port >>> *op, >>> ETH_ADDR_SCAN_FMT, >>> ETH_ADDR_SCAN_ARGS(mac))) { >>> continue; >>> } >>> + char * mac_s = xasprintf(ETH_ADDR_FMT, >>> ETH_ADDR_ARGS(mac)); >>> >> >> There is no need for the mac_s, in both cases you can directly use the >> ETH_ADDR_FMT. 
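Regarding the ds_truncate() suggestion above, a minimal untested sketch of what that loop could look like (this assumes the strings are only trimmed when n_ipv6_addrs > 0, so both always end in ", " at that point):

    /* Unconditionally append the separator, then trim it once after the
     * loop instead of checking j + 1 on every iteration. */
    for (size_t j = 0; j < proxy_arp_addrs.n_ipv6_addrs; j++) {
        ds_put_format(&ip6_dst_match, "%s/%u, %s/%u, ",
                      proxy_arp_addrs.ipv6_addrs[j].addr_s,
                      proxy_arp_addrs.ipv6_addrs[j].plen,
                      proxy_arp_addrs.ipv6_addrs[j].sn_addr_s,
                      proxy_arp_addrs.ipv6_addrs[j].plen);
        ds_put_format(&nd_target_match, "%s/%u, ",
                      proxy_arp_addrs.ipv6_addrs[j].addr_s,
                      proxy_arp_addrs.ipv6_addrs[j].plen);
    }
    /* Both strings end with ", " here because n_ipv6_addrs > 0. */
    ds_truncate(&ip6_dst_match, ip6_dst_match.length - 2);
    ds_truncate(&nd_target_match, nd_target_match.length - 2);

And on the mac_s nit, ETH_ADDR_FMT/ETH_ADDR_ARGS(mac) can indeed be passed straight to ds_put_format() in both branches, dropping the xasprintf()/free() of mac_s.
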
>> >> >>> + struct lport_addresses proxy_arp_addrs; >>> + const char *arp_proxy = >>> + smap_get(&op->nbsp->options,"arp_proxy"); >>> + int ofs = 0; >>> ds_clear(match); >>> - ds_put_format(match, "eth.dst == "ETH_ADDR_FMT, >>> - ETH_ADDR_ARGS(mac)); >>> + ds_put_cstr(match, "eth.dst == "); >>> + if (arp_proxy && >>> + extract_addresses(arp_proxy, &proxy_arp_addrs, >>> &ofs)) { >>> + ds_put_format(match, >>> + "{ %s, %s }", >>> + proxy_arp_addrs.ea_s, >>> + mac_s); >>> + destroy_lport_addresses(&proxy_arp_addrs); >>> + } else { >>> + ds_put_format(match, "%s", mac_s); >>> + } >>> + free(mac_s); >>> if (op->peer->od->n_l3dgw_ports >>> && op->od->n_localnet_ports) { >>> bool add_chassis_resident_check = false; >>> @@ -9157,7 +9242,6 @@ build_lswitch_ip_unicast_lookup(struct ovn_port >>> *op, >>> json_key); >>> } >>> } >>> - >>> ds_clear(actions); >>> ds_put_format(actions, action, op->json_key); >>> ovn_lflow_add_with_hint(lflows, op->od, >>> @@ -11640,8 +11724,25 @@ build_adm_ctrl_flows_for_lrouter_port( >>> op->lrp_networks.ea_s); >>> >>> ds_clear(match); >>> - ds_put_format(match, "eth.dst == %s && inport == %s", >>> - op->lrp_networks.ea_s, op->json_key); >>> + ds_put_cstr(match, "eth.dst == "); >>> + struct lport_addresses proxy_arp_addrs = {.ea = eth_addr_zero}; >>> >> + if (op->peer && op->peer->nbsp) { >>> + const char *arp_proxy = >>> + smap_get(&op->peer->nbsp->options,"arp_proxy"); >>> + int ofs = 0; >>> + if (arp_proxy && >>> + extract_addresses(arp_proxy, &proxy_arp_addrs, &ofs)) { >>> + ds_put_format(match, >>> + "{ %s, %s }", >>> + proxy_arp_addrs.ea_s, >>> + op->lrp_networks.ea_s); >>> + destroy_lport_addresses(&proxy_arp_addrs); >>> + } >>> + } >>> + if (eth_addr_is_zero(proxy_arp_addrs.ea)) { >>> + ds_put_format(match, "%s", op->lrp_networks.ea_s); >>> + } >>> + ds_put_format(match, " && inport == %s", op->json_key); >>> if (consider_l3dgw_port_is_centralized(op)) { >>> ds_put_format(match, " && is_chassis_resident(%s)", >>> op->cr_port->json_key); >>> diff --git a/northd/ovn-northd.8.xml b/northd/ovn-northd.8.xml >>> index 2eab2c4ae..069b2f1b0 100644 >>> --- a/northd/ovn-northd.8.xml >>> +++ b/northd/ovn-northd.8.xml >>> @@ -1400,7 +1400,16 @@ nd_na_router { >>> column of <code>NB_Global</code> table of the >>> <code>Northbound</code> >>> database), for logical ports of type <code>virtual</code> and >>> for >>> logical ports with 'unknown' address set. >>> - </p> >>> + </p> >>> + >>> + <p> >>> + The above NDP responder flows are added for the list of IPv6 >>> addresses >>> + if defined in <code>options:arp_proxy</code> column of >>> + <code>Logical_Switch_Port</code> table for logical switch ports >>> of >>> + type <code>router</code>. >>> + </p> >>> + >>> + >>> </li> >>> >>> <li> >>> @@ -1414,7 +1423,6 @@ nd_na_router { >>> IP address assignment, so sending a reply will prevent the VM >>> from >>> accepting the IP address that it owns. >>> </p> >>> - >>> >> >> nit: Unrelated change. >> >> >>> <p> >>> In place of <code>next;</code>, it would be reasonable to use >>> <code>drop;</code> for the flows' actions. If everything is >>> working >>> diff --git a/ovn-nb.xml b/ovn-nb.xml >>> index 8d56d0c6e..525c6567c 100644 >>> --- a/ovn-nb.xml >>> +++ b/ovn-nb.xml >>> @@ -992,12 +992,18 @@ >>> </column> >>> >>> <column name="options" key="arp_proxy"> >>> - Optional. A list of IPv4 addresses that this >>> - logical switch <code>router</code> port will reply to ARP >>> requests. >>> - Example: <code>169.254.239.254 169.254.239.2</code>. 
The >>> - <ref column="options" key="router-port"/>'s logical router >>> should >>> - have a route to forward packets sent to configured proxy ARP >>> IPs to >>> - an appropriate destination. >>> + Optional. A list of MAC and addresses/cidrs or just >>> addresses/cdirs >>> >> >> nit: s/cdirs/cidrs/ >> >> >>> + that this logical switch <code>router</code> port will reply >>> to >>> + ARP/NDP requests. Examples: >>> + <code>169.254.239.254 169.254.239.2</code>, >>> + <code>0a:58:a9:fe:01:01 169.254.239.254 169.254.239.2 >>> + 169.254.238.0/24</code> >>> + , >>> + <code>fd7b:6b4d:7b25:d22f::1 fd7b:6b4d:7b25:d22f::2</code> >>> + , <code>0a:58:a9:fe:01:01 fd7b:6b4d:7b25:d22f::0/64</code> >>> + . The<ref column="options" key="router-port"/>'s logical >>> router >>> >> >> nit: Please leave the "." and "," at the same line. >> >> >>> + should have a route to forward packets sent to configured >>> proxy ARP >>> + MAC/IPs to an appropriate destination. >>> </column> >>> </group> >>> >>> diff --git a/tests/ovn.at b/tests/ovn.at >>> index dc5c5df3f..8c8a3c263 100644 >>> --- a/tests/ovn.at >>> +++ b/tests/ovn.at >>> @@ -31781,20 +31781,46 @@ AT_KEYWORDS([proxy-arp]) >>> ovn_start >>> >>> # Logical network: >>> -# One LR - lr1 has switch ls1 (192.16.1.0/24) connected to it, >>> +# One LR - lr1 has switch ls1 (192.16.1.0/24) and ls2 (192.168.2.0/24) >>> connected to it, >>> # and and one HV with IP 192.16.1.6. >>> >>> ovn-nbctl lr-add lr1 >>> ovn-nbctl ls-add ls1 >>> +ovn-nbctl ls-add ls2 >>> + >>> + >>> + >>> + >>> >>> # Connect ls1 to lr1 >>> -ovn-nbctl lrp-add lr1 ls1 00:00:00:01:02:f1 192.16.1.1/24 >>> +ls1_ro_mac=00:00:00:01:02:f1 >>> +ls1_ro_ip4=192.168.1.1 >>> +ls1_ro_ip6=fd11::1 >>> +ovn-nbctl lrp-add lr1 ls1 $ls1_ro_mac $ls1_ro_ip4/24 $ls1_ro_ip6/64 >>> ovn-nbctl lsp-add ls1 rp-ls1 -- set Logical_Switch_Port rp-ls1 \ >>> - type=router options:router-port=ls1 addresses=\"00:00:00:01:02:f1\" >>> + type=router options:router-port=ls1 addresses=\"router\" >>> >>> # Create logical port ls1-lp1 in ls1 >>> +ls1_p1_mac=00:00:00:01:02:03 >>> +ls1_p1_ip4=192.16.1.6 >>> +ls1_p1_ip6=fd11::2 >>> ovn-nbctl lsp-add ls1 ls1-lp1 \ >>> --- lsp-set-addresses ls1-lp1 "00:00:00:01:02:03 192.16.1.6" >>> +-- lsp-set-addresses ls1-lp1 "$ls1_p1_mac $ls1_p1_ip4 $ls1_p1_ip6" >>> + >>> +# Connect ls2 to lr1 >>> +ls2_ro_mac=00:00:00:01:02:f2 >>> +ls2_ro_ip4=192.168.2.1 >>> +ls2_ro_ip6=fd12::1 >>> +ovn-nbctl lrp-add lr1 ls2 $ls2_ro_mac $ls2_ro_ip4/24 $ls2_ro_ip6/64 >>> +ovn-nbctl lsp-add ls2 rp-ls2 -- set Logical_Switch_Port rp-ls2 \ >>> + type=router options:router-port=ls1 addresses=\"router\" >>> + >>> +# Create logical port ls2-lp1 in ls2 >>> +ls2_p1_mac=00:00:00:01:02:04 >>> +ls2_p1_ip4=192.16.2.6 >>> +ls2_p1_ip6=fd12::2 >>> +ovn-nbctl lsp-add ls2 ls2-lp1 \ >>> +-- lsp-set-addresses ls2-lp1 "$ls2_p1_mac $ls2_p1_ip4 $ls2_p1_ip6" >>> >>> # Create one hypervisor and create OVS ports corresponding to logical >>> ports. >>> net_add n1 >>> @@ -31812,52 +31838,149 @@ ovs-vsctl -- add-port br-int vif1 -- \ >>> options:rxq_pcap=hv1/vif1-rx.pcap \ >>> ofport-request=1 >>> >>> -# And proxy ARP flows for 69.254.239.254 and 169.254.239.2 >>> -# and check that SB flows have been added. 
>>> +ovs-vsctl -- add-port br-int vif2 -- \ >>> + set interface vif2 external-ids:iface-id=ls2-lp1 \ >>> + options:tx_pcap=hv1/vif2-tx.pcap \ >>> + options:rxq_pcap=hv1/vif2-rx.pcap \ >>> + ofport-request=1 >>> + >>> + >>> +# Two proxy arp/ndp configurations with and wihout mac both dual stack >>> +# and with CIDR to check sbflows >>> +arp_proxy_ls1_ip4=169.254.238.2 >>> +arp_proxy_ls1_ip6=fd7b:6b4d:7b25:d22d::2 >>> +arp_proxy_ls1=(169.254.238.0/24 \ >>> + 169.254.239.2 \ >>> + fd7b:6b4d:7b25:d22d::0/64 \ >>> + fd7b:6b4d:7b25:d22f::1) >>> +arp_proxy_ls1_option="\"${arp_proxy_ls1[[*]]}\"" >>> + >>> +arp_proxy_ls2_ip4=169.254.236.2 >>> +arp_proxy_ls2_ip6=fd7b:6b4d:7b25:d22b::2 >>> +arp_proxy_ls2=(00:00:00:02:02:f1 \ >>> + 169.254.236.0/24 \ >>> + 169.254.237.2 \ >>> + fd7b:6b4d:7b25:d22b::0/64 \ >>> + fd7b:6b4d:7b25:d22c::1) >>> +arp_proxy_ls2_option="\"${arp_proxy_ls2[[*]]}\"" >>> + >>> ovn-nbctl --wait=hv add Logical_Switch_Port rp-ls1 \ >>> -options arp_proxy='"169.254.239.254 169.254.239.2"' >>> +options arp_proxy="$arp_proxy_ls1_option" >>> +ovn-nbctl --wait=hv add Logical_Switch_Port rp-ls2 \ >>> +options arp_proxy="$arp_proxy_ls2_option" >>> ovn-sbctl dump-flows > sbflows >>> AT_CAPTURE_FILE([sbflows]) >>> >>> -AT_CHECK([ovn-sbctl dump-flows | grep ls_in_arp_rsp | grep >>> "169.254.239.2" | sed 's/table=../table=??/'], [0], [dnl >>> - table=??(ls_in_arp_rsp ), priority=50 , match=(arp.op == 1 && >>> arp.tpa == {169.254.239.254,169.254.239.2}), dnl >>> -action=(eth.dst = eth.src; eth.src = 00:00:00:01:02:f1; arp.op = 2; /* >>> ARP reply */ arp.tha = arp.sha; arp.sha = 00:00:00:01:02:f1; arp.tpa <-> >>> arp.spa; outport = inport; flags.loopback = 1; output;) >>> +# IPv4 LS1 Responder lflows >>> +AT_CHECK([ovn-sbctl dump-flows | >>> + grep ls_in_arp_rsp | >>> + grep "${arp_proxy_ls1[[1]]}" | >>> + sed 's/table=../table=??/'], [0], [dnl >>> + table=??(ls_in_arp_rsp ), priority=50 , match=(arp.op == 1 && >>> dnl >>> +arp.tpa == {169.254.238.0/24,169.254.239.2/32} >>> <http://169.254.238.0/24,169.254.239.2/32%7D>), dnl >>> +action=(eth.dst = eth.src; eth.src = 00:00:00:01:02:f1; arp.op = 2; dnl >>> +/* ARP reply */ arp.tha = arp.sha; arp.sha = 00:00:00:01:02:f1; dnl >>> +arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) >>> +]) >>> + >>> +# IPv6 LS1 Responder lflows >>> +AT_CHECK([ovn-sbctl dump-flows | >>> + grep ls_in_arp_rsp | >>> + grep "${arp_proxy_ls1[[3]]}" | >>> + sed 's/table=../table=??/'], [0], [dnl >>> + table=??(ls_in_arp_rsp ), priority=50 , dnl >>> +match=(nd_ns && ip6.dst == { fd7b:6b4d:7b25:d22d::/64, >>> ff02::1:ff00:0/64, dnl >>> +fd7b:6b4d:7b25:d22f::1/128, ff02::1:ff00:1/128 } && dnl >>> +nd.target == { fd7b:6b4d:7b25:d22d::/64, fd7b:6b4d:7b25:d22f::1/128 }), >>> dnl >>> +action=(nd_na_router { eth.src = 00:00:00:01:02:f1; ip6.src = >>> nd.target; dnl >>> +nd.target = nd.target; nd.tll = 00:00:00:01:02:f1; outport = inport; dnl >>> +flags.loopback = 1; output; };) >>> +]) >>> + >>> +# IPv4 LS2 Responder lflows >>> +AT_CHECK([ovn-sbctl dump-flows | >>> + grep ls_in_arp_rsp | >>> + grep "${arp_proxy_ls2[[2]]}" | >>> + sed 's/table=../table=??/'], [0], [dnl >>> + table=??(ls_in_arp_rsp ), priority=50 , dnl >>> +match=(arp.op == 1 && arp.tpa == {169.254.236.0/24,169.254.237.2/32} >>> <http://169.254.236.0/24,169.254.237.2/32%7D>), dnl >>> +action=(eth.dst = eth.src; eth.src = 00:00:00:02:02:f1; arp.op = 2; dnl >>> +/* ARP reply */ arp.tha = arp.sha; arp.sha = 00:00:00:02:02:f1; dnl >>> +arp.tpa <-> arp.spa; outport = inport; flags.loopback = 1; output;) 
>>> +]) >>> + >>> +# IPv4 LS2 Responder lflows >>> +AT_CHECK([ovn-sbctl dump-flows | >>> + grep ls_in_arp_rsp | >>> + grep "${arp_proxy_ls2[[4]]}" | >>> + sed 's/table=../table=??/'], [0], [dnl >>> + table=??(ls_in_arp_rsp ), priority=50 , dnl >>> +match=(nd_ns && ip6.dst == { fd7b:6b4d:7b25:d22b::/64, >>> ff02::1:ff00:0/64, dnl >>> +fd7b:6b4d:7b25:d22c::1/128, ff02::1:ff00:1/128 } && dnl >>> +nd.target == { fd7b:6b4d:7b25:d22b::/64, fd7b:6b4d:7b25:d22c::1/128 }), >>> dnl >>> +action=(nd_na_router { eth.src = 00:00:00:02:02:f1; ip6.src = >>> nd.target; dnl >>> +nd.target = nd.target; nd.tll = 00:00:00:02:02:f1; outport = inport; dnl >>> +flags.loopback = 1; output; };) >>> +]) >>> + >>> +# L2 lookup lflows >>> +AT_CHECK([ovn-sbctl dump-flows | >>> + grep ls_in_l2_lkup | >>> + grep "${arp_proxy_ls2[[0]]}" | >>> + sed 's/table=../table=??/'], [0], [dnl >>> + table=??(ls_in_l2_lkup ), priority=50 , dnl >>> +match=(eth.dst == { 00:00:00:02:02:f1, 00:00:00:01:02:f1 }), dnl >>> +action=(outport = "rp-ls2"; output;) >>> +]) >>> + >>> +# LR admission lflows >>> +AT_CHECK([ovn-sbctl dump-flows | >>> + grep lr_in_admission | >>> + grep "${arp_proxy_ls2[[0]]}" | >>> + sed 's/table=../table=??/'], [0], [dnl >>> + table=??(lr_in_admission ), priority=50 , dnl >>> +match=(eth.dst == { 00:00:00:02:02:f1, 00:00:00:01:02:f1 } && dnl >>> +inport == "ls1"), action=(xreg0[[0..47]] = 00:00:00:01:02:f1; next;) >>> ]) >>> >>> -# Remove and check that the flows have been removed >>> -ovn-nbctl --wait=hv remove Logical_Switch_Port rp-ls1 options >>> arp_proxy='"169.254.239.254 169.254.239.2"' >>> >>> -AT_CHECK([ovn-sbctl dump-flows | grep ls_in_arp_rsp | grep >>> "169.254.239.2"], [1], [dnl >>> +# Remove and check that the flows have been removed >>> +ovn-nbctl --wait=hv remove Logical_Switch_Port rp-ls1 \ >>> +options arp_proxy="$arp_proxy_ls1_option" >>> +AT_CHECK([ovn-sbctl dump-flows | >>> + grep ls_in_arp_rsp | >>> + grep "${arp_proxy_ls1[[1]]}"], [1], [dnl >>> +]) >>> +ovn-nbctl --wait=hv remove Logical_Switch_Port rp-ls2 \ >>> +options arp_proxy="$arp_proxy_ls2_option" >>> +AT_CHECK([ovn-sbctl dump-flows | >>> + grep ls_in_arp_rsp | >>> + grep "${arp_proxy_ls2[[2]]}"], [1], [dnl >>> ]) >>> >>> # Add the flows back send arp request and check we see an ARP response >>> ovn-nbctl --wait=hv add Logical_Switch_Port rp-ls1 \ >>> -options arp_proxy='"169.254.239.254 169.254.239.2"' >>> - >>> -ls1_p1_mac=00:00:00:01:02:03 >>> -ls1_p1_ip=192.16.1.6 >>> - >>> -ls1_ro_mac=00:00:00:01:02:f1 >>> -ls1_ro_ip=192.168.1.1 >>> +options arp_proxy="$arp_proxy_ls1_option" >>> >>> -proxy_ip1=169.254.239.254 >>> -proxy_ip2=169.254.239.2 >>> +ovn-nbctl --wait=hv add Logical_Switch_Port rp-ls2 \ >>> +options arp_proxy="$arp_proxy_ls2_option" >>> >>> bcast_mac=ff:ff:ff:ff:ff:ff >>> >>> -# Send ARP request for 169.254.239.254 >>> +# Send ARP request for ls1 arp_proxy ipv4 /32 address >>> packet="inport==\"ls1-lp1\" && eth.src==$ls1_p1_mac && >>> eth.dst==$bcast_mac && >>> - arp.op==1 && arp.sha==$ls1_p1_mac && arp.spa==$ls1_p1_ip && >>> - arp.tha==$bcast_mac && arp.tpa==$proxy_ip1" >>> + arp.op==1 && arp.sha==$ls1_p1_mac && arp.spa==$ls1_p1_ip4 && >>> + arp.tha==$bcast_mac && arp.tpa==${arp_proxy_ls1[[1]]}" >>> >>> as hv1 ovn-appctl -t ovn-controller inject-pkt "$packet" >>> >>> -as hv1 ovs-ofctl dump-flows br-int| grep 169.254.239.254 | grep >>> priority=50 > debug1 >>> +as hv1 ovs-ofctl dump-flows br-int| \ >>> + grep ${arp_proxy_ls1[[1]]} | grep priority=50 > debug1 >>> AT_CAPTURE_FILE([debug1]) >>> >>> # Check if packet hit the 
ARP reply ovs flow >>> AT_CHECK([ovs-ofctl dump-flows br-int | \ >>> - grep "169.254.239.254" | \ >>> + grep "${arp_proxy_ls1[[1]]}" | \ >>> grep "priority=50" | \ >>> grep "arp_op=1" | \ >>> grep "n_packets=1" | wc -l], [0], [dnl >>> @@ -31866,8 +31989,8 @@ AT_CHECK([ovs-ofctl dump-flows br-int | \ >>> >>> # Check that the HV gets an ARP reply >>> expected="eth.src==$ls1_ro_mac && eth.dst==$ls1_p1_mac && >>> - arp.op==2 && arp.sha==$ls1_ro_mac && arp.spa==$proxy_ip1 && >>> - arp.tha==$ls1_p1_mac && arp.tpa==$ls1_p1_ip" >>> + arp.op==2 && arp.sha==$ls1_ro_mac && >>> arp.spa==${arp_proxy_ls1[[1]]} && >>> + arp.tha==$ls1_p1_mac && arp.tpa==$ls1_p1_ip4" >>> echo $expected | ovstest test-ovn expr-to-packets > expected >>> >>> OVN_CHECK_PACKETS([hv1/vif1-tx.pcap], [expected]) >>> @@ -33354,7 +33477,7 @@ check ovs-vsctl add-port br-int ls0-hv -- set >>> Interface ls0-hv external-ids:ifac >>> check ovn-nbctl lr-add lr0 >>> >>> check ovn-nbctl ls-add ls0 >>> -check ovn-nbctl lsp-add ls0 ls0-lr0 >>> +check ovn-nbctl lsp-add ls0 ls0-lr0 >>> >> >> nit: Unrelated change. >> >> >>> check ovn-nbctl lsp-set-type ls0-lr0 router >>> check ovn-nbctl lsp-set-addresses ls0-lr0 router >>> check ovn-nbctl lrp-add lr0 lr0-ls0 00:00:00:00:00:01 10.0.0.1 >>> diff --git a/tests/system-common-macros.at b/tests/ >>> system-common-macros.at >>> index d65f359a6..2584234a5 100644 >>> --- a/tests/system-common-macros.at >>> +++ b/tests/system-common-macros.at >>> @@ -80,7 +80,7 @@ m4_define([NS_ADD_INT], >>> ) >>> >>> # ADD_VETH([port], [namespace], [ovs-br], [ip_addr] [mac_addr], >>> [gateway], >>> -# [ip_addr_flags]) >>> +# [ip_addr_flags][route]) >>> >> >> nit: Missing space. >> Also I'm not sure how useful this option is besides this test. >> IMO it would be better to add it additionally in the test. >> > > We cannot set a gw destination with an address not routable or we get > "Error: Nexthop has invalid gateway.", > that's why it's added to the macro to set the point to point route before > configuring the default gw. > Looks like adding always the point to point route is not hurting the other tests, I will do that > > >> >> >> # >>> # Add a pair of veth ports. 'port' will be added to name space >>> 'namespace', >>> # and "ovs-'port'" will be added to ovs bridge 'ovs-br'. >>> @@ -105,6 +105,9 @@ m4_define([ADD_VETH], >>> if test -n "$5"; then >>> NS_CHECK_EXEC([$2], [ip link set dev $1 address $5]) >>> fi >>> + if test -n "$8"; then >>> + NS_CHECK_EXEC([$2], [ip route add $8 dev $1]) >>> + fi >>> if test -n "$6"; then >>> NS_CHECK_EXEC([$2], [ip route add default via $6]) >>> fi >>> diff --git a/tests/system-ovn.at b/tests/system-ovn.at >>> index 563858e70..036a4c306 100644 >>> --- a/tests/system-ovn.at >>> +++ b/tests/system-ovn.at >>> @@ -10660,3 +10660,274 @@ OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query >>> port patch-.*/d >>> /connection dropped.*/d"]) >>> AT_CLEANUP >>> ]) >>> + >>> +OVN_FOR_EACH_NORTHD([ >>> +AT_SETUP([1 LR to test ARP proxy convinations]) >>> +AT_KEYWORDS([proxy-arp]) >>> +AT_SKIP_IF([test $HAVE_TCPDUMP = no]) >>> + >>> +ovn_start >>> +OVS_TRAFFIC_VSWITCHD_START() >>> +ADD_BR([br-int]) >>> + >>> +# Set external-ids in br-int needed for ovn-controller >>> +ovs-vsctl \ >>> + -- set Open_vSwitch . external-ids:system-id=hv1 \ >>> + -- set Open_vSwitch . >>> external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \ >>> + -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \ >>> + -- set Open_vSwitch . 
external-ids:ovn-encap-ip=169.0.0.1 \ >>> + -- set bridge br-int fail-mode=secure >>> other-config:disable-in-band=true >>> + >>> +# Start ovn-controller >>> +start_daemon ovn-controller >>> + >>> +# Logical network: >>> +# One LR - R1 and two LSs - foo and bar, R1 has switches foo ( >>> 192.168.1.0/24) and >>> +# bar (192.168.2.0/24) connected to it >>> +# >>> +# foo -- R1 -- bar >>> + >>> +ovn-nbctl create Logical_Router name=R1 >>> + >>> +ovn-nbctl ls-add foo >>> +ovn-nbctl ls-add bar >>> + >>> +# Connect foo to R1 >>> +ovn-nbctl lrp-add R1 foo 00:00:01:01:02:03 192.168.1.1/24 >>> +ovn-nbctl lsp-add foo rp-foo -- set Logical_Switch_Port rp-foo \ >>> + type=router options:arp_proxy="0a:58:a9:fe:01:01 169.254.239.254 >>> 169.254.239.2 169.254.238.0/24 " options:router-port=foo >>> addresses='"router"' >>> + >>> +# Connect bar to R1 >>> +ovn-nbctl lrp-add R1 bar 00:00:01:01:02:04 192.168.2.1/24 >>> +ovn-nbctl lsp-add bar rp-bar -- set Logical_Switch_Port rp-bar \ >>> + type=router options:arp_proxy="169.254.239.253" >>> options:router-port=bar addresses='"router"' >>> + >>> +# Logical port 'foo1' in switch 'foo'. >>> +ADD_NAMESPACES(foo1) >>> +ADD_VETH(foo1, foo1, br-int, "192.168.1.2/24", "f0:00:00:01:02:03", \ >>> + "169.254.239.2", , "169.254.239.2") >>> +ovn-nbctl lsp-add foo foo1 \ >>> +-- lsp-set-addresses foo1 "f0:00:00:01:02:03 192.168.1.2" >>> + >>> +# Logical port 'foo2' in switch 'foo'. >>> +ADD_NAMESPACES(foo2) >>> +ADD_VETH(foo2, foo2, br-int, "192.168.1.3/24", "f0:00:00:01:02:04", \ >>> + "169.254.239.254", , "169.254.239.254") >>> +ovn-nbctl lsp-add foo foo2 \ >>> +-- lsp-set-addresses foo2 "f0:00:00:01:02:04 192.168.1.3" >>> + >>> +# Logical port 'foo3' in switch 'foo'. >>> +ADD_NAMESPACES(foo3) >>> +ADD_VETH(foo3, foo3, br-int, "192.168.1.4/24", "f0:00:00:01:02:05", \ >>> + "169.254.238.1", , "169.254.238.1") >>> +ovn-nbctl lsp-add foo foo3 \ >>> +-- lsp-set-addresses foo3 "f0:00:00:01:02:05 192.168.1.4" >>> + >>> +# Logical port 'bar1' in switch 'bar'. >>> +ADD_NAMESPACES(bar1) >>> +ADD_VETH(bar1, bar1, br-int, "192.168.2.2/24", "f0:00:00:01:02:06", \ >>> +"169.254.239.253", ,"169.254.239.253") >>> +ovn-nbctl lsp-add bar bar1 \ >>> +-- lsp-set-addresses bar1 "f0:00:00:01:02:06 192.168.2.2" >>> + >>> +# wait for ovn-controller to catch up. 
>>> +ovn-nbctl --wait=hv sync >>> + >>> +NETNS_DAEMONIZE([foo1], [tcpdump -l -nn -e -i foo1 'ether dst >>> 0a:58:a9:fe:01:01 and icmp' > foo1-icmp.pcap 2>foo1-tcpdump.stderr], >>> [foo1-icmp-tcpdump.pid]) >>> +OVS_WAIT_UNTIL([grep "listening" foo1-tcpdump.stderr]) >>> + >>> +# 'foo1' should be able to ping 'bar1' >>> +NS_CHECK_EXEC([foo1], [ping -q -c 3 -i 0.3 -w 2 192.168.2.2 | >>> FORMAT_PING], \ >>> +[0], [dnl >>> +3 packets transmitted, 3 received, 0% packet loss, time 0ms >>> +]) >>> +OVS_WAIT_UNTIL([ >>> + total_pkts=$(cat foo1-icmp.pcap| wc -l) >>> + test "${total_pkts}" = "3" >>> +]) >>> + >>> +NETNS_DAEMONIZE([foo2], [tcpdump -l -nn -e -i foo2 'ether dst >>> 0a:58:a9:fe:01:01 and icmp' > foo2-icmp.pcap 2>foo2-tcpdump.stderr], >>> [foo2-icmp-tcpdump.pid]) >>> +OVS_WAIT_UNTIL([grep "listening" foo2-tcpdump.stderr]) >>> + >>> +# 'foo2' should be able to ping 'bar1' >>> +NS_CHECK_EXEC([foo2], [ping -q -c 3 -i 0.3 -w 2 192.168.2.2 | >>> FORMAT_PING], \ >>> +[0], [dnl >>> +3 packets transmitted, 3 received, 0% packet loss, time 0ms >>> +]) >>> +OVS_WAIT_UNTIL([ >>> + total_pkts=$(cat foo2-icmp.pcap| wc -l) >>> + test "${total_pkts}" = "3" >>> +]) >>> + >>> +NETNS_DAEMONIZE([foo3], [tcpdump -l -nn -e -i foo3 'ether dst >>> 0a:58:a9:fe:01:01 and icmp' > foo3-icmp.pcap 2>foo3-tcpdump.stderr], >>> [foo3-icmp-tcpdump.pid]) >>> +OVS_WAIT_UNTIL([grep "listening" foo3-tcpdump.stderr]) >>> + >>> +# 'foo3' should be able to ping 'bar1' >>> +NS_CHECK_EXEC([foo3], [ping -q -c 3 -i 0.3 -w 2 192.168.2.2 | >>> FORMAT_PING], \ >>> +[0], [dnl >>> +3 packets transmitted, 3 received, 0% packet loss, time 0ms >>> +]) >>> +OVS_WAIT_UNTIL([ >>> + total_pkts=$(cat foo3-icmp.pcap| wc -l) >>> + test "${total_pkts}" = "3" >>> +]) >>> + >>> + >>> +OVS_APP_EXIT_AND_WAIT([ovn-controller]) >>> + >>> +as ovn-sb >>> +OVS_APP_EXIT_AND_WAIT([ovsdb-server]) >>> + >>> +as ovn-nb >>> +OVS_APP_EXIT_AND_WAIT([ovsdb-server]) >>> + >>> +as northd >>> +OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE]) >>> + >>> +as >>> +OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d >>> +/connection dropped.*/d"]) >>> +AT_CLEANUP >>> +]) >>> + >>> +OVN_FOR_EACH_NORTHD([ >>> +AT_SETUP([1 LR to test ARP proxy convinations - IPv6]) >>> +AT_KEYWORDS([proxy-arp]) >>> +AT_SKIP_IF([test $HAVE_TCPDUMP = no]) >>> + >>> +ovn_start >>> +OVS_TRAFFIC_VSWITCHD_START() >>> +ADD_BR([br-int]) >>> + >>> +# Set external-ids in br-int needed for ovn-controller >>> +ovs-vsctl \ >>> + -- set Open_vSwitch . external-ids:system-id=hv1 \ >>> + -- set Open_vSwitch . >>> external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \ >>> + -- set Open_vSwitch . external-ids:ovn-encap-type=geneve \ >>> + -- set Open_vSwitch . 
external-ids:ovn-encap-ip=169.0.0.1 \ >>> + -- set bridge br-int fail-mode=secure >>> other-config:disable-in-band=true >>> + >>> +# Start ovn-controller >>> +start_daemon ovn-controller >>> + >>> +# Logical network: >>> +# One LR - R1 and two LSs - foo and bar, R1 has switches foo >>> (fd11::/64) and >>> +# bar (fd12::/64) connected to it >>> +# >>> +# foo -- R1 -- bar >>> + >>> +ovn-nbctl create Logical_Router name=R1 >>> + >>> +ovn-nbctl ls-add foo >>> +ovn-nbctl ls-add bar >>> + >>> +# Connect foo to R1 >>> +ovn-nbctl lrp-add R1 foo 00:00:01:01:02:03 fd11::1/64 >>> +ovn-nbctl lsp-add foo rp-foo -- set Logical_Switch_Port rp-foo \ >>> + type=router options:arp_proxy="0a:58:a9:fe:01:01 >>> fd7b:6b4d:7b25:d22f::1 fd7b:6b4d:7b25:d22f::2 fd7b:6b4d:7b25:d22d::0/64" >>> options:router-port=foo addresses='"router"' >>> + >>> +# Connect bar to R1 >>> +ovn-nbctl lrp-add R1 bar 00:00:01:01:02:04 fd12::1/64 >>> +ovn-nbctl lsp-add bar rp-bar -- set Logical_Switch_Port rp-bar \ >>> + type=router options:arp_proxy="fd7b:6b4d:7b25:d22f::3" >>> options:router-port=bar addresses='"router"' >>> + >>> +# Logical port 'foo1' in switch 'foo'. >>> +ADD_NAMESPACES(foo1) >>> +ADD_VETH(foo1, foo1, br-int, "fd11::2/64", "f0:00:00:01:02:03", \ >>> + "fd7b:6b4d:7b25:d22f::1", , "fd7b:6b4d:7b25:d22f::1") >>> +OVS_WAIT_UNTIL([test "$(ip netns exec foo1 ip a | grep fd11::2 | grep >>> tentative)" = ""]) >>> +ovn-nbctl lsp-add foo foo1 \ >>> +-- lsp-set-addresses foo1 "f0:00:00:01:02:03 fd11::2" >>> + >>> +# Logical port 'foo2' in switch 'foo'. >>> +ADD_NAMESPACES(foo2) >>> +ADD_VETH(foo2, foo2, br-int, "fd11::3/64", "f0:00:00:01:02:04", \ >>> + "fd7b:6b4d:7b25:d22f::2", , "fd7b:6b4d:7b25:d22f::2") >>> +OVS_WAIT_UNTIL([test "$(ip netns exec foo2 ip a | grep fd11::3 | grep >>> tentative)" = ""]) >>> +ovn-nbctl lsp-add foo foo2 \ >>> +-- lsp-set-addresses foo2 "f0:00:00:01:02:04 fd11::3" >>> + >>> +# Logical port 'foo3' in switch 'foo'. >>> +ADD_NAMESPACES(foo3) >>> +ADD_VETH(foo3, foo3, br-int, "fd11::4/64", "f0:00:00:01:02:05", \ >>> + "fd7b:6b4d:7b25:d22d::1", , "fd7b:6b4d:7b25:d22d::1") >>> +OVS_WAIT_UNTIL([test "$(ip netns exec foo3 ip a | grep fd11::4 | grep >>> tentative)" = ""]) >>> +ovn-nbctl lsp-add foo foo3 \ >>> +-- lsp-set-addresses foo3 "f0:00:00:01:02:05 fd11::4" >>> + >>> +# Logical port 'bar1' in switch 'bar'. >>> +ADD_NAMESPACES(bar1) >>> +ADD_VETH(bar1, bar1, br-int, "fd12::2/64", "f0:00:00:01:02:06", \ >>> +"fd7b:6b4d:7b25:d22f::3", ,"fd7b:6b4d:7b25:d22f::3") >>> +OVS_WAIT_UNTIL([test "$(ip netns exec foo1 ip a | grep fd12::2 | grep >>> tentative)" = ""]) >>> +ovn-nbctl lsp-add bar bar1 \ >>> +-- lsp-set-addresses bar1 "f0:00:00:01:02:06 fd12::2" >>> + >>> +# wait for ovn-controller to catch up. 
>>> +ovn-nbctl --wait=hv sync >>> + >>> +# Force ipv6 nd neighbour solicitation >>> +NS_EXEC([foo1], [ping6 -c 1 fd12::2]) >>> + >>> +NETNS_DAEMONIZE([foo1], [tcpdump -vvvv -ttt -l -nn -e -i foo1 'ether >>> dst 0a:58:a9:fe:01:01 and icmp6' > foo1-icmp6.pcap 2> foo1-tcpdump.stderr], >>> [foo1-icmp6-tcpdump.pid]) >>> +OVS_WAIT_UNTIL([grep "listening" foo1-tcpdump.stderr]) >>> + >>> +# 'foo1' should be able to ping 'bar1' >>> +NS_CHECK_EXEC([foo1], [ping6 -v -q -c 3 -i 0.3 -w 2 fd12::2 | >>> FORMAT_PING], \ >>> +[0], [dnl >>> +3 packets transmitted, 3 received, 0% packet loss, time 0ms >>> +]) >>> +OVS_WAIT_UNTIL([ >>> + total_pkts=$(cat foo1-icmp6.pcap| grep "echo request" | wc -l) >>> + test "${total_pkts}" = "3" >>> +]) >>> + >>> +# Force ipv6 nd neighbour solicitation >>> +NS_EXEC([foo2], [ping6 -c 1 fd12::2]) >>> + >>> +NETNS_DAEMONIZE([foo2], [tcpdump -vvvv -ttt -l -nn -e -i foo2 'ether >>> dst 0a:58:a9:fe:01:01 and icmp6' > foo2-icmp6.pcap 2> foo2-tcpdump.stderr], >>> [foo2-icmp6-tcpdump.pid]) >>> +OVS_WAIT_UNTIL([grep "listening" foo2-tcpdump.stderr]) >>> + >>> +# 'foo2' should be able to ping 'bar1' >>> +NS_CHECK_EXEC([foo2], [ping6 -v -q -c 3 -i 0.3 -w 2 fd12::2 | >>> FORMAT_PING], \ >>> +[0], [dnl >>> +3 packets transmitted, 3 received, 0% packet loss, time 0ms >>> +]) >>> +OVS_WAIT_UNTIL([ >>> + total_pkts=$(cat foo2-icmp6.pcap| grep "echo request" | wc -l) >>> + test "${total_pkts}" = "3" >>> +]) >>> + >>> +# Force ipv6 nd neighbour solicitation >>> +NS_EXEC([foo3], [ping6 -c 1 fd12::2]) >>> + >>> +NETNS_DAEMONIZE([foo3], [tcpdump -vvvv -ttt -l -nn -e -i foo3 'ether >>> dst 0a:58:a9:fe:01:01 and icmp6' > foo3-icmp6.pcap 2> foo3-tcpdump.stderr], >>> [foo3-icmp6-tcpdump.pid]) >>> +OVS_WAIT_UNTIL([grep "listening" foo3-tcpdump.stderr]) >>> + >>> +# 'foo3' should be able to ping 'bar1' >>> +NS_CHECK_EXEC([foo3], [ping6 -v -q -c 3 -i 0.3 -w 2 fd12::2 | >>> FORMAT_PING], \ >>> +[0], [dnl >>> +3 packets transmitted, 3 received, 0% packet loss, time 0ms >>> +]) >>> +OVS_WAIT_UNTIL([ >>> + total_pkts=$(cat foo3-icmp6.pcap| grep "echo request" | wc -l) >>> + test "${total_pkts}" = "3" >>> +]) >>> + >>> +OVS_APP_EXIT_AND_WAIT([ovn-controller]) >>> + >>> +as ovn-sb >>> +OVS_APP_EXIT_AND_WAIT([ovsdb-server]) >>> + >>> +as ovn-nb >>> +OVS_APP_EXIT_AND_WAIT([ovsdb-server]) >>> + >>> +as northd >>> +OVS_APP_EXIT_AND_WAIT([NORTHD_TYPE]) >>> + >>> +as >>> +OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d >>> +/connection dropped.*/d"]) >>> +AT_CLEANUP >>> +]) >>> + >>> -- >>> 2.32.0 >>> >>> _______________________________________________ >>> dev mailing list >>> [email protected] >>> https://mail.openvswitch.org/mailman/listinfo/ovs-dev >>> >>> >> Thanks, >> Ales >> >> -- >> >> Ales Musil >> >> Senior Software Engineer - OVN Core >> >> Red Hat EMEA <https://www.redhat.com> >> >> [email protected] IM: amusil >> <https://red.ht/sig> >> > > > -- > *Quique Llorente* > > CNV networking Senior Software Engineer > > Red Hat EMEA <https://www.redhat.com/> > > [email protected] <[email protected]> > @RedHat <https://twitter.com/redhat> Red Hat > <https://www.linkedin.com/company/red-hat> Red Hat > <https://www.facebook.com/RedHatInc> > <https://www.redhat.com/> > -- *Quique Llorente* CNV networking Senior Software Engineer Red Hat EMEA <https://www.redhat.com/> [email protected] <[email protected]> @RedHat <https://twitter.com/redhat> Red Hat <https://www.linkedin.com/company/red-hat> Red Hat <https://www.facebook.com/RedHatInc> <https://www.redhat.com/> 
