On Mon, Apr 22, 2024 at 4:51 PM Dumitru Ceara <[email protected]> wrote:
> On 4/22/24 22:43, Dumitru Ceara wrote:
> > On 4/17/24 14:41, Lorenzo Bianconi wrote:
> >> Similar to what is already implemented for routed e/w traffic,
> >> introduce pmtud support for e/w traffic between two logical switch ports
> >> connected to the same logical switch, but running on two different
> >> hypervisors.
> >>
> >> Acked-by: Mark Michelson <[email protected]>
> >> Reported-at: https://issues.redhat.com/browse/FDP-362
> >> Signed-off-by: Lorenzo Bianconi <[email protected]>
> >> ---
> >> Changes since v2:
> >> - minor changes
> >
> > Hi Lorenzo,
> >
> > Thanks for the new version. However, this fails multinode CI:
> >
> > https://github.com/dceara/ovn/actions/runs/8786825542/job/24112259604
> >
> > It hangs when cleaning up the second test, please see below.
> >
> >> Changes since v1:
> >> - move logic in consider_port_binding
> >> - add more self-test
> >> - fix typos
> >> ---
> >
> > [...]
> >
> >>
> >> diff --git a/tests/multinode-macros.at b/tests/multinode-macros.at
> >> index c04506a52..7a3b5cb50 100644
> >> --- a/tests/multinode-macros.at
> >> +++ b/tests/multinode-macros.at
> >> @@ -7,6 +7,10 @@
> >> m4_define([M_NS_EXEC],
> >>     [podman exec $1 ip netns exec $2 $3])
> >>
> >> +# M_NS_DAEMONIZE([fake_node],[namespace], [command], [pidfile])
> >> +m4_define([M_NS_DAEMONIZE],
> >> +    [podman exec $1 ip netns exec $2 $3 & echo $! > $4])
> >> +
> >
> > This actually stores the PID (in the host pid namespace) of "podman
> > exec" into $4. Later we wrongfully call "as <container> kill <pid>".
> >
> > One way to make this work seems to be to automatically cleanup (similar
> > to NETNS_DAEMONIZE()):
> >
> > # M_NS_DAEMONIZE([fake_node],[namespace], [command], [pidfile])
> > m4_define([M_NS_DAEMONIZE],
> >     [podman exec $1 ip netns exec $2 $3 & echo $! > $4
> >      echo "kill \`cat $4\`" >> cleanup
> >     ]
> > )
> >
> > The "cleanup" script gets executed at the end of the test, outside any
> > container so it will kill all "podman exec" commands. If we go this
> > way, we don't need the explicit "kill" calls below (podman exec is
> > killed so the command it ran, nc, is also killed).
> >
> > What do you think, Lorenzo? Mark, Numan, do you have any other ideas?

+1. Sounds good to me.

Numan

> >
> >> # M_NS_CHECK_EXEC([fake_node], [namespace], [command], other_params...)
> >> #
> >> # Wrapper for AT_CHECK that executes 'command' inside 'fake_node''s namespace'.
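
To make the cleanup idea above a bit more concrete, here is a rough,
untested sketch; it only combines Dumitru's proposed macro with the test
code already in the patch, nothing new:

    # M_NS_DAEMONIZE([fake_node],[namespace], [command], [pidfile])
    m4_define([M_NS_DAEMONIZE],
        [podman exec $1 ip netns exec $2 $3 & echo $! > $4
         echo "kill \`cat $4\`" >> cleanup
        ]
    )

    # A test then only starts the daemon, e.g.:
    M_NS_DAEMONIZE([ovn-chassis-2], [sw0p2], [nc -u -l 8080 >/dev/null 2>&1], [nc.pid])

    # and drops the trailing "m_as ovn-chassis-2 kill $(cat nc.pid)":
    # the generated "cleanup" script runs on the host at the end of the
    # test, kills the "podman exec" process recorded in nc.pid, and that
    # in turn terminates the nc listener inside the sw0p2 namespace.
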
> >> diff --git a/tests/multinode.at b/tests/multinode.at
> >> index 0187382be..d9085b64d 100644
> >> --- a/tests/multinode.at
> >> +++ b/tests/multinode.at
> >> @@ -154,6 +154,11 @@ check multinode_nbctl lr-nat-add lr0 snat 172.20.0.100 20.0.0.0/24
> >> check multinode_nbctl acl-add sw0 from-lport 1002 'ip4 || ip6' allow-related
> >> check multinode_nbctl acl-add sw1 from-lport 1002 'ip4 || ip6' allow-related
> >>
> >> +# create LB
> >> +check multinode_nbctl lb-add lb0 10.0.0.1:8080 10.0.0.4:8080 udp
> >> +check multinode_nbctl ls-lb-add sw0 lb0
> >> +M_NS_DAEMONIZE([ovn-chassis-2], [sw0p2], [nc -u -l 8080 >/dev/null 2>&1], [nc.pid])
> >> +
> >> m_as ovn-gw-1 ip netns add ovn-ext0
> >> m_as ovn-gw-1 ovs-vsctl add-port br-ex ext0 -- set interface ext0 type=internal
> >> m_as ovn-gw-1 ip link set ext0 netns ovn-ext0
> >> @@ -207,6 +212,14 @@ M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ping -q -c 3 -i 0.3 -w 2 172.20.1.2 |
> >> 3 packets transmitted, 3 received, 0% packet loss, time 0ms
> >> ])
> >>
> >> +m_as ovn-chassis-1 ip route change 170.168.0.0/16 mtu 1000 dev eth1
> >> +for i in $(seq 30); do
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [sh -c 'dd bs=512 count=2 if=/dev/urandom |nc -u 10.0.0.1 8080'], [ignore], [ignore], [ignore])
> >> +done
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ip route get 10.0.0.1 dev sw0p1 | grep -q 'mtu 942'])
> >> +
> >> +m_as ovn-chassis-2 kill $(cat nc.pid)
> >
> > This is wrong, nc.pid doesn't make sense inside ovn-chassis-2
> >
> >> +
> >> AT_CLEANUP
> >>
> >> AT_SETUP([ovn multinode pmtu - distributed router - vxlan])
> >> @@ -696,6 +709,11 @@ check multinode_nbctl lr-nat-add lr0 snat 172.20.0.100 20.0.0.0/24
> >> check multinode_nbctl acl-add sw0 from-lport 1002 'ip4 || ip6' allow-related
> >> check multinode_nbctl acl-add sw1 from-lport 1002 'ip4 || ip6' allow-related
> >>
> >> +# create LB
> >> +check multinode_nbctl lb-add lb0 10.0.0.1:8080 20.0.0.3:8080 udp
> >> +check multinode_nbctl lr-lb-add lr0 lb0
> >> +M_NS_DAEMONIZE([ovn-chassis-2], [sw1p1], [nc -u -l 8080 >/dev/null 2>&1], [nc.pid])
> >> +
> >> m_as ovn-gw-1 ip netns add ovn-ext0
> >> m_as ovn-gw-1 ovs-vsctl add-port br-ex ext0 -- set interface ext0 type=internal
> >> m_as ovn-gw-1 ip link set ext0 netns ovn-ext0
> >> @@ -751,6 +769,18 @@ M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ping -q -c 3 -i 0.3 -w 2 172.20.1.2 |
> >> M_NS_CHECK_EXEC([ovn-gw-1], [ovn-ext0], [ip link set dev ext1 mtu 1100])
> >> M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ping -c 20 -i 0.5 -s 1300 -M do 172.20.1.2 2>&1 |grep -q "mtu = 1100"])
>
> Missing space before "grep".
>
> >>
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ip route flush dev sw0p1])
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ip route add 10.0.0.0/24 dev sw0p1])
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ip route add default via 10.0.0.1 dev sw0p1])
> >> +
> >> +m_as ovn-chassis-1 ip route change 170.168.0.0/16 mtu 1000 dev eth1
> >> +for i in $(seq 30); do
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [sh -c 'dd bs=512 count=2 if=/dev/urandom |nc -u 10.0.0.1 8080'], [ignore], [ignore], [ignore])
> >> +done
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ip route get 10.0.0.1 dev sw0p1 | grep -q 'mtu 942'])
> >> +
> >> +m_as ovn-chassis-2 kill $(cat nc.pid)
> >
> > Same reasoning about wrong nc.pid inside the container.
> >
> >> +
> >> AT_CLEANUP
> >>
> >> AT_SETUP([ovn multinode pmtu - gw router - vxlan])
> >> @@ -834,6 +864,11 @@ check multinode_nbctl lr-nat-add lr0 snat 172.20.0.100 20.0.0.0/24
> >> check multinode_nbctl acl-add sw0 from-lport 1002 'ip4 || ip6' allow-related
> >> check multinode_nbctl acl-add sw1 from-lport 1002 'ip4 || ip6' allow-related
> >>
> >> +# create LB
> >> +check multinode_nbctl lb-add lb0 10.0.0.1:8080 20.0.0.3:8080 udp
> >> +check multinode_nbctl lr-lb-add lr0 lb0
> >> +M_NS_DAEMONIZE([ovn-chassis-2], [sw1p1], [nc -u -l 8080 >/dev/null 2>&1], [nc.pid])
> >> +
> >> m_as ovn-gw-1 ip netns add ovn-ext0
> >> m_as ovn-gw-1 ovs-vsctl add-port br-ex ext0 -- set interface ext0 type=internal
> >> m_as ovn-gw-1 ip link set ext0 netns ovn-ext0
> >> @@ -882,4 +917,120 @@ M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ping -q -c 3 -i 0.3 -w 2 172.20.1.2 |
> >>
> >> M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ping -c 20 -i 0.5 -s 1300 -M do 172.20.1.2 2>&1 |grep -q "mtu = 1150"])
>
> Same here.
>
> >>
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ip route flush dev sw0p1])
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ip route add 10.0.0.0/24 dev sw0p1])
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ip route add default via 10.0.0.1 dev sw0p1])
> >> +
> >> +m_as ovn-chassis-1 ip route change 170.168.0.0/16 mtu 1000 dev eth1
> >> +for i in $(seq 30); do
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [sh -c 'dd bs=512 count=2 if=/dev/urandom |nc -u 10.0.0.1 8080'], [ignore], [ignore], [ignore])
> >> +done
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ip route get 10.0.0.1 dev sw0p1 | grep -q 'mtu 950'])
> >> +
> >> +m_as ovn-chassis-2 kill $(cat nc.pid)
> >
> > Here too.
> >
> >> +
> >> +AT_CLEANUP
> >> +
> >> +AT_SETUP([ovn multinode pmtu - logical switch - geneve])
> >> +
> >> +# Check that ovn-fake-multinode setup is up and running
> >> +check_fake_multinode_setup
> >> +
> >> +# Delete the multinode NB and OVS resources before starting the test.
> >> +cleanup_multinode_resources
> >> +
> >> +m_as ovn-chassis-1 ip link del sw0p1-p
> >> +m_as ovn-chassis-2 ip link del sw0p2-p
> >> +
> >> +# Reset geneve tunnels
> >> +for c in ovn-chassis-1 ovn-chassis-2 ovn-gw-1
> >> +do
> >> +    m_as $c ovs-vsctl set open . external-ids:ovn-encap-type=geneve
> >> +done
> >> +
> >> +OVS_WAIT_UNTIL([m_as ovn-chassis-1 ip link show | grep -q genev_sys])
> >> +OVS_WAIT_UNTIL([m_as ovn-chassis-2 ip link show | grep -q genev_sys])
> >> +OVS_WAIT_UNTIL([m_as ovn-gw-1 ip link show | grep -q genev_sys])
> >> +
> >> +# Test East-West switching
> >> +check multinode_nbctl ls-add sw0
> >> +check multinode_nbctl lsp-add sw0 sw0-port1
> >> +check multinode_nbctl lsp-set-addresses sw0-port1 "50:54:00:00:00:03 10.0.0.3 1000::3"
> >> +check multinode_nbctl lsp-add sw0 sw0-port2
> >> +check multinode_nbctl lsp-set-addresses sw0-port2 "50:54:00:00:00:04 10.0.0.4 1000::4"
> >> +
> >> +m_as ovn-chassis-1 /data/create_fake_vm.sh sw0-port1 sw0p1 50:54:00:00:00:03 10.0.0.3 24 10.0.0.1 1000::3/64 1000::a
> >> +m_as ovn-chassis-2 /data/create_fake_vm.sh sw0-port2 sw0p2 50:54:00:00:00:04 10.0.0.4 24 10.0.0.1 1000::4/64 1000::a
> >> +
> >> +# Create the second logical switch with one port
> >> +check multinode_nbctl ls-add sw1
> >> +check multinode_nbctl lsp-add sw1 sw1-port1
> >> +check multinode_nbctl lsp-set-addresses sw1-port1 "40:54:00:00:00:03 20.0.0.3 2000::3"
> >> +
> >> +# Create a logical router and attach both logical switches
> >> +check multinode_nbctl lr-add lr0
> >> +check multinode_nbctl lrp-add lr0 lr0-sw0 00:00:00:00:ff:01 10.0.0.1/24 1000::a/64
> >> +check multinode_nbctl lsp-add sw0 sw0-lr0
> >> +check multinode_nbctl lsp-set-type sw0-lr0 router
> >> +check multinode_nbctl lsp-set-addresses sw0-lr0 router
> >> +check multinode_nbctl lsp-set-options sw0-lr0 router-port=lr0-sw0
> >> +
> >> +check multinode_nbctl lrp-add lr0 lr0-sw1 00:00:00:00:ff:02 20.0.0.1/24 2000::a/64
> >> +check multinode_nbctl lsp-add sw1 sw1-lr0
> >> +check multinode_nbctl lsp-set-type sw1-lr0 router
> >> +check multinode_nbctl lsp-set-addresses sw1-lr0 router
> >> +check multinode_nbctl lsp-set-options sw1-lr0 router-port=lr0-sw1
> >> +
> >> +m_as ovn-chassis-2 /data/create_fake_vm.sh sw1-port1 sw1p1 40:54:00:00:00:03 20.0.0.3 24 20.0.0.1 2000::3/64 2000::a
> >> +
> >> +check multinode_nbctl lr-nat-add lr0 snat 172.20.0.100 10.0.0.0/24
> >> +check multinode_nbctl lr-nat-add lr0 snat 172.20.0.100 20.0.0.0/24
> >> +
> >> +check multinode_nbctl lrp-set-gateway-chassis lr0-sw0 ovn-chassis-1 10
> >> +check multinode_nbctl lrp-set-gateway-chassis lr0-sw1 ovn-chassis-2 10
> >> +
> >> +# create some ACLs
> >> +check multinode_nbctl acl-add sw0 from-lport 1002 'ip4 || ip6' allow-related
> >> +check multinode_nbctl acl-add sw1 from-lport 1002 'ip4 || ip6' allow-related
> >> +
> >> +check multinode_nbctl lb-add lb0 10.0.0.1:8080 10.0.0.4:8080 udp
> >> +check multinode_nbctl ls-lb-add sw0 lb0
> >> +M_NS_DAEMONIZE([ovn-chassis-2], [sw0p2], [nc -u -l 8080 >/dev/null 2>&1], [nc.pid])
> >> +
> >> +m_wait_for_ports_up
> >> +
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ping -q -c 3 -i 0.3 -w 2 10.0.0.4 | FORMAT_PING], \
> >> +[0], [dnl
> >> +3 packets transmitted, 3 received, 0% packet loss, time 0ms
> >> +])
> >> +
> >> +# Change ptmu for the geneve tunnel
> >> +m_as ovn-chassis-1 ip route change 170.168.0.0/16 mtu 1200 dev eth1
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ping -c 5 -s 1300 -M do 10.0.0.4 2>&1 |grep -q "message too long, mtu=1142"])
>
> Same here.
>
> >> +
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ip route flush dev sw0p1])
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ip route add 10.0.0.0/24 dev sw0p1])
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ip route add default via 10.0.0.1 dev sw0p1])
> >> +
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ping -q -c 3 -i 0.3 -w 2 20.0.0.3 | FORMAT_PING], \
> >> +[0], [dnl
> >> +3 packets transmitted, 3 received, 0% packet loss, time 0ms
> >> +])
> >> +
> >> +# Change ptmu for the geneve tunnel
> >> +m_as ovn-chassis-1 ip route change 170.168.0.0/16 mtu 1100 dev eth1
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ping -c 5 -s 1300 -M do 20.0.0.3 2>&1 |grep -q "message too long, mtu=1042"])
>
> Same here
>
> >> +
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ip route flush dev sw0p1])
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ip route add 10.0.0.0/24 dev sw0p1])
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ip route add default via 10.0.0.1 dev sw0p1])
> >> +
> >> +m_as ovn-chassis-1 ip route change 170.168.0.0/16 mtu 1000 dev eth1
> >> +for i in $(seq 30); do
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [sh -c 'dd bs=512 count=2 if=/dev/urandom |nc -u 10.0.0.1 8080'], [ignore], [ignore], [ignore])
> >> +done
> >> +M_NS_CHECK_EXEC([ovn-chassis-1], [sw0p1], [ip route get 10.0.0.1 dev sw0p1 | grep -q 'mtu 942'])
> >> +
> >> +m_as ovn-chassis-2 kill $(cat nc.pid)
> >
> > This too.
> >
> >> +
> >> AT_CLEANUP
> >> diff --git a/tests/ovn-controller.at b/tests/ovn-controller.at
> >> index f2c792c9c..0d694b1d9 100644
> >> --- a/tests/ovn-controller.at
> >> +++ b/tests/ovn-controller.at
> >> @@ -2880,3 +2880,66 @@ AT_CHECK([test x"$port_uuid"=$(ovs-vsctl get port $fakech_tunnel _uuid)])
> >> OVN_CLEANUP([hv1])
> >> AT_CLEANUP
> >> ])
> >> +
> >> +OVN_FOR_EACH_NORTHD([
> >> +AT_SETUP([ovn-controller - pmtud flows])
> >> +AT_KEYWORDS([pmtud])
> >> +
> >> +ovn_start
> >> +
> >> +net_add n1
> >> +sim_add hv1
> >> +ovs-vsctl add-br br-phys
> >> +ovn_attach n1 br-phys 192.168.0.1
> >> +
> >> +check ovn-nbctl ls-add ls1 \
> >> +    -- lsp-add ls1 lsp1 \
> >> +    -- lsp-set-addresses lsp1 "00:00:00:00:00:01 192.168.1.1" \
> >> +    -- lsp-add ls1 lsp2 \
> >> +    -- lsp-set-addresses lsp2 "00:00:00:00:00:02 192.168.1.2"
> >> +
> >> +as hv1
> >> +check ovs-vsctl \
> >> +    -- add-port br-int vif1 \
> >> +    -- set Interface vif1 external_ids:iface-id=lsp1 \
> >> +    -- add-port br-int vif2 \
> >> +    -- set Interface vif2 external_ids:iface-id=lsp2
> >> +
> >> +AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=OFTABLE_CT_ZONE_LOOKUP | \
> >> +    sed -e 's/cookie=0x.*, duration=.*, table/cookie=??, duration=??, table/' | \
> >> +    sed -e 's/actions=load:0x.*->NXM_NX_REG13/actions=load:0x?->NXM_NX_REG13/' | \
> >> +    grep -v NXST_FLOW |sort], [0], [dnl
> >> + cookie=??, duration=??, table=OFTABLE_CT_ZONE_LOOKUP, n_packets=0, n_bytes=0, idle_age=0, priority=0 actions=resubmit(,OFTABLE_LOG_INGRESS_PIPELINE)
> >> + cookie=??, duration=??, table=OFTABLE_CT_ZONE_LOOKUP, n_packets=0, n_bytes=0, idle_age=0, priority=100,reg14=0x1,metadata=0x1 actions=load:0x?->NXM_NX_REG13[[0..15]],load:0x2->NXM_NX_REG11[[]],load:0x1->NXM_NX_REG12[[]],resubmit(,OFTABLE_LOG_INGRESS_PIPELINE)
> >> + cookie=??, duration=??, table=OFTABLE_CT_ZONE_LOOKUP, n_packets=0, n_bytes=0, idle_age=0, priority=100,reg14=0x2,metadata=0x1 actions=load:0x?->NXM_NX_REG13[[0..15]],load:0x2->NXM_NX_REG11[[]],load:0x1->NXM_NX_REG12[[]],resubmit(,OFTABLE_LOG_INGRESS_PIPELINE)
> >> +])
> >> +
> >> +check ovn-nbctl lsp-add ls1 lsp3 \
> >> +    -- lsp-set-addresses lsp3 "00:00:00:00:00:03 192.168.1.3"
> >> +check ovs-vsctl \
> >> +    -- add-port br-int vif3 \
> >> +    -- set Interface vif3 external_ids:iface-id=lsp3
> >> +
> >> +AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=OFTABLE_CT_ZONE_LOOKUP | \
> >> +    sed -e 's/cookie=0x.*, duration=.*, table/cookie=??, duration=??, table/' | \
> >> +    sed -e 's/actions=load:0x.*->NXM_NX_REG13/actions=load:0x?->NXM_NX_REG13/' | \
> >> +    grep -v NXST_FLOW |sort], [0], [dnl
> >> + cookie=??, duration=??, table=OFTABLE_CT_ZONE_LOOKUP, n_packets=0, n_bytes=0, idle_age=0, priority=0 actions=resubmit(,OFTABLE_LOG_INGRESS_PIPELINE)
> >> + cookie=??, duration=??, table=OFTABLE_CT_ZONE_LOOKUP, n_packets=0, n_bytes=0, idle_age=0, priority=100,reg14=0x1,metadata=0x1 actions=load:0x?->NXM_NX_REG13[[0..15]],load:0x2->NXM_NX_REG11[[]],load:0x1->NXM_NX_REG12[[]],resubmit(,OFTABLE_LOG_INGRESS_PIPELINE)
> >> + cookie=??, duration=??, table=OFTABLE_CT_ZONE_LOOKUP, n_packets=0, n_bytes=0, idle_age=0, priority=100,reg14=0x2,metadata=0x1 actions=load:0x?->NXM_NX_REG13[[0..15]],load:0x2->NXM_NX_REG11[[]],load:0x1->NXM_NX_REG12[[]],resubmit(,OFTABLE_LOG_INGRESS_PIPELINE)
> >> + cookie=??, duration=??, table=OFTABLE_CT_ZONE_LOOKUP, n_packets=0, n_bytes=0, idle_age=0, priority=100,reg14=0x3,metadata=0x1 actions=load:0x?->NXM_NX_REG13[[0..15]],load:0x2->NXM_NX_REG11[[]],load:0x1->NXM_NX_REG12[[]],resubmit(,OFTABLE_LOG_INGRESS_PIPELINE)
> >> +])
> >> +
> >> +check ovn-nbctl lsp-del lsp3
> >> +AT_CHECK([as hv1 ovs-ofctl dump-flows br-int table=OFTABLE_CT_ZONE_LOOKUP | \
> >> +    sed -e 's/cookie=0x.*, duration=.*, table/cookie=??, duration=??, table/' | \
> >> +    sed -e 's/actions=load:0x.*->NXM_NX_REG13/actions=load:0x?->NXM_NX_REG13/' |
> >> +    grep -v NXST_FLOW |sort], [0], [dnl
> >> + cookie=??, duration=??, table=OFTABLE_CT_ZONE_LOOKUP, n_packets=0, n_bytes=0, idle_age=0, priority=0 actions=resubmit(,OFTABLE_LOG_INGRESS_PIPELINE)
> >> + cookie=??, duration=??, table=OFTABLE_CT_ZONE_LOOKUP, n_packets=0, n_bytes=0, idle_age=0, priority=100,reg14=0x1,metadata=0x1 actions=load:0x?->NXM_NX_REG13[[0..15]],load:0x2->NXM_NX_REG11[[]],load:0x1->NXM_NX_REG12[[]],resubmit(,OFTABLE_LOG_INGRESS_PIPELINE)
> >> + cookie=??, duration=??, table=OFTABLE_CT_ZONE_LOOKUP, n_packets=0, n_bytes=0, idle_age=0, priority=100,reg14=0x2,metadata=0x1 actions=load:0x?->NXM_NX_REG13[[0..15]],load:0x2->NXM_NX_REG11[[]],load:0x1->NXM_NX_REG12[[]],resubmit(,OFTABLE_LOG_INGRESS_PIPELINE)
> >> +])
> >> +
> >> +OVN_CLEANUP([hv1])
> >> +AT_CLEANUP
> >> +])
> >
> > Regards,
> > Dumitru
> >

_______________________________________________
dev mailing list
[email protected]
https://mail.openvswitch.org/mailman/listinfo/ovs-dev
