Re: [ovs-dev] [PATCH ovn 3/5] controller: Add load balancer hairpin OF flows.

2020-10-26 Thread Numan Siddique
On Sat, Oct 24, 2020 at 1:53 AM Mark Michelson  wrote:
>
> On 10/21/20 3:25 AM, num...@ovn.org wrote:
> > From: Numan Siddique 
> >
> > Presently to handle the load balancer hairpin traffic (the traffic destined 
> > to the
> > load balancer VIP is dnatted to the backend which originated the traffic), 
> > ovn-northd
> > adds a lot of logical flows to check this scenario. This patch attempts to
> > reduce these logical flows. Each ovn-controller will read the load balancers from
> > the newly added southbound Load_Balancer table and adds the load balancer 
> > hairpin OF
> > flows in the tables 68, 69 and 70. Suppose the below load balancer is
> > configured
> >
> > 10.0.0.10:80 = 10.0.0.4:8080, 10.0.0.5:8090, then the below flows are added
> >
> > table=68, ip.src = 10.0.0.4,ip.dst=10.0.0.4,tcp.dst=8080 
> > actions=load:1->NXM_NX_REG9[7]
> > table=68, ip.src = 10.0.0.5,ip.dst=10.0.0.5,tcp.dst=8090 
> > actions=load:1->NXM_NX_REG9[7]
> > table=69, ip.src = 10.0.0.4,ip.dst=10.0.0.10,tcp.src=8080 
> > actions=load:1->NXM_NX_REG9[7]
> > table=69, ip.src = 10.0.0.5,ip.dst=10.0.0.10,tcp.src=8090 
> > actions=load:1->NXM_NX_REG9[7]
> > table=70, ct.trk && ct.dnat && ct.nw_dst == 10.0.0.10. actions=ct(commit, 
> > zone=reg12, nat(src=10.0.0.5))
> >
> > An upcoming patch will add OVN actions that do the lookup in these tables
> > to handle the hairpin traffic.
> >
> > Signed-off-by: Numan Siddique 
> > ---
> >   controller/lflow.c   | 256 +++
> >   controller/lflow.h   |   6 +-
> >   controller/ovn-controller.c  |  27 +-
> >   include/ovn/logical-fields.h |   3 +
> >   tests/ovn.at | 469 +++
> >   5 files changed, 759 insertions(+), 2 deletions(-)
> >
> > diff --git a/controller/lflow.c b/controller/lflow.c
> > index f631679c3f..77da4a5198 100644
> > --- a/controller/lflow.c
> > +++ b/controller/lflow.c
> > @@ -26,6 +26,7 @@
> >   #include "ovn-controller.h"
> >   #include "ovn/actions.h"
> >   #include "ovn/expr.h"
> > +#include "lib/lb.h"
> >   #include "lib/ovn-l7.h"
> >   #include "lib/ovn-sb-idl.h"
> >   #include "lib/extend-table.h"
> > @@ -1138,6 +1139,216 @@ add_neighbor_flows(struct ovsdb_idl_index 
> > *sbrec_port_binding_by_name,
> >   }
> >   }
> >
> > +static void
> > +add_lb_vip_hairpin_flows(struct ovn_lb *lb, struct lb_vip *lb_vip,
> > + struct lb_vip_backend *lb_backend,
> > + uint8_t lb_proto,
> > + struct ovn_desired_flow_table *flow_table)
> > +{
> > +uint64_t stub[1024 / 8];
> > +struct ofpbuf ofpacts = OFPBUF_STUB_INITIALIZER(stub);
> > +
> > +ofpbuf_clear();
>
> This ofpbuf_clear is not necessary. There are a couple other places in
> this file where there is an ofpbuf_clear directly after an
> initialization of an ofpbuf. Those can be removed as well.

Ack.

>
> > +uint8_t value = 1;
> > +put_load(, sizeof value, MFF_LOG_FLAGS,
> > + MLF_LOOKUP_LB_HAIRPIN_BIT, 1, );
> > +
> > +ovs_be32 vip4;
> > +struct in6_addr vip6;
> > +
> > +if (lb_vip->addr_family == AF_INET) {
> > +ovs_assert(ip_parse(lb_vip->vip, ));
> > +} else {
> > +ovs_assert(ipv6_parse(lb_vip->vip, ));
> > +}
> > +
> > +for (size_t i = 0; i < lb->slb->n_datapaths; i++) {
> > +struct match hairpin_match = MATCH_CATCHALL_INITIALIZER;
> > +struct match hairpin_reply_match = MATCH_CATCHALL_INITIALIZER;
> > +match_set_metadata(_match,
> > +htonll(lb->slb->datapaths[i]->tunnel_key));
> > +match_set_metadata(_reply_match,
> > +htonll(lb->slb->datapaths[i]->tunnel_key));
> > +
> > +if (lb_vip->addr_family == AF_INET) {
> > +ovs_be32 ip4;
> > +ovs_assert(ip_parse(lb_backend->ip, ));
> > +
> > +match_set_dl_type(_match, htons(ETH_TYPE_IP));
> > +match_set_nw_src(_match, ip4);
> > +match_set_nw_dst(_match, ip4);
> > +
> > +match_set_dl_type(_reply_match,
> > +htons(ETH_TYPE_IP));
> > +match_set_nw_src(_reply_match, ip4);
> > +match_set_nw_dst(_reply_match, vip4);
> > +} else {
> > +struct in6_addr ip6;
> > +ovs_assert(ipv6_parse(lb_backend->ip, ));
> > +
> > +match_set_dl_type(_match, htons(ETH_TYPE_IPV6));
> > +match_set_ipv6_src(_match, );
> > +match_set_ipv6_dst(_match, );
> > +
> > +match_set_dl_type(_reply_match,
> > +htons(ETH_TYPE_IPV6));
> > +match_set_ipv6_src(_reply_match, );
> > +match_set_ipv6_dst(_reply_match, );
> > +}
> > +
> > +if (lb_backend->port) {
> > +match_set_nw_proto(_match, lb_proto);
> > +match_set_tp_dst(_match, htons(lb_backend->port));
> > +
> > +

Re: [ovs-dev] [PATCH ovn 3/5] controller: Add load balancer hairpin OF flows.

2020-10-23 Thread Mark Michelson

On 10/21/20 3:25 AM, num...@ovn.org wrote:

From: Numan Siddique 

Presently to handle the load balancer hairpin traffic (the traffic destined to 
the
load balancer VIP is dnatted to the backend which originated the traffic), 
ovn-northd
adds a lot of logical flows to check this scenario. This patch attempts to
reduce these logical flows. Each ovn-controller will read the load balancers from
the newly added southbound Load_Balancer table and adds the load balancer 
hairpin OF
flows in the tables 68, 69 and 70. Suppose the below load balancer is configured

10.0.0.10:80 = 10.0.0.4:8080, 10.0.0.5:8090, then the below flows are added

table=68, ip.src = 10.0.0.4,ip.dst=10.0.0.4,tcp.dst=8080 
actions=load:1->NXM_NX_REG9[7]
table=68, ip.src = 10.0.0.5,ip.dst=10.0.0.5,tcp.dst=8090 
actions=load:1->NXM_NX_REG9[7]
table=69, ip.src = 10.0.0.4,ip.dst=10.0.0.10,tcp.src=8080 
actions=load:1->NXM_NX_REG9[7]
table=69, ip.src = 10.0.0.5,ip.dst=10.0.0.10,tcp.src=8090 
actions=load:1->NXM_NX_REG9[7]
table=70, ct.trk && ct.dnat && ct.nw_dst == 10.0.0.10. actions=ct(commit, 
zone=reg12, nat(src=10.0.0.5))

An upcoming patch will add OVN actions that do the lookup in these tables to
handle the hairpin traffic.

Signed-off-by: Numan Siddique 
---
  controller/lflow.c   | 256 +++
  controller/lflow.h   |   6 +-
  controller/ovn-controller.c  |  27 +-
  include/ovn/logical-fields.h |   3 +
  tests/ovn.at | 469 +++
  5 files changed, 759 insertions(+), 2 deletions(-)

diff --git a/controller/lflow.c b/controller/lflow.c
index f631679c3f..77da4a5198 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -26,6 +26,7 @@
  #include "ovn-controller.h"
  #include "ovn/actions.h"
  #include "ovn/expr.h"
+#include "lib/lb.h"
  #include "lib/ovn-l7.h"
  #include "lib/ovn-sb-idl.h"
  #include "lib/extend-table.h"
@@ -1138,6 +1139,216 @@ add_neighbor_flows(struct ovsdb_idl_index 
*sbrec_port_binding_by_name,
  }
  }
  
+static void

+add_lb_vip_hairpin_flows(struct ovn_lb *lb, struct lb_vip *lb_vip,
+ struct lb_vip_backend *lb_backend,
+ uint8_t lb_proto,
+ struct ovn_desired_flow_table *flow_table)
+{
+uint64_t stub[1024 / 8];
+struct ofpbuf ofpacts = OFPBUF_STUB_INITIALIZER(stub);
+
+ofpbuf_clear();


This ofpbuf_clear is not necessary. There are a couple other places in 
this file where there is an ofpbuf_clear directly after an 
initialization of an ofpbuf. Those can be removed as well.



+uint8_t value = 1;
+put_load(, sizeof value, MFF_LOG_FLAGS,
+ MLF_LOOKUP_LB_HAIRPIN_BIT, 1, );
+
+ovs_be32 vip4;
+struct in6_addr vip6;
+
+if (lb_vip->addr_family == AF_INET) {
+ovs_assert(ip_parse(lb_vip->vip, ));
+} else {
+ovs_assert(ipv6_parse(lb_vip->vip, ));
+}
+
+for (size_t i = 0; i < lb->slb->n_datapaths; i++) {
+struct match hairpin_match = MATCH_CATCHALL_INITIALIZER;
+struct match hairpin_reply_match = MATCH_CATCHALL_INITIALIZER;
+match_set_metadata(_match,
+htonll(lb->slb->datapaths[i]->tunnel_key));
+match_set_metadata(_reply_match,
+htonll(lb->slb->datapaths[i]->tunnel_key));
+
+if (lb_vip->addr_family == AF_INET) {
+ovs_be32 ip4;
+ovs_assert(ip_parse(lb_backend->ip, ));
+
+match_set_dl_type(_match, htons(ETH_TYPE_IP));
+match_set_nw_src(_match, ip4);
+match_set_nw_dst(_match, ip4);
+
+match_set_dl_type(_reply_match,
+htons(ETH_TYPE_IP));
+match_set_nw_src(_reply_match, ip4);
+match_set_nw_dst(_reply_match, vip4);
+} else {
+struct in6_addr ip6;
+ovs_assert(ipv6_parse(lb_backend->ip, ));
+
+match_set_dl_type(_match, htons(ETH_TYPE_IPV6));
+match_set_ipv6_src(_match, );
+match_set_ipv6_dst(_match, );
+
+match_set_dl_type(_reply_match,
+htons(ETH_TYPE_IPV6));
+match_set_ipv6_src(_reply_match, );
+match_set_ipv6_dst(_reply_match, );
+}
+
+if (lb_backend->port) {
+match_set_nw_proto(_match, lb_proto);
+match_set_tp_dst(_match, htons(lb_backend->port));
+
+match_set_nw_proto(_reply_match, lb_proto);
+match_set_tp_src(_reply_match,
+htons(lb_backend->port));
+}


Most of the above code can be moved outside the for loop since it 
creates the same matches every time. The only variable is the datapath 
tunnel key.


You can create the hairpin_match and hairpin_reply_match outside the 
loop, and set everything except the metadata. Then inside the loop, you 
can set the metadata on the matches and add the flows.



+
+ofctrl_add_flow(flow_table, 

[ovs-dev] [PATCH ovn 3/5] controller: Add load balancer hairpin OF flows.

2020-10-21 Thread numans
From: Numan Siddique 

Presently to handle the load balancer hairpin traffic (the traffic destined to 
the
load balancer VIP is dnatted to the backend which originated the traffic), 
ovn-northd
adds a lot of logical flows to check this scenario. This patch attempts to
reduce these logical flows. Each ovn-controller will read the load balancers from
the newly added southbound Load_Balancer table and adds the load balancer 
hairpin OF
flows in the tables 68, 69 and 70. Suppose the below load balancer is configured

10.0.0.10:80 = 10.0.0.4:8080, 10.0.0.5:8090, then the below flows are added

table=68, ip.src = 10.0.0.4,ip.dst=10.0.0.4,tcp.dst=8080 
actions=load:1->NXM_NX_REG9[7]
table=68, ip.src = 10.0.0.5,ip.dst=10.0.0.5,tcp.dst=8090 
actions=load:1->NXM_NX_REG9[7]
table=69, ip.src = 10.0.0.4,ip.dst=10.0.0.10,tcp.src=8080 
actions=load:1->NXM_NX_REG9[7]
table=69, ip.src = 10.0.0.5,ip.dst=10.0.0.10,tcp.src=8090 
actions=load:1->NXM_NX_REG9[7]
table=70, ct.trk && ct.dnat && ct.nw_dst == 10.0.0.10. actions=ct(commit, 
zone=reg12, nat(src=10.0.0.5))

An upcoming patch will add OVN actions that do the lookup in these tables to
handle the hairpin traffic.

Signed-off-by: Numan Siddique 
---
 controller/lflow.c   | 256 +++
 controller/lflow.h   |   6 +-
 controller/ovn-controller.c  |  27 +-
 include/ovn/logical-fields.h |   3 +
 tests/ovn.at | 469 +++
 5 files changed, 759 insertions(+), 2 deletions(-)

diff --git a/controller/lflow.c b/controller/lflow.c
index f631679c3f..77da4a5198 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -26,6 +26,7 @@
 #include "ovn-controller.h"
 #include "ovn/actions.h"
 #include "ovn/expr.h"
+#include "lib/lb.h"
 #include "lib/ovn-l7.h"
 #include "lib/ovn-sb-idl.h"
 #include "lib/extend-table.h"
@@ -1138,6 +1139,216 @@ add_neighbor_flows(struct ovsdb_idl_index 
*sbrec_port_binding_by_name,
 }
 }
 
+static void
+add_lb_vip_hairpin_flows(struct ovn_lb *lb, struct lb_vip *lb_vip,
+ struct lb_vip_backend *lb_backend,
+ uint8_t lb_proto,
+ struct ovn_desired_flow_table *flow_table)
+{
+uint64_t stub[1024 / 8];
+struct ofpbuf ofpacts = OFPBUF_STUB_INITIALIZER(stub);
+
+ofpbuf_clear();
+uint8_t value = 1;
+put_load(, sizeof value, MFF_LOG_FLAGS,
+ MLF_LOOKUP_LB_HAIRPIN_BIT, 1, );
+
+ovs_be32 vip4;
+struct in6_addr vip6;
+
+if (lb_vip->addr_family == AF_INET) {
+ovs_assert(ip_parse(lb_vip->vip, ));
+} else {
+ovs_assert(ipv6_parse(lb_vip->vip, ));
+}
+
+for (size_t i = 0; i < lb->slb->n_datapaths; i++) {
+struct match hairpin_match = MATCH_CATCHALL_INITIALIZER;
+struct match hairpin_reply_match = MATCH_CATCHALL_INITIALIZER;
+match_set_metadata(_match,
+htonll(lb->slb->datapaths[i]->tunnel_key));
+match_set_metadata(_reply_match,
+htonll(lb->slb->datapaths[i]->tunnel_key));
+
+if (lb_vip->addr_family == AF_INET) {
+ovs_be32 ip4;
+ovs_assert(ip_parse(lb_backend->ip, ));
+
+match_set_dl_type(_match, htons(ETH_TYPE_IP));
+match_set_nw_src(_match, ip4);
+match_set_nw_dst(_match, ip4);
+
+match_set_dl_type(_reply_match,
+htons(ETH_TYPE_IP));
+match_set_nw_src(_reply_match, ip4);
+match_set_nw_dst(_reply_match, vip4);
+} else {
+struct in6_addr ip6;
+ovs_assert(ipv6_parse(lb_backend->ip, ));
+
+match_set_dl_type(_match, htons(ETH_TYPE_IPV6));
+match_set_ipv6_src(_match, );
+match_set_ipv6_dst(_match, );
+
+match_set_dl_type(_reply_match,
+htons(ETH_TYPE_IPV6));
+match_set_ipv6_src(_reply_match, );
+match_set_ipv6_dst(_reply_match, );
+}
+
+if (lb_backend->port) {
+match_set_nw_proto(_match, lb_proto);
+match_set_tp_dst(_match, htons(lb_backend->port));
+
+match_set_nw_proto(_reply_match, lb_proto);
+match_set_tp_src(_reply_match,
+htons(lb_backend->port));
+}
+
+ofctrl_add_flow(flow_table, OFTABLE_CHK_LB_HAIRPIN, 100,
+lb->slb->header_.uuid.parts[0], _match,
+, >slb->header_.uuid);
+
+ofctrl_add_flow(flow_table, OFTABLE_CHK_LB_HAIRPIN_REPLY, 100,
+lb->slb->header_.uuid.parts[0],
+_reply_match,
+, >slb->header_.uuid);
+}
+
+ofpbuf_uninit();
+}
+
+static void
+add_lb_ct_snat_vip_flows(struct ovn_lb *lb, struct lb_vip *lb_vip,
+ struct ovn_desired_flow_table *flow_table)
+{
+ovs_be32 vip4;
+struct in6_addr vip6;
+
+if