On Thu, Aug 12, 2021 at 6:18 AM Mark Gray <[email protected]> wrote:
>
> On 11/08/2021 21:00, Mark Gray wrote:
> > Conjunctive flows require a unique integer as an identifier.
> > In a future patch, we will use conjunctive flows for LB
> > hairpin flows. This patch generates a unique integer id for each
> > LB.
> >
> > Signed-off-by: Mark Gray <[email protected]>
> > ---
> > controller/lflow.c | 54 ++++++++++++++++++++++++++++++++-----
> > controller/lflow.h | 2 ++
> > controller/ovn-controller.c | 14 ++++++++++
> > 3 files changed, 64 insertions(+), 6 deletions(-)
> >
> > diff --git a/controller/lflow.c b/controller/lflow.c
> > index 2e08261c6aed..4aa882d00c54 100644
> > --- a/controller/lflow.c
> > +++ b/controller/lflow.c
> > @@ -17,6 +17,7 @@
> > #include "lflow.h"
> > #include "coverage.h"
> > #include "ha-chassis.h"
> > +#include "lib/id-pool.h"
> > #include "lflow-cache.h"
> > #include "local_data.h"
> > #include "lport.h"
> > @@ -1528,8 +1529,13 @@ add_lb_ct_snat_vip_flows(struct ovn_controller_lb
> > *lb,
> > static void
> > consider_lb_hairpin_flows(const struct sbrec_load_balancer *sbrec_lb,
> > const struct hmap *local_datapaths,
> > - struct ovn_desired_flow_table *flow_table)
> > + struct ovn_desired_flow_table *flow_table,
> > + struct simap *ids)
> > {
> > + int id = simap_get(ids, sbrec_lb->name);
> > + VLOG_DBG("Load Balancer %s has conjunctive flow id %u",
> > + sbrec_lb->name, id);
> > +
> > /* Check if we need to add flows or not. If there is one datapath
> > * in the local_datapaths, it means all the datapaths of the lb
> > * will be in the local_datapaths. */
> > @@ -1576,11 +1582,26 @@ consider_lb_hairpin_flows(const struct
> > sbrec_load_balancer *sbrec_lb,
> > static void
> > add_lb_hairpin_flows(const struct sbrec_load_balancer_table *lb_table,
> > const struct hmap *local_datapaths,
> > - struct ovn_desired_flow_table *flow_table)
> > + struct ovn_desired_flow_table *flow_table,
> > + struct simap *ids,
> > + struct id_pool *pool)
> > {
> > + uint32_t id;
> > const struct sbrec_load_balancer *lb;
> > SBREC_LOAD_BALANCER_TABLE_FOR_EACH (lb, lb_table) {
> > - consider_lb_hairpin_flows(lb, local_datapaths, flow_table);
> > + /* Allocate a unique 32-bit integer to this load-balancer. This
> > will
> > + be used as a conjunctive flow id in the OFTABLE_CT_SNAT_HAIRPIN
> > + table.
> > +
> > + If we are unable to allocate a unique ID then we have run out of
> > + ids. As this is unrecoverable then we abort. However, this is
> > + unlikely to happen as it would be mean that we have created
> > + "UINT32_MAX" load-balancers.
> > + */
> > +
> > + ovs_assert(id_pool_alloc_id(pool, &id));
> > + simap_put(ids, lb->name, id);
> > + consider_lb_hairpin_flows(lb, local_datapaths, flow_table, ids);
> > }
> > }
> >
> > @@ -1686,7 +1707,9 @@ lflow_run(struct lflow_ctx_in *l_ctx_in, struct
> > lflow_ctx_out *l_ctx_out)
> > l_ctx_in->mac_binding_table,
> > l_ctx_in->local_datapaths,
> > l_ctx_out->flow_table);
> > add_lb_hairpin_flows(l_ctx_in->lb_table, l_ctx_in->local_datapaths,
> > - l_ctx_out->flow_table);
> > + l_ctx_out->flow_table,
> > + l_ctx_out->hairpin_lb_ids,
> > + l_ctx_out->hairpin_id_pool);
> > add_fdb_flows(l_ctx_in->fdb_table, l_ctx_in->local_datapaths,
> > l_ctx_out->flow_table);
> > }
> > @@ -1804,7 +1827,8 @@ lflow_processing_end:
> > * associated. */
> > for (size_t i = 0; i < n_dp_lbs; i++) {
> > consider_lb_hairpin_flows(dp_lbs[i], l_ctx_in->local_datapaths,
> > - l_ctx_out->flow_table);
> > + l_ctx_out->flow_table,
> > + l_ctx_out->hairpin_lb_ids);
> > }
> >
> > return handled;
> > @@ -1876,12 +1900,16 @@ lflow_handle_changed_lbs(struct lflow_ctx_in
> > *l_ctx_in,
> > struct lflow_ctx_out *l_ctx_out)
> > {
> > const struct sbrec_load_balancer *lb;
> > + struct id_pool *pool = l_ctx_out->hairpin_id_pool;
> > + struct simap *ids = l_ctx_out->hairpin_lb_ids;
> >
> > SBREC_LOAD_BALANCER_TABLE_FOR_EACH_TRACKED (lb, l_ctx_in->lb_table) {
> > if (sbrec_load_balancer_is_deleted(lb)) {
> > VLOG_DBG("Remove hairpin flows for deleted load balancer
> > "UUID_FMT,
> > UUID_ARGS(&lb->header_.uuid));
> > ofctrl_remove_flows(l_ctx_out->flow_table, &lb->header_.uuid);
> > + id_pool_free_id(pool, simap_get(ids, lb->name));
> > + simap_find_and_delete(ids, lb->name);
> > }
> > }
> >
> > @@ -1894,12 +1922,26 @@ lflow_handle_changed_lbs(struct lflow_ctx_in
> > *l_ctx_in,
> > VLOG_DBG("Remove hairpin flows for updated load balancer
> > "UUID_FMT,
> > UUID_ARGS(&lb->header_.uuid));
> > ofctrl_remove_flows(l_ctx_out->flow_table, &lb->header_.uuid);
> > + } else {
> > + /* Allocate a unique 32-bit integer to this load-balancer. This
> > + will be used as a conjunctive flow id in the
> > + OFTABLE_CT_SNAT_HAIRPIN table.
> > +
> > + If we are unable to allocate a unique ID then we have run
> > out of
> > + ids. As this is unrecoverable then we abort. However, this
> > is
> > + unlikely to happen as it would be mean that we have created
> > + "UINT32_MAX" load-balancers.
> > + */
> > + uint32_t id;
> > + ovs_assert(id_pool_alloc_id(pool, &id));
> > + simap_put(ids, lb->name, id);
> > }
> >
> > VLOG_DBG("Add load balancer hairpin flows for "UUID_FMT,
> > UUID_ARGS(&lb->header_.uuid));
> > consider_lb_hairpin_flows(lb, l_ctx_in->local_datapaths,
> > - l_ctx_out->flow_table);
> > + l_ctx_out->flow_table,
> > + l_ctx_out->hairpin_lb_ids);
> > }
> >
> > return true;
> > diff --git a/controller/lflow.h b/controller/lflow.h
> > index 6fcda5355cdf..c4a20ce309ba 100644
> > --- a/controller/lflow.h
> > +++ b/controller/lflow.h
> > @@ -158,6 +158,8 @@ struct lflow_ctx_out {
> > struct lflow_cache *lflow_cache;
> > uint32_t *conj_id_ofs;
> > bool conj_id_overflow;
> > + struct simap *hairpin_lb_ids;
> > + struct id_pool *hairpin_id_pool;
> > };
> >
> > void lflow_init(void);
> > diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
> > index 75f16b15e051..2fbea0d10587 100644
> > --- a/controller/ovn-controller.c
> > +++ b/controller/ovn-controller.c
> > @@ -33,6 +33,7 @@
> > #include "openvswitch/dynamic-string.h"
> > #include "encaps.h"
> > #include "fatal-signal.h"
> > +#include "lib/id-pool.h"
> > #include "if-status.h"
> > #include "ip-mcast.h"
> > #include "openvswitch/hmap.h"
> > @@ -2166,6 +2167,11 @@ struct lflow_output_persistent_data {
> > struct lflow_cache *lflow_cache;
> > };
> >
> > +struct lflow_output_hairpin_data {
> > + struct id_pool *pool;
> > + struct simap *ids;
> > +};
> > +
> > struct ed_type_lflow_output {
> > /* Logical flow table */
> > struct ovn_desired_flow_table flow_table;
> > @@ -2179,6 +2185,9 @@ struct ed_type_lflow_output {
> > /* Data which is persistent and not cleared during
> > * full recompute. */
> > struct lflow_output_persistent_data pd;
> > +
> > + /* Data for managing hairpin flow conjunctive flow ids. */
> > + struct lflow_output_hairpin_data hd;
> > };
> >
> > static void
> > @@ -2300,6 +2309,8 @@ init_lflow_ctx(struct engine_node *node,
> > l_ctx_out->conj_id_ofs = &fo->pd.conj_id_ofs;
> > l_ctx_out->lflow_cache = fo->pd.lflow_cache;
> > l_ctx_out->conj_id_overflow = false;
> > + l_ctx_out->hairpin_id_pool = fo->hd.pool;
> > + l_ctx_out->hairpin_lb_ids = fo->hd.ids;
> > }
> >
> > static void *
> > @@ -2312,6 +2323,9 @@ en_lflow_output_init(struct engine_node *node
> > OVS_UNUSED,
> > ovn_extend_table_init(&data->meter_table);
> > data->pd.conj_id_ofs = 1;
> > lflow_resource_init(&data->lflow_resource_ref);
> > + data->hd.ids = xzalloc(sizeof(*(data->hd.ids)));
> > + simap_init(data->hd.ids);
> > + data->hd.pool = id_pool_create(1, UINT32_MAX - 1);
>
> I need to free these resources on cleanup.
Thanks for these patches.
They seem to have a huge impact on scale and reduce the number of OF flows.
In my test setup, these patches brought down the number of OF flows
from around 1.1M to 450k.
I applied this patch series to the main branch with the below changes
********************************
diff --git a/controller/lflow.c b/controller/lflow.c
index 4aa882d00c..78381f7c15 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -1590,13 +1590,13 @@ add_lb_hairpin_flows(const struct
sbrec_load_balancer_table *lb_table,
const struct sbrec_load_balancer *lb;
SBREC_LOAD_BALANCER_TABLE_FOR_EACH (lb, lb_table) {
/* Allocate a unique 32-bit integer to this load-balancer. This will
- be used as a conjunctive flow id in the OFTABLE_CT_SNAT_HAIRPIN
- table.
-
- If we are unable to allocate a unique ID then we have run out of
- ids. As this is unrecoverable then we abort. However, this is
- unlikely to happen as it would be mean that we have created
- "UINT32_MAX" load-balancers.
+ * be used as a conjunctive flow id in the OFTABLE_CT_SNAT_HAIRPIN
+ * table.
+ *
+ * If we are unable to allocate a unique ID then we have run out of
+ * ids. As this is unrecoverable then we abort. However, this is
+ * unlikely to happen as it would be mean that we have created
+ * "UINT32_MAX" load-balancers.
*/
ovs_assert(id_pool_alloc_id(pool, &id));
diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
index 2fbea0d105..678419ab3d 100644
--- a/controller/ovn-controller.c
+++ b/controller/ovn-controller.c
@@ -2169,7 +2169,7 @@ struct lflow_output_persistent_data {
struct lflow_output_hairpin_data {
struct id_pool *pool;
- struct simap *ids;
+ struct simap ids;
};
struct ed_type_lflow_output {
@@ -2310,7 +2310,7 @@ init_lflow_ctx(struct engine_node *node,
l_ctx_out->lflow_cache = fo->pd.lflow_cache;
l_ctx_out->conj_id_overflow = false;
l_ctx_out->hairpin_id_pool = fo->hd.pool;
- l_ctx_out->hairpin_lb_ids = fo->hd.ids;
+ l_ctx_out->hairpin_lb_ids = &fo->hd.ids;
}
static void *
@@ -2323,8 +2323,7 @@ en_lflow_output_init(struct engine_node *node OVS_UNUSED,
ovn_extend_table_init(&data->meter_table);
data->pd.conj_id_ofs = 1;
lflow_resource_init(&data->lflow_resource_ref);
- data->hd.ids = xzalloc(sizeof(*(data->hd.ids)));
- simap_init(data->hd.ids);
+ simap_init(&data->hd.ids);
data->hd.pool = id_pool_create(1, UINT32_MAX - 1);
return data;
}
@@ -2338,6 +2337,8 @@ en_lflow_output_cleanup(void *data)
ovn_extend_table_destroy(&flow_output_data->meter_table);
lflow_resource_destroy(&flow_output_data->lflow_resource_ref);
lflow_cache_destroy(flow_output_data->pd.lflow_cache);
+ simap_destroy(&flow_output_data->hd.ids);
+ id_pool_destroy(flow_output_data->hd.pool);
}
static void
diff --git a/controller/lflow.c b/controller/lflow.c
index 78381f7c15..a0cf2e50c9 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -1599,8 +1599,11 @@ add_lb_hairpin_flows(const struct
sbrec_load_balancer_table *lb_table,
* "UINT32_MAX" load-balancers.
*/
- ovs_assert(id_pool_alloc_id(pool, &id));
- simap_put(ids, lb->name, id);
+ id = simap_get(ids, lb->name);
+ if (!id) {
+ ovs_assert(id_pool_alloc_id(pool, &id));
+ simap_put(ids, lb->name, id);
+ }
consider_lb_hairpin_flows(lb, local_datapaths, flow_table, ids);
}
}
**************************************************************************
The above change — calling simap_get before allocating the id — was required.
Otherwise, on every engine recompute,
the load balancer ids were getting updated.
Thanks
Numan
>
> > return data;
> > }
> >
> >
>
> _______________________________________________
> dev mailing list
> [email protected]
> https://mail.openvswitch.org/mailman/listinfo/ovs-dev
>
_______________________________________________
dev mailing list
[email protected]
https://mail.openvswitch.org/mailman/listinfo/ovs-dev