Re: [ovs-dev] [PATCH ovn 2/2] northd: improve unreachable_ips flows processing for dp_groups
I think you should quote the results from the cover letter in this commit. Other than that, Acked-by: Mark Michelson. On 8/24/21 2:02 PM, Lorenzo Bianconi wrote: Improve code efficiency of load-balancer unreachable_ips flows processing when datapath_groups are enabled. We can avoid performing a flow lookup for each ovn_port in build_lrouter_flows_for_unreachable_ips() since lflows only depend on vips (match and action filters). Signed-off-by: Lorenzo Bianconi --- northd/ovn-northd.c | 51 ++--- 1 file changed, 43 insertions(+), 8 deletions(-) diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c index a8e69b3cb..1a48574f2 100644 --- a/northd/ovn-northd.c +++ b/northd/ovn-northd.c @@ -4357,7 +4357,7 @@ static struct hashrow_locks lflow_locks; /* Adds a row with the specified contents to the Logical_Flow table. * Version to use when locking is required. */ -static void +static struct ovn_lflow * do_ovn_lflow_add(struct hmap *lflow_map, struct ovn_datapath *od, uint32_t hash, enum ovn_stage stage, uint16_t priority, const char *match, const char *actions, const char *io_port, @@ -4373,7 +4373,7 @@ do_ovn_lflow_add(struct hmap *lflow_map, struct ovn_datapath *od, actions, ctrl_meter, hash); if (old_lflow) { hmapx_add(&old_lflow->od_group, od); -return; +return old_lflow; } } @@ -4388,10 +4388,11 @@ do_ovn_lflow_add(struct hmap *lflow_map, struct ovn_datapath *od, ovn_lflow_hint(stage_hint), where); hmapx_add(&lflow->od_group, od); hmap_insert_fast(lflow_map, &lflow->hmap_node, hash); +return lflow; } /* Adds a row with the specified contents to the Logical_Flow table.
 */ -static void +static struct ovn_lflow * ovn_lflow_add_at_with_hash(struct hmap *lflow_map, struct ovn_datapath *od, enum ovn_stage stage, uint16_t priority, const char *match, const char *actions, @@ -4399,16 +4400,21 @@ ovn_lflow_add_at_with_hash(struct hmap *lflow_map, struct ovn_datapath *od, const struct ovsdb_idl_row *stage_hint, const char *where, uint32_t hash) { +struct ovn_lflow *lflow; + ovs_assert(ovn_stage_to_datapath_type(stage) == ovn_datapath_get_type(od)); if (use_logical_dp_groups && use_parallel_build) { lock_hash_row(&lflow_locks, hash); -do_ovn_lflow_add(lflow_map, od, hash, stage, priority, match, - actions, io_port, stage_hint, where, ctrl_meter); +lflow = do_ovn_lflow_add(lflow_map, od, hash, stage, priority, match, + actions, io_port, stage_hint, where, + ctrl_meter); unlock_hash_row(&lflow_locks, hash); } else { -do_ovn_lflow_add(lflow_map, od, hash, stage, priority, match, - actions, io_port, stage_hint, where, ctrl_meter); +lflow = do_ovn_lflow_add(lflow_map, od, hash, stage, priority, match, + actions, io_port, stage_hint, where, + ctrl_meter); } +return lflow; } static void @@ -9422,6 +9428,26 @@ build_lrouter_defrag_flows_for_lb(struct ovn_northd_lb *lb, ds_destroy(&defrag_actions); } +static bool +ovn_dp_group_update_with_reference(struct ovn_lflow *lflow_ref, + struct ovn_datapath *od, + uint32_t hash) +{ +if (!use_logical_dp_groups || !lflow_ref) { +return false; +} + +if (use_parallel_build) { +lock_hash_row(&lflow_locks, hash); +hmapx_add(&lflow_ref->od_group, od); +unlock_hash_row(&lflow_locks, hash); +} else { +hmapx_add(&lflow_ref->od_group, od); +} + +return true; +} + static void build_lrouter_flows_for_unreachable_ips(struct ovn_northd_lb *lb, struct ovn_lb_vip *lb_vip, @@ -9431,6 +9457,7 @@ build_lrouter_flows_for_unreachable_ips(struct ovn_northd_lb *lb, struct ds *action) { bool ipv4 = IN6_IS_ADDR_V4MAPPED(&lb_vip->vip); +struct ovn_lflow *lflow_ref = NULL; ovs_be32 ipv4_addr; ds_clear(match); @@ -9474,7 +9501,15 @@ build_lrouter_flows_for_unreachable_ips(struct
ovn_northd_lb *lb, (!ipv4 && lrouter_port_ipv6_reachable(op, &lb_vip->vip))) { continue; } -ovn_lflow_add_at_with_hash(lflows, peer->od, + +if (ovn_dp_group_update_with_reference(lflow_ref, od, hash)) { +/* if we are running dp_groups, we do not need to run full + * lookup since lflow just depends on the vip and not on + * the ovn_port. + */ +
[ovs-dev] [PATCH ovn 2/2] northd: improve unreachable_ips flows processing for dp_groups
Improve code efficiency of load-balancer unreachable_ips flows processing when datapath_groups are enabled. We can avoid performing a flow lookup for each ovn_port in build_lrouter_flows_for_unreachable_ips() since lflows only depend on vips (match and action filters). Signed-off-by: Lorenzo Bianconi --- northd/ovn-northd.c | 51 ++--- 1 file changed, 43 insertions(+), 8 deletions(-) diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c index a8e69b3cb..1a48574f2 100644 --- a/northd/ovn-northd.c +++ b/northd/ovn-northd.c @@ -4357,7 +4357,7 @@ static struct hashrow_locks lflow_locks; /* Adds a row with the specified contents to the Logical_Flow table. * Version to use when locking is required. */ -static void +static struct ovn_lflow * do_ovn_lflow_add(struct hmap *lflow_map, struct ovn_datapath *od, uint32_t hash, enum ovn_stage stage, uint16_t priority, const char *match, const char *actions, const char *io_port, @@ -4373,7 +4373,7 @@ do_ovn_lflow_add(struct hmap *lflow_map, struct ovn_datapath *od, actions, ctrl_meter, hash); if (old_lflow) { hmapx_add(&old_lflow->od_group, od); -return; +return old_lflow; } } @@ -4388,10 +4388,11 @@ do_ovn_lflow_add(struct hmap *lflow_map, struct ovn_datapath *od, ovn_lflow_hint(stage_hint), where); hmapx_add(&lflow->od_group, od); hmap_insert_fast(lflow_map, &lflow->hmap_node, hash); +return lflow; } /* Adds a row with the specified contents to the Logical_Flow table.
 */ -static void +static struct ovn_lflow * ovn_lflow_add_at_with_hash(struct hmap *lflow_map, struct ovn_datapath *od, enum ovn_stage stage, uint16_t priority, const char *match, const char *actions, @@ -4399,16 +4400,21 @@ ovn_lflow_add_at_with_hash(struct hmap *lflow_map, struct ovn_datapath *od, const struct ovsdb_idl_row *stage_hint, const char *where, uint32_t hash) { +struct ovn_lflow *lflow; + ovs_assert(ovn_stage_to_datapath_type(stage) == ovn_datapath_get_type(od)); if (use_logical_dp_groups && use_parallel_build) { lock_hash_row(&lflow_locks, hash); -do_ovn_lflow_add(lflow_map, od, hash, stage, priority, match, - actions, io_port, stage_hint, where, ctrl_meter); +lflow = do_ovn_lflow_add(lflow_map, od, hash, stage, priority, match, + actions, io_port, stage_hint, where, + ctrl_meter); unlock_hash_row(&lflow_locks, hash); } else { -do_ovn_lflow_add(lflow_map, od, hash, stage, priority, match, - actions, io_port, stage_hint, where, ctrl_meter); +lflow = do_ovn_lflow_add(lflow_map, od, hash, stage, priority, match, + actions, io_port, stage_hint, where, + ctrl_meter); } +return lflow; } static void @@ -9422,6 +9428,26 @@ build_lrouter_defrag_flows_for_lb(struct ovn_northd_lb *lb, ds_destroy(&defrag_actions); } +static bool +ovn_dp_group_update_with_reference(struct ovn_lflow *lflow_ref, + struct ovn_datapath *od, + uint32_t hash) +{ +if (!use_logical_dp_groups || !lflow_ref) { +return false; +} + +if (use_parallel_build) { +lock_hash_row(&lflow_locks, hash); +hmapx_add(&lflow_ref->od_group, od); +unlock_hash_row(&lflow_locks, hash); +} else { +hmapx_add(&lflow_ref->od_group, od); +} + +return true; +} + static void build_lrouter_flows_for_unreachable_ips(struct ovn_northd_lb *lb, struct ovn_lb_vip *lb_vip, @@ -9431,6 +9457,7 @@ build_lrouter_flows_for_unreachable_ips(struct ovn_northd_lb *lb, struct ds *action) { bool ipv4 = IN6_IS_ADDR_V4MAPPED(&lb_vip->vip); +struct ovn_lflow *lflow_ref = NULL; ovs_be32 ipv4_addr; ds_clear(match); @@ -9474,7 +9501,15 @@ build_lrouter_flows_for_unreachable_ips(struct
ovn_northd_lb *lb, (!ipv4 && lrouter_port_ipv6_reachable(op, &lb_vip->vip))) { continue; } -ovn_lflow_add_at_with_hash(lflows, peer->od, + +if (ovn_dp_group_update_with_reference(lflow_ref, od, hash)) { +/* if we are running dp_groups, we do not need to run full + * lookup since lflow just depends on the vip and not on + * the ovn_port. + */ +continue; +} +lflow_ref = ovn_lflow_add_at_with_hash(lflows, peer->od, S_SWITCH_IN_L2_LKUP, 90,