Re: [ovs-dev] [PATCH net v4] net: openvswitch: fix to make sure flow_lookup() is not preempted

2020-10-18 Thread Jakub Kicinski
On Sat, 17 Oct 2020 20:24:51 +0200 Eelco Chaudron wrote:
> The flow_lookup() function uses per-CPU variables, which must be
> accessed with BH disabled. This is fine in the general NAPI use case,
> where the local BH is already disabled, but flow_lookup() is also
> called from the netlink context. This patch makes sure the BH is also
> disabled in the netlink path.
> 
> In addition, u64_stats_update_begin() requires a lock to ensure a
> single writer, which is not guaranteed here. Making the stats per-CPU
> and disabling NAPI (softirq) ensures that there is always only one
> writer.
> 
> Fixes: eac87c413bf9 ("net: openvswitch: reorder masks array based on usage")
> Reported-by: Juri Lelli 
> Signed-off-by: Eelco Chaudron 

Applied and queued for 5.9.2. Thanks Eelco!
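
For context: the netlink-side half of the fix is not visible in the hunks quoted later in this thread (the excerpt is truncated). It boils down to bracketing the lookup with local_bh_disable()/local_bh_enable(). A rough, simplified sketch of that caller follows; treat it as an illustration of the pattern rather than a verbatim copy of the applied commit.

/* Sketch of the netlink-path pattern described in the commit message.
 * The real caller lives in net/openvswitch/flow_table.c; this simplified
 * version only shows how BH gets disabled around flow_lookup().
 */
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	u32 __always_unused n_mask_hit;
	u32 __always_unused n_cache_hit;
	struct sw_flow *flow;
	u32 index = 0;

	/* Called from the netlink (process) context, hence preemptible;
	 * flow_lookup() needs BH disabled for its per-CPU stats.
	 */
	local_bh_disable();
	flow = flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
	local_bh_enable();

	return flow;
}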


[ovs-dev] [PATCH net v4] net: openvswitch: fix to make sure flow_lookup() is not preempted

2020-10-17 Thread Eelco Chaudron
The flow_lookup() function uses per-CPU variables, which must be
accessed with BH disabled. This is fine in the general NAPI use case,
where the local BH is already disabled, but flow_lookup() is also
called from the netlink context. This patch makes sure the BH is also
disabled in the netlink path.

In addition, u64_stats_update_begin() requires a lock to ensure a
single writer, which is not guaranteed here. Making the stats per-CPU
and disabling NAPI (softirq) ensures that there is always only one
writer.

Fixes: eac87c413bf9 ("net: openvswitch: reorder masks array based on usage")
Reported-by: Juri Lelli 
Signed-off-by: Eelco Chaudron 
---
v2: - Add u64_stats_update_begin() sync point protection
    - Moved patch to net from net-next branch

v3: - Add comment to flow_lookup() call
    - Some updates to code comments and the commit message

v4: - Rebase to latest net branch

 net/openvswitch/flow_table.c |   58 +-
 net/openvswitch/flow_table.h |8 --
 2 files changed, 41 insertions(+), 25 deletions(-)
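
Before the diff itself, here is a minimal, self-contained sketch of the per-CPU stats layout and the u64_stats writer/reader pattern the patch moves to. The struct and field names mirror the hunks below (the struct definition proper lands in flow_table.h, which the diffstat lists but this excerpt does not show); the helper functions and their names are purely illustrative, since the patch open-codes both sides in flow_lookup() and tbl_mask_array_reset_counters().

/* Illustrative only -- layout mirrored from the hunks below. */
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct mask_array_stats {
	struct u64_stats_sync syncp;	/* per-CPU, so one writer once BH is off */
	u64 usage_cntrs[];		/* one hit counter per mask, for this CPU */
};

/* Writer side, as in flow_lookup(): must run with BH disabled so the
 * per-CPU seqcount only ever has a single writer.
 */
static void mask_array_stats_hit(struct mask_array_stats __percpu *stats_pcpu,
				 u32 index)
{
	struct mask_array_stats *stats = this_cpu_ptr(stats_pcpu);

	u64_stats_update_begin(&stats->syncp);
	stats->usage_cntrs[index]++;
	u64_stats_update_end(&stats->syncp);
}

/* Reader side, as in tbl_mask_array_reset_counters(): runs on any CPU and
 * retries if it races with the writer's seqcount update.
 */
static u64 mask_array_stats_read(struct mask_array_stats __percpu *stats_pcpu,
				 int cpu, u32 index)
{
	struct mask_array_stats *stats = per_cpu_ptr(stats_pcpu, cpu);
	unsigned int start;
	u64 counter;

	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		counter = stats->usage_cntrs[index];
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

	return counter;
}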

diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 87c286ad660e..f3486a37361a 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -175,7 +175,7 @@ static struct table_instance *table_instance_alloc(int new_size)
 
 static void __mask_array_destroy(struct mask_array *ma)
 {
-	free_percpu(ma->masks_usage_cntr);
+	free_percpu(ma->masks_usage_stats);
 	kfree(ma);
 }
 
@@ -199,15 +199,15 @@ static void tbl_mask_array_reset_counters(struct mask_array *ma)
 		ma->masks_usage_zero_cntr[i] = 0;
 
 		for_each_possible_cpu(cpu) {
-			u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
-							  cpu);
+			struct mask_array_stats *stats;
 			unsigned int start;
 			u64 counter;
 
+			stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
 			do {
-				start = u64_stats_fetch_begin_irq(&ma->syncp);
-				counter = usage_counters[i];
-			} while (u64_stats_fetch_retry_irq(&ma->syncp, start));
+				start = u64_stats_fetch_begin_irq(&stats->syncp);
+				counter = stats->usage_cntrs[i];
+			} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 
 			ma->masks_usage_zero_cntr[i] += counter;
 		}
@@ -230,9 +230,10 @@ static struct mask_array *tbl_mask_array_alloc(int size)
 					     sizeof(struct sw_flow_mask *) *
 					     size);
 
-	new->masks_usage_cntr = __alloc_percpu(sizeof(u64) * size,
-					       __alignof__(u64));
-	if (!new->masks_usage_cntr) {
+	new->masks_usage_stats = __alloc_percpu(sizeof(struct mask_array_stats) +
+						sizeof(u64) * size,
+						__alignof__(u64));
+	if (!new->masks_usage_stats) {
 		kfree(new);
 		return NULL;
 	}
@@ -722,6 +723,8 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
 
 /* Flow lookup does full lookup on flow table. It starts with
  * mask from index passed in *index.
+ * This function MUST be called with BH disabled due to the use
+ * of CPU specific variables.
  */
 static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				   struct table_instance *ti,
@@ -731,7 +734,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				   u32 *n_cache_hit,
 				   u32 *index)
 {
-	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
+	struct mask_array_stats *stats = this_cpu_ptr(ma->masks_usage_stats);
 	struct sw_flow *flow;
 	struct sw_flow_mask *mask;
 	int i;
@@ -741,9 +744,9 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 		if (mask) {
 			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
 			if (flow) {
-				u64_stats_update_begin(&ma->syncp);
-				usage_counters[*index]++;
-				u64_stats_update_end(&ma->syncp);
+				u64_stats_update_begin(&stats->syncp);
+				stats->usage_cntrs[*index]++;
+				u64_stats_update_end(&stats->syncp);
 				(*n_cache_hit)++;
 				return flow;
 			}
@@ -762,9 +765,9 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
 		if (flow) { /* Found