On 2022-03-22 2:04 PM, Tao Liu wrote:
If a netdev goes away (e.g. qemu shuts down a vnet), the kernel deletes all
of its tc filters, but the tcf_ids related to that netdev are left behind in
the ufid_to_tc hashmap. When qemu restarts with the same vnet but a different
ifindex gets assigned, a duplicate ufid may be added. In particular, after
hashmap_expand the old entry ends up in front of the new one, so deleting or
modifying the tc flow will always fail.

So delete the stale entry to avoid a duplicated ufid in the hashmap.
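
For readers less familiar with the failure mode, here is a minimal standalone
sketch of it in plain C. It is not OVS code: "entry", "bucket" and the helper
names are hypothetical stand-ins for ufid_tc_data and the ufid_to_tc hashmap,
and the tail insert simulates the bucket order after hashmap_expand. It only
shows why a lookup keyed on the ufid alone can land on the stale mapping once
the old entry sits in front of the new one.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry {
    char ufid[16];              /* stands in for the ovs_u128 ufid key */
    int ifindex;                /* stands in for the tcf_id/netdev binding */
    struct entry *next;
};

static struct entry *bucket;    /* one hash bucket of the map */

static void
map_insert(const char *ufid, int ifindex)
{
    struct entry *e = calloc(1, sizeof *e);
    struct entry **p = &bucket;

    snprintf(e->ufid, sizeof e->ufid, "%s", ufid);
    e->ifindex = ifindex;
    while (*p) {
        p = &(*p)->next;
    }
    *p = e;                     /* append: the stale entry stays in front of
                                 * the new one, like the bucket order after
                                 * hashmap_expand() rehashes it */
}

static struct entry *
map_lookup(const char *ufid)
{
    for (struct entry *e = bucket; e; e = e->next) {
        if (!strcmp(e->ufid, ufid)) {
            return e;           /* first match wins, i.e. the stale entry */
        }
    }
    return NULL;
}

int
main(void)
{
    map_insert("ufid-1", 5);    /* netdev with ifindex 5 goes away, but the
                                 * mapping is never removed -> stale */
    map_insert("ufid-1", 7);    /* qemu restarts: same flow, ifindex 7 */

    /* Any delete/modify keyed on the ufid now targets the dead ifindex. */
    printf("lookup(ufid-1) -> ifindex %d\n", map_lookup("ufid-1")->ifindex);
    return 0;
}

Compiled and run, this prints ifindex 5, i.e. the stale mapping, which is why
the stale entry has to be removed before (or instead of) adding a second one.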

Fixes: dd8ca104acd7 ("netdev-tc-offloads: Don't delete ufid mapping if fail to delete filter")
Signed-off-by: Tao Liu <[email protected]>
---
  lib/netdev-offload-tc.c | 37 ++++++++++++++++++++++---------------
  1 file changed, 22 insertions(+), 15 deletions(-)

diff --git a/lib/netdev-offload-tc.c b/lib/netdev-offload-tc.c
index 12d0a9af3..e9dd66790 100644
--- a/lib/netdev-offload-tc.c
+++ b/lib/netdev-offload-tc.c
@@ -167,6 +167,9 @@ struct ufid_tc_data {
      struct netdev *netdev;
  };
+static void parse_tc_flower_to_stats(struct tc_flower *flower,
+                                     struct dpif_flow_stats *stats);
+
  static void
  del_ufid_tc_mapping_unlocked(const ovs_u128 *ufid)
  {
@@ -200,11 +203,24 @@ del_ufid_tc_mapping(const ovs_u128 *ufid)
/* Wrapper function to delete filter and ufid tc mapping */
  static int
-del_filter_and_ufid_mapping(struct tcf_id *id, const ovs_u128 *ufid)
+del_filter_and_ufid_mapping(struct tcf_id *id, const ovs_u128 *ufid,
+                            struct dpif_flow_stats *stats)
  {
-    int err;
+    struct tc_flower flower;
+    int err = 0, get_err;
+
+    get_err = tc_get_flower(id, &flower);
+    if (!get_err) {
+        err = tc_del_filter(id);
+    }

Hi, besides moving the stats-filling part here, I'm not sure I understand
how this fixes the stale ufid deletion. As I read it, instead of calling
delete on the id directly you now check that it exists first, so the result
seems the same? If not, can you explain?

+
+    if (stats) {
+        memset(stats, 0, sizeof *stats);
+        if (!get_err) {
+            parse_tc_flower_to_stats(&flower, stats);
+        }
+    }
- err = tc_del_filter(id);
      if (!err) {
          del_ufid_tc_mapping(ufid);
      }
@@ -1946,7 +1962,8 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match,
      if (get_ufid_tc_mapping(ufid, &id) == 0) {
          VLOG_DBG_RL(&rl, "updating old handle: %d prio: %d",
                      id.handle, id.prio);
-        info->tc_modify_flow_deleted = !del_filter_and_ufid_mapping(&id, ufid);
+        info->tc_modify_flow_deleted =
+                    !del_filter_and_ufid_mapping(&id, ufid, NULL);
      }
prio = get_prio_for_tc_flower(&flower);
@@ -2017,7 +2034,6 @@ netdev_tc_flow_del(struct netdev *netdev OVS_UNUSED,
                     const ovs_u128 *ufid,
                     struct dpif_flow_stats *stats)
  {
-    struct tc_flower flower;
      struct tcf_id id;
      int error;
@@ -2026,16 +2042,7 @@ netdev_tc_flow_del(struct netdev *netdev OVS_UNUSED,
          return error;
      }
- if (stats) {
-        memset(stats, 0, sizeof *stats);
-        if (!tc_get_flower(&id, &flower)) {
-            parse_tc_flower_to_stats(&flower, stats);
-        }
-    }
-
-    error = del_filter_and_ufid_mapping(&id, ufid);
-
-    return error;
+    return del_filter_and_ufid_mapping(&id, ufid, stats);
  }
static int