From: Ido Schimmel <ido...@nvidia.com>

The kernel periodically checks the idle time of nexthop buckets to
determine if they are idle and can be re-populated with a new nexthop.

When the resilient nexthop group is offloaded to hardware, the kernel
will not see activity on nexthop buckets unless it is reported from
hardware.

Add a function that can be periodically called by device drivers to
report activity on nexthop buckets after querying it from the underlying
device.

Signed-off-by: Ido Schimmel <ido...@nvidia.com>
Reviewed-by: Petr Machata <pe...@nvidia.com>
Signed-off-by: Petr Machata <pe...@nvidia.com>
---
 include/net/nexthop.h |  2 ++
 net/ipv4/nexthop.c    | 35 +++++++++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+)

diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index e1c30584e601..406bf0d959c6 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -221,6 +221,8 @@ int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb);
 void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap);
 void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u32 bucket_index,
                                 bool offload, bool trap);
+void nexthop_res_grp_activity_update(struct net *net, u32 id, u32 num_buckets,
+                                    unsigned long *activity);
 
 /* caller is holding rcu or rtnl; no reference taken to nexthop */
 struct nexthop *nexthop_find_by_id(struct net *net, u32 id);
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index aa5c8343ded7..0e80d34b20a7 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -3099,6 +3099,41 @@ void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u32 bucket_index,
 }
 EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);
 
+void nexthop_res_grp_activity_update(struct net *net, u32 id, u32 num_buckets,
+                                    unsigned long *activity)
+{
+       struct nh_res_table *res_table;
+       struct nexthop *nexthop;
+       struct nh_group *nhg;
+       u32 i;
+
+       rcu_read_lock();
+
+       nexthop = nexthop_find_by_id(net, id);
+       if (!nexthop || !nexthop->is_group)
+               goto out;
+
+       nhg = rcu_dereference(nexthop->nh_grp);
+       if (!nhg->resilient)
+               goto out;
+
+       /* Instead of silently ignoring some buckets, demand that the sizes
+        * be the same. Dereference the RCU-protected table before reading it.
+        */
+       res_table = rcu_dereference(nhg->res_table);
+       if (num_buckets != res_table->num_nh_buckets)
+               goto out;
+
+       for (i = 0; i < num_buckets; i++) {
+               if (test_bit(i, activity))
+                       nh_res_bucket_set_busy(&res_table->nh_buckets[i]);
+       }
+
+out:
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL(nexthop_res_grp_activity_update);
+
 static void __net_exit nexthop_net_exit(struct net *net)
 {
        rtnl_lock();
-- 
2.26.2

Reply via email to