From: SeongJae Park <sjp...@amazon.de>

For each 'fqdir_exit()' call, a work for destruction of the 'fqdir' is
enqueued.  The work function, 'fqdir_work_fn()', internally calls
'rcu_barrier()'.  In case of intensive 'fqdir_exit()' (e.g., frequent
'unshare()' system calls), contention on 'rcu_barrier()' could result
in unacceptably high latency.  This commit avoids such contention by
doing the 'rcu_barrier()' and the subsequent lightweight works in a
batched manner using a dedicated single-threaded worker, similar to
how 'cleanup_net()' works.

Signed-off-by: SeongJae Park <sjp...@amazon.de>
---
 include/net/inet_frag.h  |  1 +
 net/ipv4/inet_fragment.c | 45 +++++++++++++++++++++++++++++++++-------
 2 files changed, 39 insertions(+), 7 deletions(-)

diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index bac79e817776..48cc5795ceda 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -21,6 +21,7 @@ struct fqdir {
        /* Keep atomic mem on separate cachelines in structs that include it */
        atomic_long_t           mem ____cacheline_aligned_in_smp;
        struct work_struct      destroy_work;
+       struct llist_node       free_list;
 };
 
 /**
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 10d31733297d..a6fc4afcc323 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -145,12 +145,17 @@ static void inet_frags_free_cb(void *ptr, void *arg)
                inet_frag_destroy(fq);
 }
 
-static void fqdir_work_fn(struct work_struct *work)
+static struct workqueue_struct *fqdir_wq;
+static LLIST_HEAD(free_list);
+
+static void fqdir_free_fn(struct work_struct *work)
 {
-       struct fqdir *fqdir = container_of(work, struct fqdir, destroy_work);
-       struct inet_frags *f = fqdir->f;
+       struct llist_node *kill_list;
+       struct fqdir *fqdir, *tmp;
+       struct inet_frags *f;
 
-       rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);
+       /* Atomically snapshot the list of fqdirs to free */
+       kill_list = llist_del_all(&free_list);
 
        /* We need to make sure all ongoing call_rcu(..., inet_frag_destroy_rcu)
         * have completed, since they need to dereference fqdir.
@@ -158,12 +163,38 @@ static void fqdir_work_fn(struct work_struct *work)
         */
        rcu_barrier();
 
-       if (refcount_dec_and_test(&f->refcnt))
-               complete(&f->completion);
+       llist_for_each_entry_safe(fqdir, tmp, kill_list, free_list) {
+               f = fqdir->f;
+               if (refcount_dec_and_test(&f->refcnt))
+                       complete(&f->completion);
 
-       kfree(fqdir);
+               kfree(fqdir);
+       }
 }
 
+static DECLARE_WORK(fqdir_free_work, fqdir_free_fn);
+
+static void fqdir_work_fn(struct work_struct *work)
+{
+       struct fqdir *fqdir = container_of(work, struct fqdir, destroy_work);
+
+       rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);
+
+       if (llist_add(&fqdir->free_list, &free_list))
+               queue_work(fqdir_wq, &fqdir_free_work);
+}
+
+static int __init fqdir_wq_init(void)
+{
+       fqdir_wq = create_singlethread_workqueue("fqdir");
+       if (!fqdir_wq)
+               panic("Could not create fqdir workq");
+       return 0;
+}
+
+pure_initcall(fqdir_wq_init);
+
+
 int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
 {
        struct fqdir *fqdir = kzalloc(sizeof(*fqdir), GFP_KERNEL);
-- 
2.17.1

Reply via email to