This patch introduces a small enhancement which allows a direct
wake-up of synchronize_rcu() callers. It occurs after a grace
period has completed, and is thus performed by the gp-kthread.

The number of clients woken this way is limited by a hard-coded
maximum threshold. The remaining part, if any is left, is deferred
to the main worker.

Link: https://lore.kernel.org/lkml/Zd0ZtNu+Rt0qXkfS@lothringen/

Reviewed-by: Paul E. McKenney <[email protected]>
Signed-off-by: Uladzislau Rezki (Sony) <[email protected]>
---
 kernel/rcu/tree.c | 24 +++++++++++++++++++++++-
 kernel/rcu/tree.h |  6 ++++++
 2 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 2e1c5be6d64b..2a270abade4d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1645,7 +1645,8 @@ static void rcu_sr_normal_gp_cleanup_work(struct 
work_struct *work)
  */
 static void rcu_sr_normal_gp_cleanup(void)
 {
-       struct llist_node *wait_tail;
+       struct llist_node *wait_tail, *next, *rcu;
+       int done = 0;
 
        wait_tail = rcu_state.srs_wait_tail;
        if (wait_tail == NULL)
@@ -1653,11 +1654,32 @@ static void rcu_sr_normal_gp_cleanup(void)
 
        rcu_state.srs_wait_tail = NULL;
        ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);
+       WARN_ON_ONCE(!rcu_sr_is_wait_head(wait_tail));
+
+       /*
+        * Process (a) and (d) cases. See an illustration.
+        */
+       llist_for_each_safe(rcu, next, wait_tail->next) {
+               if (rcu_sr_is_wait_head(rcu))
+                       break;
+
+               rcu_sr_normal_complete(rcu);
+               // This entry can be the last one, so advance wait_tail->next on each step.
+               wait_tail->next = next;
+
+               if (++done == SR_MAX_USERS_WAKE_FROM_GP)
+                       break;
+       }
 
        // concurrent sr_normal_gp_cleanup work might observe this update.
        smp_store_release(&rcu_state.srs_done_tail, wait_tail);
        ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
 
+       /*
+        * We schedule a work item in order to perform final processing
+        * of any outstanding users (if still left) and to release the
+        * wait-heads added by the rcu_sr_normal_gp_init() call.
+        */
        schedule_work(&rcu_state.srs_cleanup_work);
 }
 
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index b942b9437438..2832787cee1d 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -315,6 +315,12 @@ do {                                                       
                \
        __set_current_state(TASK_RUNNING);                              \
 } while (0)
 
+/*
+ * The maximum number of synchronize_rcu() users which are
+ * awakened directly by the rcu_gp_kthread(). The remaining
+ * users are deferred to the main worker.
+ */
+#define SR_MAX_USERS_WAKE_FROM_GP 5
 #define SR_NORMAL_GP_WAIT_HEAD_MAX 5
 
 struct sr_wait_node {
-- 
2.39.2


Reply via email to