The bypass flush decision logic is duplicated in rcu_nocb_try_bypass()
and nocb_gp_wait(), with similar but not identical conditions. This
commit therefore extracts that logic into a common helper function,
nocb_bypass_needs_flush(), improving readability.

A flush_faster parameter is added to control the flushing thresholds
and timeouts. This preserves the design from the original commit
d1b222c6be1f ("rcu/nocb: Add bypass callback queueing"), which avoids
having the GP kthread flush the bypass queue too aggressively.
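
For illustration, the two call sites end up looking roughly like the
following sketch (the exact change is in the diff below; the "..."
elides the surrounding locking and wakeup handling):

	/* rcu_nocb_try_bypass(): enqueue path, flush aggressively. */
	if (nocb_bypass_needs_flush(rdp, ncbs, lazy_len, j, true)) {
		...
	}

	/* nocb_gp_wait(): GP kthread, flush less aggressively. */
	flush_bypass = nocb_bypass_needs_flush(rdp, bypass_ncbs,
					       lazy_ncbs, j, false);
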
Signed-off-by: Joel Fernandes <[email protected]>
---
kernel/rcu/tree_nocb.h | 51 ++++++++++++++++++++++++++++++------------
1 file changed, 37 insertions(+), 14 deletions(-)
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 946795ddf7d1..6835e5131187 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -378,6 +378,38 @@ static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j, false));
}
+/*
+ * Determine if the bypass queue needs to be flushed based on time and size.
+ * For lazy-only bypass queues, use the lazy flush timeout; otherwise flush
+ * based on jiffy advancement. The flush_faster parameter controls flush aggressiveness.
+ */
+static bool nocb_bypass_needs_flush(struct rcu_data *rdp, long bypass_ncbs,
+ long lazy_ncbs, unsigned long j,
+ bool flush_faster)
+{
+ bool bypass_is_lazy;
+ unsigned long bypass_first;
+ unsigned long flush_timeout;
+ long qhimark_thresh;
+
+ if (!bypass_ncbs)
+ return false;
+
+ qhimark_thresh = flush_faster ? qhimark : 2 * qhimark;
+ if (bypass_ncbs >= qhimark_thresh)
+ return true;
+
+ bypass_first = READ_ONCE(rdp->nocb_bypass_first);
+ bypass_is_lazy = (bypass_ncbs == lazy_ncbs);
+
+ if (bypass_is_lazy)
+ flush_timeout = rcu_get_jiffies_lazy_flush();
+ else
+ flush_timeout = flush_faster ? 0 : 1;
+
+ return time_after(j, bypass_first + flush_timeout);
+}
+
/*
* See whether it is appropriate to use the ->nocb_bypass list in order
* to control contention on ->nocb_lock. A limited number of direct
@@ -404,7 +436,8 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
unsigned long cur_gp_seq;
unsigned long j = jiffies;
long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
- bool bypass_is_lazy = (ncbs == READ_ONCE(rdp->lazy_len));
+ long lazy_len = READ_ONCE(rdp->lazy_len);
+ bool bypass_is_lazy = (ncbs == lazy_len);
lockdep_assert_irqs_disabled();
@@ -456,10 +489,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
// If ->nocb_bypass has been used too long or is too full,
// flush ->nocb_bypass to ->cblist.
- if ((ncbs && !bypass_is_lazy && j != READ_ONCE(rdp->nocb_bypass_first)) ||
- (ncbs && bypass_is_lazy &&
- (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + rcu_get_jiffies_lazy_flush()))) ||
- ncbs >= qhimark) {
+ if (nocb_bypass_needs_flush(rdp, ncbs, lazy_len, j, true)) {
rcu_nocb_lock(rdp);
*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
@@ -699,15 +729,8 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
lazy_ncbs = READ_ONCE(rdp->lazy_len);
- if (bypass_ncbs && (lazy_ncbs == bypass_ncbs) &&
- (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + rcu_get_jiffies_lazy_flush()) ||
- bypass_ncbs > 2 * qhimark)) {
- flush_bypass = true;
- } else if (bypass_ncbs && (lazy_ncbs != bypass_ncbs) &&
- (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
- bypass_ncbs > 2 * qhimark)) {
- flush_bypass = true;
- } else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
+ flush_bypass = nocb_bypass_needs_flush(rdp, bypass_ncbs, lazy_ncbs, j, false);
+ if (!flush_bypass && !bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
rdp->nocb_gp_wake_attempt = false;
rcu_nocb_unlock_irqrestore(rdp, flags);
continue; /* No callbacks here, try next. */
--
2.34.1