From: Lihao Liang <[email protected]>

Add PRCU core processing, which currently consists only of callback
processing in prcu_process_callbacks().  On each scheduling-clock
interrupt, prcu_check_callbacks() raises the new PRCU_SOFTIRQ if
prcu_pending() finds callbacks whose grace period has completed.
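
For reference, the per-tick invocation chain added here is:

    update_process_times()
      -> prcu_check_callbacks()
           -> prcu_pending()        /* completed-GP callbacks queued? */
           -> invoke_prcu_core()    /* raise_softirq(PRCU_SOFTIRQ) */

    /* later, from softirq context */
    prcu_process_callbacks()        /* invokes callbacks up to ->cb_version */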

Reviewed-by: Heng Zhang <[email protected]>
Signed-off-by: Lihao Liang <[email protected]>
---
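Illustration only, not part of this patch: a minimal PRCU user, with
hypothetical names, assuming linux/prcu.h and linux/slab.h.  call_prcu()
queues the callback, and the new softirq path invokes it once its
grace-period version has completed:

    struct my_data {
            struct rcu_head rh;
            /* ... payload protected by PRCU ... */
    };

    static void my_free_cb(struct rcu_head *head)
    {
            /* Runs from PRCU_SOFTIRQ with interrupts disabled. */
            kfree(container_of(head, struct my_data, rh));
    }

    /* After making 'p' unreachable to new readers: */
    call_prcu(&p->rh, my_free_cb);
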
 include/linux/interrupt.h |  3 ++
 include/linux/prcu.h      |  8 +++++
 kernel/rcu/prcu.c         | 86 +++++++++++++++++++++++++++++++++++++++++++++++
 kernel/time/timer.c       |  2 ++
 4 files changed, 99 insertions(+)

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0991f973..f05ef62a 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -456,6 +456,9 @@ enum
        SCHED_SOFTIRQ,
        HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
                            numbering. Sigh! */
+#ifdef CONFIG_PRCU
+       PRCU_SOFTIRQ,
+#endif
        RCU_SOFTIRQ,    /* Preferable RCU should always be the last softirq */
 
        NR_SOFTIRQS
diff --git a/include/linux/prcu.h b/include/linux/prcu.h
index e5e09c9b..4e7d5d65 100644
--- a/include/linux/prcu.h
+++ b/include/linux/prcu.h
@@ -31,11 +31,13 @@ struct prcu_local_struct {
        unsigned int locked;
        unsigned int online;
        unsigned long long version;
+       unsigned long long cb_version;
        struct prcu_cblist cblist;
 };
 
 struct prcu_struct {
        atomic64_t global_version;
+       atomic64_t cb_version;
        atomic_t active_ctr;
        struct mutex mtx;
        wait_queue_head_t wait_q;
@@ -48,6 +50,9 @@ void synchronize_prcu(void);
 void call_prcu(struct rcu_head *head, rcu_callback_t func);
 void prcu_init(void);
 void prcu_note_context_switch(void);
+int prcu_pending(void);
+void invoke_prcu_core(void);
+void prcu_check_callbacks(void);
 
 #else /* #ifdef CONFIG_PRCU */
 
@@ -57,6 +62,9 @@ void prcu_note_context_switch(void);
 #define call_prcu() do {} while (0)
 #define prcu_init() do {} while (0)
 #define prcu_note_context_switch() do {} while (0)
+#define prcu_pending() 0
+#define invoke_prcu_core() do {} while (0)
+#define prcu_check_callbacks() do {} while (0)
 
 #endif /* #ifdef CONFIG_PRCU */
 #endif /* __LINUX_PRCU_H */
diff --git a/kernel/rcu/prcu.c b/kernel/rcu/prcu.c
index f198285c..373039c5 100644
--- a/kernel/rcu/prcu.c
+++ b/kernel/rcu/prcu.c
@@ -1,6 +1,7 @@
 #include <linux/smp.h>
 #include <linux/percpu.h>
 #include <linux/prcu.h>
+#include <linux/interrupt.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <asm/barrier.h>
@@ -11,6 +12,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct prcu_local_struct, prcu_local);
 
 struct prcu_struct global_prcu = {
        .global_version = ATOMIC64_INIT(0),
+       .cb_version = ATOMIC64_INIT(0),
        .active_ctr = ATOMIC_INIT(0),
        .mtx = __MUTEX_INITIALIZER(global_prcu.mtx),
        .wait_q = __WAIT_QUEUE_HEAD_INITIALIZER(global_prcu.wait_q)
@@ -27,6 +29,35 @@ static void prcu_cblist_init(struct prcu_cblist *rclp)
        rclp->len = 0;
 }
 
+/*
+ * Dequeue the oldest rcu_head structure from the specified callback
+ * list, along with its corresponding grace-period version entry.
+ */
+static struct rcu_head *prcu_cblist_dequeue(struct prcu_cblist *rclp)
+{
+       struct rcu_head *rhp;
+       struct prcu_version_head *vhp;
+
+       rhp = rclp->head;
+       vhp = rclp->version_head;
+       if (!rhp) {
+               WARN_ON(vhp);
+               WARN_ON(rclp->len);
+               return NULL;
+       }
+
+       rclp->version_head = vhp->next;
+       rclp->head = rhp->next;
+       rclp->len--;
+
+       if (!rclp->head) {
+               rclp->tail = &rclp->head;
+               rclp->version_tail = &rclp->version_head;
+       }
+
+       return rhp;
+}
+
 static inline void prcu_report(struct prcu_local_struct *local)
 {
        unsigned long long global_version;
@@ -117,6 +148,7 @@ void synchronize_prcu(void)
        if (atomic_read(&prcu->active_ctr))
                wait_event(prcu->wait_q, !atomic_read(&prcu->active_ctr));
 
+       atomic64_set(&prcu->cb_version, version);
        mutex_unlock(&prcu->mtx);
 }
 EXPORT_SYMBOL(synchronize_prcu);
@@ -166,6 +198,58 @@ void call_prcu(struct rcu_head *head, rcu_callback_t func)
 }
 EXPORT_SYMBOL(call_prcu);
 
+int prcu_pending(void)
+{
+       struct prcu_local_struct *local = get_cpu_ptr(&prcu_local);
+       int pending = local->cb_version < atomic64_read(&prcu->cb_version) &&
+                     local->cblist.head != NULL;
+
+       put_cpu_ptr(&prcu_local);
+       return pending;
+}
+
+void invoke_prcu_core(void)
+{
+       if (cpu_online(smp_processor_id()))
+               raise_softirq(PRCU_SOFTIRQ);
+}
+
+void prcu_check_callbacks(void)
+{
+       if (prcu_pending())
+               invoke_prcu_core();
+}
+
+static __latent_entropy void prcu_process_callbacks(struct softirq_action *unused)
+{
+       unsigned long flags;
+       unsigned long long cb_version;
+       struct prcu_local_struct *local;
+       struct prcu_cblist *rclp;
+       struct rcu_head *rhp;
+       struct prcu_version_head *vhp;
+
+       if (cpu_is_offline(smp_processor_id()))
+               return;
+
+       cb_version = atomic64_read(&prcu->cb_version);
+
+       /* Disable interrupts to prevent races with call_prcu() */
+       local_irq_save(flags);
+       local = this_cpu_ptr(&prcu_local);
+       rclp = &local->cblist;
+       rhp = rclp->head;
+       vhp = rclp->version_head;
+       for (; rhp && vhp && vhp->version < cb_version;
+            rhp = rclp->head, vhp = rclp->version_head) {
+               rhp = prcu_cblist_dequeue(rclp);
+               debug_rcu_head_unqueue(rhp);
+               rhp->func(rhp);
+       }
+       local->cb_version = cb_version;
+       local_irq_restore(flags);
+}
+
 void prcu_init_local_struct(int cpu)
 {
        struct prcu_local_struct *local;
@@ -174,6 +258,7 @@ void prcu_init_local_struct(int cpu)
        local->locked = 0;
        local->online = 0;
        local->version = 0;
+       local->cb_version = 0;
        prcu_cblist_init(&local->cblist);
 }
 
@@ -181,6 +266,7 @@ void __init prcu_init(void)
 {
        int cpu;
 
+       open_softirq(PRCU_SOFTIRQ, prcu_process_callbacks);
        for_each_possible_cpu(cpu)
                prcu_init_local_struct(cpu);
 }
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index d3f33020..ed863e63 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -44,6 +44,7 @@
 #include <linux/sched/debug.h>
 #include <linux/slab.h>
 #include <linux/compat.h>
+#include <linux/prcu.h>
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
@@ -1568,6 +1569,7 @@ void update_process_times(int user_tick)
        /* Note: this timer irq context must be accounted for as well. */
        account_process_tick(p, user_tick);
        run_local_timers();
+       prcu_check_callbacks();
        rcu_check_callbacks(user_tick);
 #ifdef CONFIG_IRQ_WORK
        if (in_irq())
-- 
2.14.1.729.g59c0ea183
