[PATCH tip/core/rcu 04/17] rcutorture: Break up too-long rcu_torture_fwd_prog() function

2018-11-11 Thread Paul E. McKenney
From: "Paul E. McKenney" 

This commit splits the rcu_torture_fwd_prog_nr() and rcu_torture_fwd_prog_cr()
functions out of rcu_torture_fwd_prog() in order to reduce indentation
pain and because rcu_torture_fwd_prog() was getting a bit too long.
In addition, this will enable easier conditional execution of the
rcu_torture_fwd_prog_cr() function, which can give false-positive
failures in some NO_HZ_FULL configurations due to overloading the
housekeeping CPUs.

Signed-off-by: Paul E. McKenney 
---
 kernel/rcu/rcutorture.c | 254 +---
 1 file changed, 135 insertions(+), 119 deletions(-)

diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 17f480129a78..bcc33bb8d9a6 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1650,15 +1650,70 @@ static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
spin_unlock(&rcu_fwd_lock);
 }
 
-/* Carry out grace-period forward-progress testing. */
-static int rcu_torture_fwd_prog(void *args)
+/* Carry out need_resched()/cond_resched() forward-progress testing. */
+static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
 {
unsigned long cver;
unsigned long dur;
struct fwd_cb_state fcs;
unsigned long gps;
-   int i;
int idx;
+   int sd;
+   int sd4;
+   bool selfpropcb = false;
+   unsigned long stopat;
+   static DEFINE_TORTURE_RANDOM(trs);
+
+   if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
+   init_rcu_head_on_stack(&fcs.rh);
+   selfpropcb = true;
+   }
+
+   /* Tight loop containing cond_resched(). */
+   if (selfpropcb) {
+   WRITE_ONCE(fcs.stop, 0);
+   cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
+   }
+   cver = READ_ONCE(rcu_torture_current_version);
+   gps = cur_ops->get_gp_seq();
+   sd = cur_ops->stall_dur() + 1;
+   sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
+   dur = sd4 + torture_random(&trs) % (sd - sd4);
+   rcu_fwd_startat = jiffies;
+   stopat = rcu_fwd_startat + dur;
+   while (time_before(jiffies, stopat) && !torture_must_stop()) {
+   idx = cur_ops->readlock();
+   udelay(10);
+   cur_ops->readunlock(idx);
+   if (!fwd_progress_need_resched || need_resched())
+   cond_resched();
+   }
+   (*tested_tries)++;
+   if (!time_before(jiffies, stopat) && !torture_must_stop()) {
+   (*tested)++;
+   cver = READ_ONCE(rcu_torture_current_version) - cver;
+   gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
+   WARN_ON(!cver && gps < 2);
+   pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
+   }
+   if (selfpropcb) {
+   WRITE_ONCE(fcs.stop, 1);
+   cur_ops->sync(); /* Wait for running CB to complete. */
+   cur_ops->cb_barrier(); /* Wait for queued callbacks. */
+   }
+
+   if (selfpropcb) {
+   WARN_ON(READ_ONCE(fcs.stop) != 2);
+   destroy_rcu_head_on_stack(&fcs.rh);
+   }
+}
+
+/* Carry out call_rcu() forward-progress testing. */
+static void rcu_torture_fwd_prog_cr(void)
+{
+   unsigned long cver;
+   unsigned long gps;
+   int i;
int j;
long n_launders;
long n_launders_cb_snap;
@@ -1667,136 +1722,97 @@ static int rcu_torture_fwd_prog(void *args)
long n_max_gps;
struct rcu_fwd_cb *rfcp;
struct rcu_fwd_cb *rfcpn;
-   int sd;
-   int sd4;
-   bool selfpropcb = false;
unsigned long stopat;
unsigned long stoppedat;
+
+   /* Loop continuously posting RCU callbacks. */
+   WRITE_ONCE(rcu_fwd_cb_nodelay, true);
+   cur_ops->sync(); /* Later readers see above write. */
+   rcu_fwd_startat = jiffies;
+   stopat = rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
+   n_launders = 0;
+   n_launders_cb = 0;
+   n_launders_sa = 0;
+   n_max_cbs = 0;
+   n_max_gps = 0;
+   for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++)
+   n_launders_hist[i] = 0;
+   cver = READ_ONCE(rcu_torture_current_version);
+   gps = cur_ops->get_gp_seq();
+   while (time_before(jiffies, stopat) && !torture_must_stop()) {
+   rfcp = READ_ONCE(rcu_fwd_cb_head);
+   rfcpn = NULL;
+   if (rfcp)
+   rfcpn = READ_ONCE(rfcp->rfc_next);
+   if (rfcpn) {
+   if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
+   ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
+   break;
+   rcu_fwd_cb_head = rfcpn;
+   n_launders++;
+   n_launders_sa++;
+   } else {
+   rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
+   if (WARN_ON_ONCE(!rfcp)) {
+  
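
[Editor's note: the archived message is truncated above, so the rest of the
rcu_torture_fwd_prog_cr() hunk and the rewritten caller are not shown.  For
context, here is a minimal sketch of roughly how the simplified
rcu_torture_fwd_prog() kthread would invoke the two split-out helpers.  This
is an assumption based on the commit message and common rcutorture/torture.h
conventions (VERBOSE_TOROUT_STRING(), fwd_progress_holdoff, stutter_wait(),
torture_kthread_stopping()), not a reconstruction of the elided hunk.]

/*
 * Sketch only: simplified caller after the split; holdoff, stutter,
 * and diagnostic details are assumptions, not the patch's exact code.
 */
static int rcu_torture_fwd_prog(void *args)
{
	int tested = 0;
	int tested_tries = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
	do {
		/* Let other torture activity settle before each pass. */
		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
		/* Pass 1: need_resched()/cond_resched() forward progress. */
		rcu_torture_fwd_prog_nr(&tested, &tested_tries);
		/* Pass 2: call_rcu() callback-flood forward progress. */
		rcu_torture_fwd_prog_cr();

		/* Avoid slow periods, better to test when busy. */
		stutter_wait("rcu_torture_fwd_prog");
	} while (!torture_must_stop());
	/* Short runs might not contain a valid forward-progress attempt. */
	WARN_ON(!tested && tested_tries >= 5);
	torture_kthread_stopping("rcu_torture_fwd_prog");
	return 0;
}

With the helpers separated like this, a later change can make the
rcu_torture_fwd_prog_cr() call conditional (for example, skipping it when it
would overload NO_HZ_FULL housekeeping CPUs) without further restructuring.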
