On Tue, Jul 08, 2014 at 05:46:50PM -0400, Pranith Kumar wrote:
> Change the remaining uses of ACCESS_ONCE() so that each ACCESS_ONCE() either 
> does a load or a store, but not both.
> 
> Signed-off-by: Pranith Kumar <[email protected]>

Queued for 3.18, thank you Pranith!

                                                                Thanx, Paul

> ---
>  kernel/rcu/tree.c        | 6 ++++--
>  kernel/rcu/tree_plugin.h | 8 +++++---
>  2 files changed, 9 insertions(+), 5 deletions(-)
> 
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index dac6d20..c356bf6 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -1700,7 +1700,8 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
>       if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
>               raw_spin_lock_irq(&rnp->lock);
>               smp_mb__after_unlock_lock();
> -             ACCESS_ONCE(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
> +             ACCESS_ONCE(rsp->gp_flags) =
> +                     ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
>               raw_spin_unlock_irq(&rnp->lock);
>       }
>       return fqs_state;
> @@ -2514,7 +2515,8 @@ static void force_quiescent_state(struct rcu_state *rsp)
>               raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
>               return;  /* Someone beat us to it. */
>       }
> -     ACCESS_ONCE(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
> +     ACCESS_ONCE(rsp->gp_flags) =
> +             ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
>       raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
>       wake_up(&rsp->gp_wq);  /* Memory barrier implied by wake_up() path. */
>  }
> diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
> index 637a8a9..f87b88c 100644
> --- a/kernel/rcu/tree_plugin.h
> +++ b/kernel/rcu/tree_plugin.h
> @@ -897,7 +897,8 @@ void synchronize_rcu_expedited(void)
> 
>       /* Clean up and exit. */
>       smp_mb(); /* ensure expedited GP seen before counter increment. */
> -     ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
> +     ACCESS_ONCE(sync_rcu_preempt_exp_count) =
> +                                     sync_rcu_preempt_exp_count + 1;
>  unlock_mb_ret:
>       mutex_unlock(&sync_rcu_preempt_exp_mutex);
>  mb_ret:
> @@ -2307,8 +2308,9 @@ static int rcu_nocb_kthread(void *arg)
>                       list = next;
>               }
>               trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
> -             ACCESS_ONCE(rdp->nocb_p_count) -= c;
> -             ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
> +             ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c;
> +             ACCESS_ONCE(rdp->nocb_p_count_lazy) =
> +                                             rdp->nocb_p_count_lazy - cl;
>               rdp->n_nocbs_invoked += c;
>       }
>       return 0;
> -- 
> 1.9.1
> 
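For context on the transformation above: a compound assignment such as

	ACCESS_ONCE(rsp->gp_flags) |= RCU_GP_FLAG_FQS;

performs both a volatile load and a volatile store through a single
ACCESS_ONCE() expression, while the rewritten form spells out the load
and the store separately, so each ACCESS_ONCE() use is one or the
other. Below is a minimal userspace sketch of the idiom, assuming the
kernel's classic ACCESS_ONCE() definition (a cast through a volatile
pointer); the variable and flag names are hypothetical stand-ins, not
the kernel's:

	#include <stdio.h>

	/* Classic kernel-style definition: force a volatile access. */
	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

	static unsigned long gp_flags;	/* hypothetical variable */
	#define FLAG_FQS 0x2UL		/* hypothetical flag bit */

	int main(void)
	{
		/* Before: one expression does a volatile load AND a
		 * volatile store (a read-modify-write):
		 *
		 *	ACCESS_ONCE(gp_flags) |= FLAG_FQS;
		 *
		 * After: the load and the store are written out, so
		 * each ACCESS_ONCE() is either a load or a store:
		 */
		ACCESS_ONCE(gp_flags) = ACCESS_ONCE(gp_flags) | FLAG_FQS;

		printf("gp_flags = %#lx\n", gp_flags);
		return 0;
	}

Keeping each ACCESS_ONCE() to a single load or a single store is also
consistent with the kernel's later move to separate READ_ONCE() and
WRITE_ONCE() primitives.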
