On Wed, Jul 05, 2017 at 03:31:03AM -0700, tip-bot for Andy Lutomirski wrote:

> @@ -132,6 +135,9 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
>  static inline int init_new_context(struct task_struct *tsk,
>                                  struct mm_struct *mm)
>  {
> +     mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);

You could use atomic64_inc_return_relaxed() here; but since this is
x86-specific code, there is no difference.
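
As a sketch (assuming ctx_id only needs to be unique, with no ordering
requirement), the relaxed variant would read:

	/* Allocate a unique context id; no ordering needed. */
	mm->context.ctx_id = atomic64_inc_return_relaxed(&last_mm_ctx_id);

On x86 both variants compile to the same LOCK-prefixed RMW instruction,
so the relaxed form would merely document that no ordering is required.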

> +     atomic64_set(&mm->context.tlb_gen, 0);
> +
>       #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
>       if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
>               /* pkey 0 is the default and always allocated */

> diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
> index 50ea348..ad21353 100644
> --- a/arch/x86/include/asm/tlbflush.h
> +++ b/arch/x86/include/asm/tlbflush.h
> @@ -57,6 +57,23 @@ static inline void invpcid_flush_all_nonglobals(void)
>       __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
>  }
>  
> +static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
> +{
> +     u64 new_tlb_gen;
> +
> +     /*
> +      * Bump the generation count.  This also serves as a full barrier
> +      * that synchronizes with switch_mm(): callers are required to order
> +      * their read of mm_cpumask after their writes to the paging
> +      * structures.
> +      */
> +     smp_mb__before_atomic();
> +     new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
> +     smp_mb__after_atomic();

That's wrong; smp_mb__{before,after}_atomic() are entirely superfluous
here:

 - they're no-ops on x86
 - atomic_*_return() is already fully serializing (value-returning
   atomics imply a full barrier on either side)
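
IOW, a minimal fix (a sketch; the comment would need rewording as well)
is to drop the explicit barriers and rely on the atomic op itself:

	static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
	{
		/*
		 * Bump the generation count.  atomic64_inc_return() is
		 * fully ordered, so it already orders the callers' writes
		 * to the paging structures before their subsequent read
		 * of mm_cpumask.
		 */
		return atomic64_inc_return(&mm->context.tlb_gen);
	}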

