atomic64_inc_return() is a fully ordered RMW operation and thus already
implies smp_mb() before and after; the explicit smp_mb__before_atomic()
and smp_mb__after_atomic() calls around it are redundant.
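
Per Documentation/atomic_t.txt, value-returning (RMW) atomic ops are
fully ordered, which is equivalent to having an smp_mb() on either side
of the primitive. A minimal sketch of the equivalence this patch relies
on (names taken from inc_mm_tlb_gen() below):

	/* before: explicit barriers around the fully ordered RMW op */
	smp_mb__before_atomic();
	new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
	smp_mb__after_atomic();

	/* after: the bare RMW op provides the same full ordering */
	new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);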

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
---
 arch/x86/include/asm/tlbflush.h |    8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -62,19 +62,13 @@ static inline void invpcid_flush_all_non
 
 static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 {
-       u64 new_tlb_gen;
-
        /*
         * Bump the generation count.  This also serves as a full barrier
         * that synchronizes with switch_mm(): callers are required to order
         * their read of mm_cpumask after their writes to the paging
         * structures.
         */
-       smp_mb__before_atomic();
-       new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
-       smp_mb__after_atomic();
-
-       return new_tlb_gen;
+       return atomic64_inc_return(&mm->context.tlb_gen);
 }
 
 /* There are 12 bits of space for ASIDS in CR3 */
