Drop the explicit inline keyword from a few local static functions in
arch/x86/mm/tlb.c. The compiler is smart enough to decide whether to
inline them without these hints.
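
For illustration only (a standalone sketch, not part of this patch; the
function names below are made up): with a small file-local helper like
the one here, gcc and clang at -O2 typically inline the call on their
own, so the inline keyword on a static function mostly documents intent
rather than changing the generated code.

	/* Hypothetical example, not from the kernel tree: a static helper
	 * without the inline keyword. Built with -O2, gcc/clang will
	 * usually inline the call into bump() based on their own cost
	 * model. */
	static unsigned long add_one(unsigned long x)
	{
		return x + 1;
	}

	unsigned long bump(unsigned long v)
	{
		return add_one(v);
	}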

Cc: Andy Lutomirski <l...@kernel.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Suggested-by: Dave Hansen <dave.han...@linux.intel.com>
Signed-off-by: Nadav Amit <na...@vmware.com>
---
 arch/x86/mm/tlb.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 64afe1215495..48a3d4453e50 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -189,7 +189,7 @@ static void sync_current_stack_to_mm(struct mm_struct *mm)
        }
 }
 
-static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
+static unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
 {
        unsigned long next_tif = task_thread_info(next)->flags;
        unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
@@ -753,7 +753,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info);
 static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx);
 #endif
 
-static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
+static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
                        unsigned long start, unsigned long end,
                        unsigned int stride_shift, bool freed_tables,
                        u64 new_tlb_gen)
@@ -779,7 +779,7 @@ static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
        return info;
 }
 
-static inline void put_flush_tlb_info(void)
+static void put_flush_tlb_info(void)
 {
 #ifdef CONFIG_DEBUG_VM
        /* Complete reentrancy prevention checks */
-- 
2.17.1
