From: Peter Zijlstra <[email protected]>

Some architectures require different TLB invalidation instructions
depending on whether it is only the last-level of page table being
changed, or whether there are also changes to the intermediate
(directory) entries higher up the tree.

Add a new bit to the flags bitfield in struct mmu_gather so that the
architecture code can operate accordingly if it's the intermediate
levels being invalidated.

Acked-by: Nicholas Piggin <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Will Deacon <[email protected]>

https://jira.sw.ru/browse/PSBM-101300
(cherry picked from commit 22a61c3c4f1379ef8b0ce0d5cb78baf3178950e2)
Signed-off-by: Andrey Ryabinin <[email protected]>
---
 include/asm-generic/tlb.h | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 9b1e2af99ddf..bb8fd22ec20b 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -96,12 +96,22 @@ struct mmu_gather {
 #endif
 	unsigned long		start;
 	unsigned long		end;
-	/* we are in the middle of an operation to clear
-	 * a full mm and can make some optimizations */
-	unsigned int		fullmm : 1,
-	/* we have performed an operation which
-	 * requires a complete flush of the tlb */
-				need_flush_all : 1;
+	/*
+	 * we are in the middle of an operation to clear
+	 * a full mm and can make some optimizations
+	 */
+	unsigned int		fullmm : 1;
+
+	/*
+	 * we have performed an operation which
+	 * requires a complete flush of the tlb
+	 */
+	unsigned int		need_flush_all : 1;
+
+	/*
+	 * we have removed page directories
+	 */
+	unsigned int		freed_tables : 1;
 
 	struct mmu_gather_batch *active;
 	struct mmu_gather_batch	local;
@@ -140,6 +150,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 {
 	tlb->start = TASK_SIZE;
 	tlb->end = 0;
+	tlb->freed_tables = 0;
 }
 
 /*
@@ -211,6 +222,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 #define pte_free_tlb(tlb, ptep, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
 		__pte_free_tlb(tlb, ptep, address);		\
 	} while (0)
 
@@ -218,6 +230,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 #define pud_free_tlb(tlb, pudp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
 		__pud_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
@@ -225,6 +238,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
 #define pmd_free_tlb(tlb, pmdp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
 		__pmd_free_tlb(tlb, pmdp, address);		\
 	} while (0)
 
-- 
2.26.2
_______________________________________________
Devel mailing list
[email protected]
https://lists.openvz.org/mailman/listinfo/devel
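To illustrate how an architecture back-end might consume the new bit, here is a minimal, self-contained userspace sketch (not part of the patch; the struct and the helpers flush_last_level_range()/flush_all_levels_range() are hypothetical stand-ins for whatever invalidation primitives a given architecture provides): an arch tlb_flush() implementation can test freed_tables to choose between a cheap last-level-only invalidation and a full invalidation that also drops cached intermediate (directory) entries.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors only the mmu_gather fields relevant to this patch. */
struct mmu_gather_example {
	unsigned long start;
	unsigned long end;
	bool freed_tables;	/* set when page-table pages were freed */
};

/* Cheap invalidation: only last-level (leaf) TLB entries. */
static void flush_last_level_range(unsigned long start, unsigned long end)
{
	printf("leaf-only flush of [%#lx, %#lx)\n", start, end);
}

/* Expensive invalidation: also drops cached directory entries. */
static void flush_all_levels_range(unsigned long start, unsigned long end)
{
	printf("all-level flush of [%#lx, %#lx)\n", start, end);
}

/*
 * How an arch tlb_flush() might pick an instruction: if no page tables
 * were freed, invalidating the leaf entries is enough; otherwise the
 * intermediate (directory) entries must be invalidated as well.
 */
static void example_tlb_flush(struct mmu_gather_example *tlb)
{
	if (tlb->end <= tlb->start)
		return;			/* nothing accumulated */

	if (tlb->freed_tables)
		flush_all_levels_range(tlb->start, tlb->end);
	else
		flush_last_level_range(tlb->start, tlb->end);
}

int main(void)
{
	struct mmu_gather_example tlb = {
		.start = 0x400000, .end = 0x600000, .freed_tables = false,
	};

	example_tlb_flush(&tlb);	/* leaf-only flush */

	tlb.freed_tables = true;	/* as after pte/pmd/pud_free_tlb() */
	example_tlb_flush(&tlb);	/* all-level flush */
	return 0;
}

Note that __tlb_reset_range() clears freed_tables together with start/end, so the bit only reflects the work gathered since the last flush.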
