On 01/09/2022 at 13:03, Nicholas Piggin wrote:
> Allow PTE changes to avoid flushing the TLB when access permissions are
> being relaxed, the dirty bit is being set, and the accessed bit is being
> changed.
> 
> Relaxing access permissions and setting dirty and accessed bits do not
> require a flush because the MMU will re-load the PTE and notice the
> updates (it may also cause a spurious fault).
> 
> Clearing the accessed bit does not require a flush because of the
> imprecise PTE accessed bit accounting that is already performed, as
> documented in ptep_clear_flush_young().
> 
> This reduces TLB flushing for some mprotect(2) calls.
> 
> Signed-off-by: Nicholas Piggin <npig...@gmail.com>

Reviewed-by: Christophe Leroy <christophe.le...@csgroup.eu>

> ---
> Changes in v2:
> - Change VM_BUG_ON to VM_WARN_ON (Christophe)
> - Remove ifndef guard around helper defines (Christophe)
> - Adjust comments slightly.
> 
>   arch/powerpc/include/asm/book3s/64/pgtable.h  |  3 +
>   arch/powerpc/include/asm/book3s/64/tlbflush.h | 56 +++++++++++++++++++
>   arch/powerpc/mm/book3s64/hash_tlb.c           |  1 +
>   arch/powerpc/mm/book3s64/pgtable.c            |  1 +
>   4 files changed, 61 insertions(+)
> 
> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
> index cb9d5fd39d7f..a5042bb9a30c 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
> @@ -411,6 +411,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
>    * event of it not getting flushed for a long time the delay
>    * shouldn't really matter because there's no real memory
>    * pressure for swapout to react to. ]
> + *
> + * Note: this optimisation also exists in pte_needs_flush() and
> + * huge_pmd_needs_flush().
>    */
>   #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
>   #define ptep_clear_flush_young ptep_test_and_clear_young
> diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
> index d2e80f178b6d..66b0de0ce601 100644
> --- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
> +++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
> @@ -143,6 +143,62 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
>               flush_tlb_page(vma, address);
>   }
>   
> +static inline bool __pte_flags_need_flush(unsigned long oldval,
> +                                       unsigned long newval)
> +{
> +     unsigned long delta = oldval ^ newval;
> +
> +     /*
> +      * The return value of this function doesn't matter for hash:
> +      * ptep_modify_prot_start() does a pte_update(), which performs or
> +      * schedules any necessary hash table update and flush.
> +      */
> +     if (!radix_enabled())
> +             return true;
> +
> +     /*
> +      * We do not expect kernel mappings or non-PTEs or not-present PTEs.
> +      */
> +     VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
> +     VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
> +     VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
> +     VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
> +     VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
> +     VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT));
> +
> +     /*
> +      * Must flush on any change except READ, WRITE, EXEC, DIRTY, ACCESSED.
> +      *
> +      * In theory, some changed software bits could be tolerated; in
> +      * practice those should rarely if ever matter.
> +      */
> +
> +     if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | _PAGE_ACCESSED))
> +             return true;
> +
> +     /*
> +      * If any of the above was present in old but cleared in new, flush.
> +      * With the exception of _PAGE_ACCESSED, don't worry about flushing
> +      * if that was cleared (see the comment in ptep_clear_flush_young()).
> +      */
> +     if ((delta & ~_PAGE_ACCESSED) & oldval)
> +             return true;
> +
> +     return false;
> +}
> +
> +static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
> +{
> +     return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte));
> +}
> +#define pte_needs_flush pte_needs_flush
> +
> +static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
> +{
> +     return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd));
> +}
> +#define huge_pmd_needs_flush huge_pmd_needs_flush
> +
>   extern bool tlbie_capable;
>   extern bool tlbie_enabled;
>   
