Re: [PATCH v3 3/3] powerpc: mm: support page table check
Le 24/10/2022 à 02:35, Rohan McLure a écrit : > On creation and clearing of a page table mapping, instrument such calls > by invoking page_table_check_pte_set and page_table_check_pte_clear > respectively. These calls serve as a sanity check against illegal > mappings. > > Enable ARCH_SUPPORTS_PAGE_TABLE_CHECK for all ppc64, and 32-bit > platforms implementing Book3S. > > Change pud_pfn to be a runtime bug rather than a build bug as it is > consumed by page_table_check_pud_{clear,set} which are not called. > > See also: > > riscv support in commit 3fee229a8eb9 ("riscv/mm: enable > ARCH_SUPPORTS_PAGE_TABLE_CHECK") > arm64 in commit 42b2547137f5 ("arm64/mm: enable > ARCH_SUPPORTS_PAGE_TABLE_CHECK") > x86_64 in commit d283d422c6c4 ("x86: mm: add x86_64 support for page table > check") > > Signed-off-by: Rohan McLure Reviewed-by: Christophe Leroy > --- > V2: Update spacing and types assigned to pte_update calls. > V3: Update one last pte_update call to remove __pte invocation. > --- > arch/powerpc/Kconfig | 1 + > arch/powerpc/include/asm/book3s/32/pgtable.h | 9 - > arch/powerpc/include/asm/book3s/64/pgtable.h | 18 +++--- > arch/powerpc/include/asm/nohash/32/pgtable.h | 7 ++- > arch/powerpc/include/asm/nohash/64/pgtable.h | 8 ++-- > arch/powerpc/include/asm/nohash/pgtable.h| 1 + > 6 files changed, 37 insertions(+), 7 deletions(-) > > diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig > index 4c466acdc70d..6c213ac46a92 100644 > --- a/arch/powerpc/Kconfig > +++ b/arch/powerpc/Kconfig > @@ -149,6 +149,7 @@ config PPC > select ARCH_STACKWALK > select ARCH_SUPPORTS_ATOMIC_RMW > select ARCH_SUPPORTS_DEBUG_PAGEALLOCif PPC_BOOK3S || PPC_8xx || 40x > + select ARCH_SUPPORTS_PAGE_TABLE_CHECK > select ARCH_USE_BUILTIN_BSWAP > select ARCH_USE_CMPXCHG_LOCKREF if PPC64 > select ARCH_USE_MEMTEST > diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h > b/arch/powerpc/include/asm/book3s/32/pgtable.h > index 8bf1c538839a..6a592426b935 100644 > --- 
a/arch/powerpc/include/asm/book3s/32/pgtable.h > +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h > @@ -53,6 +53,8 @@ > > #ifndef __ASSEMBLY__ > > +#include <linux/page_table_check.h> > + > static inline bool pte_user(pte_t pte) > { > return pte_val(pte) & _PAGE_USER; > @@ -353,7 +355,11 @@ static inline int __ptep_test_and_clear_young(struct > mm_struct *mm, > static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long > addr, > pte_t *ptep) > { > - return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0)); > + pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0)); > + > + page_table_check_pte_clear(mm, addr, old_pte); > + > + return old_pte; > } > > #define __HAVE_ARCH_PTEP_SET_WRPROTECT > @@ -545,6 +551,7 @@ static inline bool pmd_user(pmd_t pmd) > static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, > pte_t *ptep, pte_t pte, int percpu) > { > + page_table_check_pte_set(mm, addr, ptep, pte); > #if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) > /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use > the >* helper pte_update() which does an atomic update. 
We need to do that > diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h > b/arch/powerpc/include/asm/book3s/64/pgtable.h > index 3083111f9d0a..b5c5718d9b90 100644 > --- a/arch/powerpc/include/asm/book3s/64/pgtable.h > +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h > @@ -181,6 +181,8 @@ > #define PAGE_AGP (PAGE_KERNEL_NC) > > #ifndef __ASSEMBLY__ > +#include <linux/page_table_check.h> > + > /* >* page table defines >*/ > @@ -484,8 +486,11 @@ static inline void huge_ptep_set_wrprotect(struct > mm_struct *mm, > static inline pte_t ptep_get_and_clear(struct mm_struct *mm, > unsigned long addr, pte_t *ptep) > { > - unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0); > - return __pte(old); > + pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0)); > + > + page_table_check_pte_clear(mm, addr, old_pte); > + > + return old_pte; > } > > #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL > @@ -494,11 +499,16 @@ static inline pte_t ptep_get_and_clear_full(struct > mm_struct *mm, > pte_t *ptep, int full) > { > if (full && radix_enabled()) { > + pte_t old_pte; > + > /* >* We know that this is a full mm pte clear and >* hence can be sure there is no parallel set_pte. >*/ > - return radix__ptep_get_and_clear_full(mm, addr, ptep, full); > + old_pte = radix__ptep_get_and_clear_full(mm, addr, ptep, full); > + page_table_check_pte_clear(mm, addr, old_pte); > + > +
Re: [PATCH v3 3/3] powerpc: mm: support page table check
On Mon, 2022-10-24 at 11:35 +1100, Rohan McLure wrote: > On creation and clearing of a page table mapping, instrument such > calls > by invoking page_table_check_pte_set and page_table_check_pte_clear > respectively. These calls serve as a sanity check against illegal > mappings. > > Enable ARCH_SUPPORTS_PAGE_TABLE_CHECK for all ppc64, and 32-bit > platforms implementing Book3S. > > Change pud_pfn to be a runtime bug rather than a build bug as it is > consumed by page_table_check_pud_{clear,set} which are not called. > > See also: > > riscv support in commit 3fee229a8eb9 ("riscv/mm: enable > ARCH_SUPPORTS_PAGE_TABLE_CHECK") > arm64 in commit 42b2547137f5 ("arm64/mm: enable > ARCH_SUPPORTS_PAGE_TABLE_CHECK") > x86_64 in commit d283d422c6c4 ("x86: mm: add x86_64 support for page > table > check") > > Signed-off-by: Rohan McLure Reviewed-by: Russell Currey
[PATCH v3 3/3] powerpc: mm: support page table check
On creation and clearing of a page table mapping, instrument such calls by invoking page_table_check_pte_set and page_table_check_pte_clear respectively. These calls serve as a sanity check against illegal mappings. Enable ARCH_SUPPORTS_PAGE_TABLE_CHECK for all ppc64, and 32-bit platforms implementing Book3S. Change pud_pfn to be a runtime bug rather than a build bug as it is consumed by page_table_check_pud_{clear,set} which are not called. See also: riscv support in commit 3fee229a8eb9 ("riscv/mm: enable ARCH_SUPPORTS_PAGE_TABLE_CHECK") arm64 in commit 42b2547137f5 ("arm64/mm: enable ARCH_SUPPORTS_PAGE_TABLE_CHECK") x86_64 in commit d283d422c6c4 ("x86: mm: add x86_64 support for page table check") Signed-off-by: Rohan McLure --- V2: Update spacing and types assigned to pte_update calls. V3: Update one last pte_update call to remove __pte invocation. --- arch/powerpc/Kconfig | 1 + arch/powerpc/include/asm/book3s/32/pgtable.h | 9 - arch/powerpc/include/asm/book3s/64/pgtable.h | 18 +++--- arch/powerpc/include/asm/nohash/32/pgtable.h | 7 ++- arch/powerpc/include/asm/nohash/64/pgtable.h | 8 ++-- arch/powerpc/include/asm/nohash/pgtable.h | 1 + 6 files changed, 37 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 4c466acdc70d..6c213ac46a92 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -149,6 +149,7 @@ config PPC select ARCH_STACKWALK select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x + select ARCH_SUPPORTS_PAGE_TABLE_CHECK select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF if PPC64 select ARCH_USE_MEMTEST diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 8bf1c538839a..6a592426b935 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -53,6 +53,8 @@ #ifndef __ASSEMBLY__ +#include <linux/page_table_check.h> + static inline bool pte_user(pte_t pte) { return 
pte_val(pte) & _PAGE_USER; @@ -353,7 +355,11 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0)); + pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0)); + + page_table_check_pte_clear(mm, addr, old_pte); + + return old_pte; } #define __HAVE_ARCH_PTEP_SET_WRPROTECT @@ -545,6 +551,7 @@ static inline bool pmd_user(pmd_t pmd) static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, int percpu) { + page_table_check_pte_set(mm, addr, ptep, pte); #if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the * helper pte_update() which does an atomic update. We need to do that diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 3083111f9d0a..b5c5718d9b90 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -181,6 +181,8 @@ #define PAGE_AGP (PAGE_KERNEL_NC) #ifndef __ASSEMBLY__ +#include <linux/page_table_check.h> + /* * page table defines */ @@ -484,8 +486,11 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0); - return __pte(old); + pte_t old_pte = __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0)); + + page_table_check_pte_clear(mm, addr, old_pte); + + return old_pte; } #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL @@ -494,11 +499,16 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, pte_t *ptep, int full) { if (full && radix_enabled()) { + pte_t old_pte; + /* * We know that this is a full mm pte clear and * hence can be sure there is no parallel set_pte. 
*/ - return radix__ptep_get_and_clear_full(mm, addr, ptep, full); + old_pte = radix__ptep_get_and_clear_full(mm, addr, ptep, full); + page_table_check_pte_clear(mm, addr, old_pte); + + return old_pte; } return ptep_get_and_clear(mm, addr, ptep); } @@ -884,6 +894,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, */ pte = __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE)); +