Re: [PATCH v11 01/13] mm/vmalloc: fix HUGE_VMAP regression by enabling huge pages in vmalloc_to_page

2021-02-02 Thread Nicholas Piggin
Excerpts from Ding Tianhong's message of January 28, 2021 1:13 pm:
> On 2021/1/26 12:44, Nicholas Piggin wrote:
>> vmalloc_to_page returns NULL for addresses mapped by larger pages[*].
>> Whether or not a vmap is huge depends on the architecture details,
>> alignments, boot options, etc., which the caller can not be expected
>> to know. Therefore HUGE_VMAP is a regression for vmalloc_to_page.
>> 
>> This change teaches vmalloc_to_page about larger pages, and returns
>> the struct page that corresponds to the offset within the large page.
>> This makes the API agnostic to mapping implementation details.
>> 
>> [*] As explained by commit 029c54b095995 ("mm/vmalloc.c: huge-vmap:
>> fail gracefully on unexpected huge vmap mappings")
>> 
>> Reviewed-by: Christoph Hellwig 
>> Signed-off-by: Nicholas Piggin 
>> ---
>>  mm/vmalloc.c | 41 ++---
>>  1 file changed, 26 insertions(+), 15 deletions(-)
>> 
>> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
>> index e6f352bf0498..62372f9e0167 100644
>> --- a/mm/vmalloc.c
>> +++ b/mm/vmalloc.c
>> @@ -34,7 +34,7 @@
>>  #include 
>>  #include 
>>  #include 
>> -
>> +#include 
>>  #include 
>>  #include 
>>  #include 
>> @@ -343,7 +343,9 @@ int is_vmalloc_or_module_addr(const void *x)
>>  }
>>  
>>  /*
>> - * Walk a vmap address to the struct page it maps.
>> + * Walk a vmap address to the struct page it maps. Huge vmap mappings will
>> + * return the tail page that corresponds to the base page address, which
>> + * matches small vmap mappings.
>>   */
>>  struct page *vmalloc_to_page(const void *vmalloc_addr)
>>  {
>> @@ -363,25 +365,33 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
>>  
>>  if (pgd_none(*pgd))
>>  return NULL;
>> +if (WARN_ON_ONCE(pgd_leaf(*pgd)))
>> +return NULL; /* XXX: no allowance for huge pgd */
>> +if (WARN_ON_ONCE(pgd_bad(*pgd)))
>> +return NULL;
>> +
>>  p4d = p4d_offset(pgd, addr);
>>  if (p4d_none(*p4d))
>>  return NULL;
>> -pud = pud_offset(p4d, addr);
>> +if (p4d_leaf(*p4d))
>> +return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
>> +if (WARN_ON_ONCE(p4d_bad(*p4d)))
>> +return NULL;
>>  
>> -/*
>> - * Don't dereference bad PUD or PMD (below) entries. This will also
>> - * identify huge mappings, which we may encounter on architectures
>> - * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
>> - * identified as vmalloc addresses by is_vmalloc_addr(), but are
>> - * not [unambiguously] associated with a struct page, so there is
>> - * no correct value to return for them.
>> - */
>> -WARN_ON_ONCE(pud_bad(*pud));
>> -if (pud_none(*pud) || pud_bad(*pud))
>> +pud = pud_offset(p4d, addr);
>> +if (pud_none(*pud))
>> +return NULL;
>> +if (pud_leaf(*pud))
>> +return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
> 
> Hi Nicho:
> 
> /builds/1mzfdQzleCy69KZFb5qHNSEgabZ/mm/vmalloc.c: In function 'vmalloc_to_page':
> /builds/1mzfdQzleCy69KZFb5qHNSEgabZ/include/asm-generic/pgtable-nop4d-hack.h:48:27: error: implicit declaration of function 'pud_page'; did you mean 'put_page'? [-Werror=implicit-function-declaration]
>    48 | #define pgd_page(pgd)  (pud_page((pud_t){ pgd }))
>       |                          ^~~~~~~~
> 
> pud_page() is not defined for aarch32 when the 2-level page table
> config is enabled, which breaks the build.

Hey, thanks for finding that; not sure why it didn't trigger any CI.

Anyway, newer kernels don't have the pgtable-*-hack.h headers, but even
so it still breaks upstream. arm is using some hand-rolled 2-level
folding of its own (which is fair enough, because most 32-bit archs
were 2-level at the time I added the pgtable-nopud.h header).
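
As an aside (not from the thread): the "folding" here is just
rewrapping the same entry bits and delegating to a neighbouring
level's helper, which is what both the macro in the error above and
the arm macros moved by the patch below do. A standalone C sketch of
the pattern, with made-up types and a made-up pfn encoding:

#include <stdio.h>

typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t; /* folded: carries pmd bits */

/* Helper at the level that really exists. */
static unsigned long pmd_pfn_sketch(pmd_t pmd)
{
	return pmd.pmd >> 12;
}

/* Folded helper: unwrap, rewrap, delegate -- no logic of its own. */
static unsigned long pud_pfn_sketch(pud_t pud)
{
	return pmd_pfn_sketch((pmd_t){ pud.pud });
}

int main(void)
{
	pud_t pud = { 0x1234000UL };
	printf("%lx\n", pud_pfn_sketch(pud)); /* prints 1234 */
	return 0;
}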

This patch seems to at least make it build.

Thanks,
Nick

---
 arch/arm/include/asm/pgtable-3level.h | 2 --
 arch/arm/include/asm/pgtable.h| 3 +++
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/arm/include/asm/pgtable-3level.h 
b/arch/arm/include/asm/pgtable-3level.h
index 2b85d175e999..d4edab51a77c 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -186,8 +186,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
 
 #define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
 #define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY))
-#define pud_page(pud)  pmd_page(__pmd(pud_val(pud)))
-#define pud_write(pud) pmd_write(__pmd(pud_val(pud)))
 
 #define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
 #define pmd_thp_or_huge(pmd)   (pmd_huge(pmd) || pmd_trans_huge(pmd))
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index c02f24400369..d63a5bb6bd0c 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -166,6 +166,9 @@ extern struct page *empty_zero_page;
 
+#define pud_page(pud)  pmd_page(__pmd(pud_val(pud)))
+#define pud_write(pud) pmd_write(__pmd(pud_val(pud)))
+

Re: [PATCH v11 01/13] mm/vmalloc: fix HUGE_VMAP regression by enabling huge pages in vmalloc_to_page

2021-01-27 Thread Ding Tianhong
On 2021/1/26 12:44, Nicholas Piggin wrote:
> vmalloc_to_page returns NULL for addresses mapped by larger pages[*].
> Whether or not a vmap is huge depends on the architecture details,
> alignments, boot options, etc., which the caller can not be expected
> to know. Therefore HUGE_VMAP is a regression for vmalloc_to_page.
> 
> This change teaches vmalloc_to_page about larger pages, and returns
> the struct page that corresponds to the offset within the large page.
> This makes the API agnostic to mapping implementation details.
> 
> [*] As explained by commit 029c54b095995 ("mm/vmalloc.c: huge-vmap:
> fail gracefully on unexpected huge vmap mappings")
> 
> Reviewed-by: Christoph Hellwig 
> Signed-off-by: Nicholas Piggin 
> ---
>  mm/vmalloc.c | 41 ++---
>  1 file changed, 26 insertions(+), 15 deletions(-)
> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index e6f352bf0498..62372f9e0167 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -34,7 +34,7 @@
>  #include 
>  #include 
>  #include 
> -
> +#include 
>  #include 
>  #include 
>  #include 
> @@ -343,7 +343,9 @@ int is_vmalloc_or_module_addr(const void *x)
>  }
>  
>  /*
> - * Walk a vmap address to the struct page it maps.
> + * Walk a vmap address to the struct page it maps. Huge vmap mappings will
> + * return the tail page that corresponds to the base page address, which
> + * matches small vmap mappings.
>   */
>  struct page *vmalloc_to_page(const void *vmalloc_addr)
>  {
> @@ -363,25 +365,33 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
>  
>   if (pgd_none(*pgd))
>   return NULL;
> + if (WARN_ON_ONCE(pgd_leaf(*pgd)))
> + return NULL; /* XXX: no allowance for huge pgd */
> + if (WARN_ON_ONCE(pgd_bad(*pgd)))
> + return NULL;
> +
>   p4d = p4d_offset(pgd, addr);
>   if (p4d_none(*p4d))
>   return NULL;
> - pud = pud_offset(p4d, addr);
> + if (p4d_leaf(*p4d))
> + return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
> + if (WARN_ON_ONCE(p4d_bad(*p4d)))
> + return NULL;
>  
> - /*
> -  * Don't dereference bad PUD or PMD (below) entries. This will also
> -  * identify huge mappings, which we may encounter on architectures
> -  * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
> -  * identified as vmalloc addresses by is_vmalloc_addr(), but are
> -  * not [unambiguously] associated with a struct page, so there is
> -  * no correct value to return for them.
> -  */
> - WARN_ON_ONCE(pud_bad(*pud));
> - if (pud_none(*pud) || pud_bad(*pud))
> + pud = pud_offset(p4d, addr);
> + if (pud_none(*pud))
> + return NULL;
> + if (pud_leaf(*pud))
> + return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);

Hi Nicho:

/builds/1mzfdQzleCy69KZFb5qHNSEgabZ/mm/vmalloc.c: In function 'vmalloc_to_page':
/builds/1mzfdQzleCy69KZFb5qHNSEgabZ/include/asm-generic/pgtable-nop4d-hack.h:48:27: error: implicit declaration of function 'pud_page'; did you mean 'put_page'? [-Werror=implicit-function-declaration]
   48 | #define pgd_page(pgd)  (pud_page((pud_t){ pgd }))
      |                         ^~~~~~~~

pud_page() is not defined for aarch32 when the 2-level page table
config is enabled, which breaks the build.
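
A reduced model of what goes wrong (plain C, nothing kernel-specific):
the generic header defines pgd_page() by expanding to pud_page(), so a
config that never declares pud_page() gets the implicit-declaration
error above at the macro's point of use. Declaring the helper
unconditionally, as the arm fix earlier in this thread does, resolves
it:

struct page;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pud; } pud_t;

/* The fix: make pud_page() visible to every config, not just 3-level. */
static struct page *pud_page(pud_t pud) { (void)pud.pud; return 0; }

/* Generic-header style macro that assumes pud_page() exists. */
#define pgd_page(x) (pud_page((pud_t){ (x).pgd }))

struct page *lookup(pgd_t pgd) { return pgd_page(pgd); }

int main(void) { pgd_t pgd = { 0 }; return lookup(pgd) == 0 ? 0 : 1; }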


> + if (WARN_ON_ONCE(pud_bad(*pud)))
>   return NULL;
> +
>   pmd = pmd_offset(pud, addr);
> - WARN_ON_ONCE(pmd_bad(*pmd));
> - if (pmd_none(*pmd) || pmd_bad(*pmd))
> + if (pmd_none(*pmd))
> + return NULL;
> + if (pmd_leaf(*pmd))
> + return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
> + if (WARN_ON_ONCE(pmd_bad(*pmd)))
>   return NULL;
>  
>   ptep = pte_offset_map(pmd, addr);
> @@ -389,6 +399,7 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
>   if (pte_present(pte))
>   page = pte_page(pte);
>   pte_unmap(ptep);
> +
>   return page;
>  }
>  EXPORT_SYMBOL(vmalloc_to_page);
> 



Re: [PATCH v11 01/13] mm/vmalloc: fix HUGE_VMAP regression by enabling huge pages in vmalloc_to_page

2021-01-26 Thread Miaohe Lin
Hi:
On 2021/1/26 12:44, Nicholas Piggin wrote:
> vmalloc_to_page returns NULL for addresses mapped by larger pages[*].
> Whether or not a vmap is huge depends on the architecture details,
> alignments, boot options, etc., which the caller can not be expected
> to know. Therefore HUGE_VMAP is a regression for vmalloc_to_page.
> 
> This change teaches vmalloc_to_page about larger pages, and returns
> the struct page that corresponds to the offset within the large page.
> This makes the API agnostic to mapping implementation details.
> 
> [*] As explained by commit 029c54b095995 ("mm/vmalloc.c: huge-vmap:
> fail gracefully on unexpected huge vmap mappings")
> 
> Reviewed-by: Christoph Hellwig 
> Signed-off-by: Nicholas Piggin 
> ---
>  mm/vmalloc.c | 41 ++---
>  1 file changed, 26 insertions(+), 15 deletions(-)
> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index e6f352bf0498..62372f9e0167 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -34,7 +34,7 @@
>  #include 
>  #include 
>  #include 
> -
> +#include 
>  #include 
>  #include 
>  #include 
> @@ -343,7 +343,9 @@ int is_vmalloc_or_module_addr(const void *x)
>  }
>  
>  /*
> - * Walk a vmap address to the struct page it maps.
> + * Walk a vmap address to the struct page it maps. Huge vmap mappings will
> + * return the tail page that corresponds to the base page address, which
> + * matches small vmap mappings.
>   */
>  struct page *vmalloc_to_page(const void *vmalloc_addr)
>  {
> @@ -363,25 +365,33 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
>  
>   if (pgd_none(*pgd))
>   return NULL;
> + if (WARN_ON_ONCE(pgd_leaf(*pgd)))
> + return NULL; /* XXX: no allowance for huge pgd */
> + if (WARN_ON_ONCE(pgd_bad(*pgd)))
> + return NULL;
> +
>   p4d = p4d_offset(pgd, addr);
>   if (p4d_none(*p4d))
>   return NULL;
> - pud = pud_offset(p4d, addr);
> + if (p4d_leaf(*p4d))
> + return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
> + if (WARN_ON_ONCE(p4d_bad(*p4d)))
> + return NULL;
>  
> - /*
> -  * Don't dereference bad PUD or PMD (below) entries. This will also
> -  * identify huge mappings, which we may encounter on architectures
> -  * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
> -  * identified as vmalloc addresses by is_vmalloc_addr(), but are
> -  * not [unambiguously] associated with a struct page, so there is
> -  * no correct value to return for them.
> -  */
> - WARN_ON_ONCE(pud_bad(*pud));
> - if (pud_none(*pud) || pud_bad(*pud))
> + pud = pud_offset(p4d, addr);
> + if (pud_none(*pud))
> + return NULL;
> + if (pud_leaf(*pud))
> + return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
> + if (WARN_ON_ONCE(pud_bad(*pud)))
>   return NULL;
> +
>   pmd = pmd_offset(pud, addr);
> - WARN_ON_ONCE(pmd_bad(*pmd));
> - if (pmd_none(*pmd) || pmd_bad(*pmd))
> + if (pmd_none(*pmd))
> + return NULL;
> + if (pmd_leaf(*pmd))
> + return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
> + if (WARN_ON_ONCE(pmd_bad(*pmd)))
>   return NULL;
>  
>   ptep = pte_offset_map(pmd, addr);
> @@ -389,6 +399,7 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
>   if (pte_present(pte))
>   page = pte_page(pte);
>   pte_unmap(ptep);
> +
>   return page;
>  }
>  EXPORT_SYMBOL(vmalloc_to_page);
> 

LGTM. Thanks.

Reviewed-by: Miaohe Lin 


[PATCH v11 01/13] mm/vmalloc: fix HUGE_VMAP regression by enabling huge pages in vmalloc_to_page

2021-01-25 Thread Nicholas Piggin
vmalloc_to_page returns NULL for addresses mapped by larger pages[*].
Whether or not a vmap is huge depends on the architecture details,
alignments, boot options, etc., which the caller can not be expected
to know. Therefore HUGE_VMAP is a regression for vmalloc_to_page.

This change teaches vmalloc_to_page about larger pages, and returns
the struct page that corresponds to the offset within the large page.
This makes the API agnostic to mapping implementation details.

[*] As explained by commit 029c54b095995 ("mm/vmalloc.c: huge-vmap:
fail gracefully on unexpected huge vmap mappings")

Reviewed-by: Christoph Hellwig 
Signed-off-by: Nicholas Piggin 
---
 mm/vmalloc.c | 41 ++---
 1 file changed, 26 insertions(+), 15 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e6f352bf0498..62372f9e0167 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -34,7 +34,7 @@
 #include 
 #include 
 #include 
-
+#include 
 #include 
 #include 
 #include 
@@ -343,7 +343,9 @@ int is_vmalloc_or_module_addr(const void *x)
 }
 
 /*
- * Walk a vmap address to the struct page it maps.
+ * Walk a vmap address to the struct page it maps. Huge vmap mappings will
+ * return the tail page that corresponds to the base page address, which
+ * matches small vmap mappings.
  */
 struct page *vmalloc_to_page(const void *vmalloc_addr)
 {
@@ -363,25 +365,33 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 
if (pgd_none(*pgd))
return NULL;
+   if (WARN_ON_ONCE(pgd_leaf(*pgd)))
+   return NULL; /* XXX: no allowance for huge pgd */
+   if (WARN_ON_ONCE(pgd_bad(*pgd)))
+   return NULL;
+
p4d = p4d_offset(pgd, addr);
if (p4d_none(*p4d))
return NULL;
-   pud = pud_offset(p4d, addr);
+   if (p4d_leaf(*p4d))
+   return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
+   if (WARN_ON_ONCE(p4d_bad(*p4d)))
+   return NULL;
 
-   /*
-* Don't dereference bad PUD or PMD (below) entries. This will also
-* identify huge mappings, which we may encounter on architectures
-* that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
-* identified as vmalloc addresses by is_vmalloc_addr(), but are
-* not [unambiguously] associated with a struct page, so there is
-* no correct value to return for them.
-*/
-   WARN_ON_ONCE(pud_bad(*pud));
-   if (pud_none(*pud) || pud_bad(*pud))
+   pud = pud_offset(p4d, addr);
+   if (pud_none(*pud))
+   return NULL;
+   if (pud_leaf(*pud))
+   return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+   if (WARN_ON_ONCE(pud_bad(*pud)))
return NULL;
+
pmd = pmd_offset(pud, addr);
-   WARN_ON_ONCE(pmd_bad(*pmd));
-   if (pmd_none(*pmd) || pmd_bad(*pmd))
+   if (pmd_none(*pmd))
+   return NULL;
+   if (pmd_leaf(*pmd))
+   return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+   if (WARN_ON_ONCE(pmd_bad(*pmd)))
return NULL;
 
ptep = pte_offset_map(pmd, addr);
@@ -389,6 +399,7 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
if (pte_present(pte))
page = pte_page(pte);
pte_unmap(ptep);
+
return page;
 }
 EXPORT_SYMBOL(vmalloc_to_page);
-- 
2.23.0
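
For reference, a worked example of the offset arithmetic the patch
introduces, runnable in userspace. The shift values are assumptions
(4K base pages, 2M PMD leaves, 1G PUD leaves, x86-64-style); the
kernel's actual values are per-architecture:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21
#define PUD_SHIFT  30
#define PMD_MASK   (~((1ULL << PMD_SHIFT) - 1))
#define PUD_MASK   (~((1ULL << PUD_SHIFT) - 1))

int main(void)
{
	uint64_t addr = 0xffffc90000345000ULL; /* an example vmap address */

	/*
	 * For a leaf entry, the patch returns the huge mapping's first
	 * struct page plus the index of the 4K base page within the
	 * mapping, so the caller sees the same page it would have seen
	 * had the range been mapped with small pages.
	 */
	printf("index within a 2M PMD leaf: %llu\n",
	       (unsigned long long)((addr & ~PMD_MASK) >> PAGE_SHIFT)); /* 325 */
	printf("index within a 1G PUD leaf: %llu\n",
	       (unsigned long long)((addr & ~PUD_MASK) >> PAGE_SHIFT)); /* 837 */
	return 0;
}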