Re: [PATCH] powerpc/mm: Handle mmap_min_addr correctly in get_unmapped_area callback

2019-02-22 Thread Aneesh Kumar K.V
Michael Ellerman  writes:

> "Aneesh Kumar K.V"  writes:
>
>> After we ALIGN up the address we need to make sure we didn't overflow
>> and resulted in zero address. In that case, we need to make sure that
>> the returned address is greater than mmap_min_addr.
>>
>> Also when doing top-down search the low_limit is not PAGE_SIZE but rather
>> max(PAGE_SIZE, mmap_min_addr). This handle cases in which mmap_min_addr >
>> PAGE_SIZE.
>>
>> This fixes selftest va_128TBswitch --run-hugetlb reporting failures when
>> run as non root user for
>>
>> mmap(-1, MAP_HUGETLB)
>> mmap(-1, MAP_HUGETLB)
>>
>> We also avoid the first mmap(-1, MAP_HUGETLB) returning NULL address as mmap 
>> address
>> with this change
>
> So we think this is not a security issue, because it only affects
> whether we choose an address below mmap_min_addr, not whether we
> actually allow that address to be mapped.
>
> ie. there are existing capability checks to prevent a user mapping below
> mmap_min_addr and those will still be honoured even without this fix.
>
> However there is a bug in that a non-root user requesting address -1
> will be given address 0 which will then fail, whereas they should have
> been given something else that would have succeeded.
>
> Did I get that all right?

Correct

>> CC: Laurent Dufour 
>> Signed-off-by: Aneesh Kumar K.V 
>
> Seems like this should have a Fixes: tag?

I guess the hugetlb fix is 

Fixes: 484837601d4d ("powerpc/mm: Add radix support for hugetlb")

The slice related fix is possibly

Fixes: fba2369e6ceb ("mm: use vm_unmapped_area() on powerpc architecture")

This introduced info.low_limit, which we are fixing in the patch. But
similar logic was present even before, via:

Fixes: d0f13e3c20b6 ("[POWERPC] Introduce address space "slices"")

-aneesh



Re: [PATCH] powerpc/mm: Handle mmap_min_addr correctly in get_unmapped_area callback

2019-02-19 Thread Michael Ellerman
"Aneesh Kumar K.V"  writes:

> After we ALIGN up the address we need to make sure we didn't overflow
> and resulted in zero address. In that case, we need to make sure that
> the returned address is greater than mmap_min_addr.
>
> Also when doing top-down search the low_limit is not PAGE_SIZE but rather
> max(PAGE_SIZE, mmap_min_addr). This handle cases in which mmap_min_addr >
> PAGE_SIZE.
>
> This fixes selftest va_128TBswitch --run-hugetlb reporting failures when
> run as non root user for
>
> mmap(-1, MAP_HUGETLB)
> mmap(-1, MAP_HUGETLB)
>
> We also avoid the first mmap(-1, MAP_HUGETLB) returning NULL address as mmap 
> address
> with this change

So we think this is not a security issue, because it only affects
whether we choose an address below mmap_min_addr, not whether we
actually allow that address to be mapped.

ie. there are existing capability checks to prevent a user mapping below
mmap_min_addr and those will still be honoured even without this fix.

However there is a bug in that a non-root user requesting address -1
will be given address 0 which will then fail, whereas they should have
been given something else that would have succeeded.

Did I get that all right?

> CC: Laurent Dufour 
> Signed-off-by: Aneesh Kumar K.V 

Seems like this should have a Fixes: tag?

cheers

> ---
>  arch/powerpc/mm/hugetlbpage-radix.c |  5 +++--
>  arch/powerpc/mm/slice.c | 10 ++
>  2 files changed, 9 insertions(+), 6 deletions(-)
>
> diff --git a/arch/powerpc/mm/hugetlbpage-radix.c 
> b/arch/powerpc/mm/hugetlbpage-radix.c
> index 2486bee0f93e..97c7a39ebc00 100644
> --- a/arch/powerpc/mm/hugetlbpage-radix.c
> +++ b/arch/powerpc/mm/hugetlbpage-radix.c
> @@ -1,6 +1,7 @@
>  // SPDX-License-Identifier: GPL-2.0
>  #include 
>  #include 
> +#include 
>  #include 
>  #include 
>  #include 
> @@ -73,7 +74,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, 
> unsigned long addr,
>   if (addr) {
>   addr = ALIGN(addr, huge_page_size(h));
>   vma = find_vma(mm, addr);
> - if (high_limit - len >= addr &&
> + if (high_limit - len >= addr && addr >= mmap_min_addr &&
>   (!vma || addr + len <= vm_start_gap(vma)))
>   return addr;
>   }
> @@ -83,7 +84,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, 
> unsigned long addr,
>*/
>   info.flags = VM_UNMAPPED_AREA_TOPDOWN;
>   info.length = len;
> - info.low_limit = PAGE_SIZE;
> + info.low_limit = max(PAGE_SIZE, mmap_min_addr);
>   info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
>   info.align_mask = PAGE_MASK & ~huge_page_mask(h);
>   info.align_offset = 0;
> diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
> index 06898c13901d..aec91dbcdc0b 100644
> --- a/arch/powerpc/mm/slice.c
> +++ b/arch/powerpc/mm/slice.c
> @@ -32,6 +32,7 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  #include 
>  #include 
>  #include 
> @@ -377,6 +378,7 @@ static unsigned long slice_find_area_topdown(struct 
> mm_struct *mm,
>   int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
>   unsigned long addr, found, prev;
>   struct vm_unmapped_area_info info;
> + unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);
>  
>   info.flags = VM_UNMAPPED_AREA_TOPDOWN;
>   info.length = len;
> @@ -393,7 +395,7 @@ static unsigned long slice_find_area_topdown(struct 
> mm_struct *mm,
>   if (high_limit > DEFAULT_MAP_WINDOW)
>   addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;
>  
> - while (addr > PAGE_SIZE) {
> + while (addr > min_addr) {
>   info.high_limit = addr;
>   if (!slice_scan_available(addr - 1, available, 0, ))
>   continue;
> @@ -405,8 +407,8 @@ static unsigned long slice_find_area_topdown(struct 
> mm_struct *mm,
>* Check if we need to reduce the range, or if we can
>* extend it to cover the previous available slice.
>*/
> - if (addr < PAGE_SIZE)
> - addr = PAGE_SIZE;
> + if (addr < min_addr)
> + addr = min_addr;
>   else if (slice_scan_available(addr - 1, available, 0, )) {
>   addr = prev;
>   goto prev_slice;
> @@ -528,7 +530,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, 
> unsigned long len,
>   addr = _ALIGN_UP(addr, page_size);
>   slice_dbg(" aligned addr=%lx\n", addr);
>   /* Ignore hint if it's too large or overlaps a VMA */
> - if (addr > high_limit - len ||
> + if (addr > high_limit - len || addr < mmap_min_addr ||
>   !slice_area_is_free(mm, addr, len))
>   addr = 0;
>   }
> -- 
> 2.20.1


Re: [PATCH] powerpc/mm: Handle mmap_min_addr correctly in get_unmapped_area callback

2019-02-15 Thread Laurent Dufour

Le 15/02/2019 à 09:16, Aneesh Kumar K.V a écrit :

After we ALIGN up the address we need to make sure we didn't overflow
and resulted in zero address. In that case, we need to make sure that
the returned address is greater than mmap_min_addr.

Also when doing top-down search the low_limit is not PAGE_SIZE but rather
max(PAGE_SIZE, mmap_min_addr). This handles cases in which mmap_min_addr >
PAGE_SIZE.

This fixes selftest va_128TBswitch --run-hugetlb reporting failures when
run as non root user for

mmap(-1, MAP_HUGETLB)
mmap(-1, MAP_HUGETLB)

We also avoid the first mmap(-1, MAP_HUGETLB) returning NULL address as mmap 
address
with this change


FWIW:
Reviewed-by: Laurent Dufour 


CC: Laurent Dufour 
Signed-off-by: Aneesh Kumar K.V 
---
  arch/powerpc/mm/hugetlbpage-radix.c |  5 +++--
  arch/powerpc/mm/slice.c | 10 ++
  2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/mm/hugetlbpage-radix.c 
b/arch/powerpc/mm/hugetlbpage-radix.c
index 2486bee0f93e..97c7a39ebc00 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -1,6 +1,7 @@
  // SPDX-License-Identifier: GPL-2.0
  #include 
  #include 
+#include 
  #include 
  #include 
  #include 
@@ -73,7 +74,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned 
long addr,
if (addr) {
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
-   if (high_limit - len >= addr &&
+   if (high_limit - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -83,7 +84,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned 
long addr,
 */
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
-   info.low_limit = PAGE_SIZE;
+   info.low_limit = max(PAGE_SIZE, mmap_min_addr);
info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 06898c13901d..aec91dbcdc0b 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -32,6 +32,7 @@
  #include 
  #include 
  #include 
+#include 
  #include 
  #include 
  #include 
@@ -377,6 +378,7 @@ static unsigned long slice_find_area_topdown(struct 
mm_struct *mm,
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
unsigned long addr, found, prev;
struct vm_unmapped_area_info info;
+   unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);

info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
@@ -393,7 +395,7 @@ static unsigned long slice_find_area_topdown(struct 
mm_struct *mm,
if (high_limit > DEFAULT_MAP_WINDOW)
addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;

-   while (addr > PAGE_SIZE) {
+   while (addr > min_addr) {
info.high_limit = addr;
if (!slice_scan_available(addr - 1, available, 0, ))
continue;
@@ -405,8 +407,8 @@ static unsigned long slice_find_area_topdown(struct 
mm_struct *mm,
 * Check if we need to reduce the range, or if we can
 * extend it to cover the previous available slice.
 */
-   if (addr < PAGE_SIZE)
-   addr = PAGE_SIZE;
+   if (addr < min_addr)
+   addr = min_addr;
else if (slice_scan_available(addr - 1, available, 0, )) {
addr = prev;
goto prev_slice;
@@ -528,7 +530,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, 
unsigned long len,
addr = _ALIGN_UP(addr, page_size);
slice_dbg(" aligned addr=%lx\n", addr);
/* Ignore hint if it's too large or overlaps a VMA */
-   if (addr > high_limit - len ||
+   if (addr > high_limit - len || addr < mmap_min_addr ||
!slice_area_is_free(mm, addr, len))
addr = 0;
}





[PATCH] powerpc/mm: Handle mmap_min_addr correctly in get_unmapped_area callback

2019-02-15 Thread Aneesh Kumar K.V
After we ALIGN up the address we need to make sure we didn't overflow
and resulted in zero address. In that case, we need to make sure that
the returned address is greater than mmap_min_addr.

Also when doing top-down search the low_limit is not PAGE_SIZE but rather
max(PAGE_SIZE, mmap_min_addr). This handles cases in which mmap_min_addr >
PAGE_SIZE.

This fixes selftest va_128TBswitch --run-hugetlb reporting failures when
run as a non-root user for

mmap(-1, MAP_HUGETLB)
mmap(-1, MAP_HUGETLB)

We also avoid the first mmap(-1, MAP_HUGETLB) returning NULL address as mmap 
address
with this change

CC: Laurent Dufour 
Signed-off-by: Aneesh Kumar K.V 
---
 arch/powerpc/mm/hugetlbpage-radix.c |  5 +++--
 arch/powerpc/mm/slice.c | 10 ++
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/mm/hugetlbpage-radix.c 
b/arch/powerpc/mm/hugetlbpage-radix.c
index 2486bee0f93e..97c7a39ebc00 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -73,7 +74,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned 
long addr,
if (addr) {
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
-   if (high_limit - len >= addr &&
+   if (high_limit - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -83,7 +84,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned 
long addr,
 */
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
-   info.low_limit = PAGE_SIZE;
+   info.low_limit = max(PAGE_SIZE, mmap_min_addr);
info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 06898c13901d..aec91dbcdc0b 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -32,6 +32,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -377,6 +378,7 @@ static unsigned long slice_find_area_topdown(struct 
mm_struct *mm,
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
unsigned long addr, found, prev;
struct vm_unmapped_area_info info;
+   unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);
 
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
@@ -393,7 +395,7 @@ static unsigned long slice_find_area_topdown(struct 
mm_struct *mm,
if (high_limit > DEFAULT_MAP_WINDOW)
addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;
 
-   while (addr > PAGE_SIZE) {
+   while (addr > min_addr) {
info.high_limit = addr;
if (!slice_scan_available(addr - 1, available, 0, ))
continue;
@@ -405,8 +407,8 @@ static unsigned long slice_find_area_topdown(struct 
mm_struct *mm,
 * Check if we need to reduce the range, or if we can
 * extend it to cover the previous available slice.
 */
-   if (addr < PAGE_SIZE)
-   addr = PAGE_SIZE;
+   if (addr < min_addr)
+   addr = min_addr;
else if (slice_scan_available(addr - 1, available, 0, )) {
addr = prev;
goto prev_slice;
@@ -528,7 +530,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, 
unsigned long len,
addr = _ALIGN_UP(addr, page_size);
slice_dbg(" aligned addr=%lx\n", addr);
/* Ignore hint if it's too large or overlaps a VMA */
-   if (addr > high_limit - len ||
+   if (addr > high_limit - len || addr < mmap_min_addr ||
!slice_area_is_free(mm, addr, len))
addr = 0;
}
-- 
2.20.1