Re: [PATCH] mm/hugetlb: Move page order check inside hugetlb_cma_reserve()

2024-02-12 Thread David Hildenbrand

On 09.02.24 06:42, Anshuman Khandual wrote:

All platforms could benefit from a page order check against MAX_PAGE_ORDER
before allocating a CMA area for gigantic hugetlb pages. Let's move this
check from the individual platforms into generic hugetlb code.

Cc: Catalin Marinas 
Cc: Will Deacon 
Cc: Michael Ellerman 
Cc: Nicholas Piggin 
Cc: linux-arm-ker...@lists.infradead.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux...@kvack.org
Cc: linux-ker...@vger.kernel.org
Signed-off-by: Anshuman Khandual 
---
This applies on v6.8-rc3
  
 arch/arm64/mm/hugetlbpage.c   | 7 -------
 arch/powerpc/mm/hugetlbpage.c | 4 +---
 mm/hugetlb.c                  | 7 +++++++
 3 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 8116ac599f80..6720ec8d50e7 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -45,13 +45,6 @@ void __init arm64_hugetlb_cma_reserve(void)
 	else
 		order = CONT_PMD_SHIFT - PAGE_SHIFT;
 
-	/*
-	 * HugeTLB CMA reservation is required for gigantic
-	 * huge pages which could not be allocated via the
-	 * page allocator. Just warn if there is any change
-	 * breaking this assumption.
-	 */
-	WARN_ON(order <= MAX_PAGE_ORDER);
 	hugetlb_cma_reserve(order);
 }
 #endif /* CONFIG_CMA */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0a540b37aab6..16557d008eef 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -614,8 +614,6 @@ void __init gigantic_hugetlb_cma_reserve(void)
 	 */
 	order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
 
-	if (order) {
-		VM_WARN_ON(order <= MAX_PAGE_ORDER);
+	if (order)
 		hugetlb_cma_reserve(order);
-	}
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cf9c9b2906ea..345b3524df35 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7699,6 +7699,13 @@ void __init hugetlb_cma_reserve(int order)
 	bool node_specific_cma_alloc = false;
 	int nid;
 
+	/*
+	 * HugeTLB CMA reservation is required for gigantic
+	 * huge pages which could not be allocated via the
+	 * page allocator. Just warn if there is any change
+	 * breaking this assumption.
+	 */
+	VM_WARN_ON(order <= MAX_PAGE_ORDER);
 	cma_reserve_called = true;
 
 	if (!hugetlb_cma_size)


Reviewed-by: David Hildenbrand 

--
Cheers,

David / dhildenb



Re: [PATCH] mm/hugetlb: Move page order check inside hugetlb_cma_reserve()

2024-02-09 Thread Jane Chu

On 2/8/2024 9:42 PM, Anshuman Khandual wrote:


All platforms could benefit from a page order check against MAX_PAGE_ORDER
before allocating a CMA area for gigantic hugetlb pages. Let's move this
check from the individual platforms into generic hugetlb code.

Cc: Catalin Marinas 
Cc: Will Deacon 
Cc: Michael Ellerman 
Cc: Nicholas Piggin 
Cc: linux-arm-ker...@lists.infradead.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux...@kvack.org
Cc: linux-ker...@vger.kernel.org
Signed-off-by: Anshuman Khandual 
---
This applies on v6.8-rc3
  
 arch/arm64/mm/hugetlbpage.c   | 7 -------
 arch/powerpc/mm/hugetlbpage.c | 4 +---
 mm/hugetlb.c                  | 7 +++++++
 3 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 8116ac599f80..6720ec8d50e7 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -45,13 +45,6 @@ void __init arm64_hugetlb_cma_reserve(void)
 	else
 		order = CONT_PMD_SHIFT - PAGE_SHIFT;
 
-	/*
-	 * HugeTLB CMA reservation is required for gigantic
-	 * huge pages which could not be allocated via the
-	 * page allocator. Just warn if there is any change
-	 * breaking this assumption.
-	 */
-	WARN_ON(order <= MAX_PAGE_ORDER);
 	hugetlb_cma_reserve(order);
 }
 #endif /* CONFIG_CMA */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0a540b37aab6..16557d008eef 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -614,8 +614,6 @@ void __init gigantic_hugetlb_cma_reserve(void)
 	 */
 	order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
 
-	if (order) {
-		VM_WARN_ON(order <= MAX_PAGE_ORDER);
+	if (order)
 		hugetlb_cma_reserve(order);
-	}
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cf9c9b2906ea..345b3524df35 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7699,6 +7699,13 @@ void __init hugetlb_cma_reserve(int order)
 	bool node_specific_cma_alloc = false;
 	int nid;
 
+	/*
+	 * HugeTLB CMA reservation is required for gigantic
+	 * huge pages which could not be allocated via the
+	 * page allocator. Just warn if there is any change
+	 * breaking this assumption.
+	 */
+	VM_WARN_ON(order <= MAX_PAGE_ORDER);
 	cma_reserve_called = true;
 
 	if (!hugetlb_cma_size)


Looks straightforward to me.

Reviewed-by: Jane Chu 




[PATCH] mm/hugetlb: Move page order check inside hugetlb_cma_reserve()

2024-02-08 Thread Anshuman Khandual
All platforms could benefit from a page order check against MAX_PAGE_ORDER
before allocating a CMA area for gigantic hugetlb pages. Let's move this
check from the individual platforms into generic hugetlb code.

Cc: Catalin Marinas 
Cc: Will Deacon 
Cc: Michael Ellerman 
Cc: Nicholas Piggin 
Cc: linux-arm-ker...@lists.infradead.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux...@kvack.org
Cc: linux-ker...@vger.kernel.org
Signed-off-by: Anshuman Khandual 
---
This applies on v6.8-rc3
 
 arch/arm64/mm/hugetlbpage.c   | 7 -------
 arch/powerpc/mm/hugetlbpage.c | 4 +---
 mm/hugetlb.c                  | 7 +++++++
 3 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 8116ac599f80..6720ec8d50e7 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -45,13 +45,6 @@ void __init arm64_hugetlb_cma_reserve(void)
 	else
 		order = CONT_PMD_SHIFT - PAGE_SHIFT;
 
-	/*
-	 * HugeTLB CMA reservation is required for gigantic
-	 * huge pages which could not be allocated via the
-	 * page allocator. Just warn if there is any change
-	 * breaking this assumption.
-	 */
-	WARN_ON(order <= MAX_PAGE_ORDER);
 	hugetlb_cma_reserve(order);
 }
 #endif /* CONFIG_CMA */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0a540b37aab6..16557d008eef 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -614,8 +614,6 @@ void __init gigantic_hugetlb_cma_reserve(void)
 	 */
 	order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
 
-	if (order) {
-		VM_WARN_ON(order <= MAX_PAGE_ORDER);
+	if (order)
 		hugetlb_cma_reserve(order);
-	}
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cf9c9b2906ea..345b3524df35 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7699,6 +7699,13 @@ void __init hugetlb_cma_reserve(int order)
 	bool node_specific_cma_alloc = false;
 	int nid;
 
+	/*
+	 * HugeTLB CMA reservation is required for gigantic
+	 * huge pages which could not be allocated via the
+	 * page allocator. Just warn if there is any change
+	 * breaking this assumption.
+	 */
+	VM_WARN_ON(order <= MAX_PAGE_ORDER);
 	cma_reserve_called = true;
 
 	if (!hugetlb_cma_size)
-- 
2.25.1
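

For readers outside mm, here is a minimal userspace sketch (not kernel code)
of the invariant the relocated warning enforces. The constants are assumed,
illustrative values: an arm64 kernel with 4K base pages (PAGE_SHIFT = 12),
PUD-sized gigantic pages (PUD_SHIFT = 30), and the default buddy allocator
limit MAX_PAGE_ORDER = 10. Real values depend on the kernel configuration.

#include <stdio.h>

/* Assumed example values; see the note above. */
#define PAGE_SHIFT     12	/* 4K base pages */
#define PUD_SHIFT      30	/* 1G PUD-level huge page */
#define MAX_PAGE_ORDER 10	/* largest buddy allocation: 2^10 pages */

int main(void)
{
	/* Order of a gigantic page: 30 - 12 = 18 */
	int order = PUD_SHIFT - PAGE_SHIFT;

	printf("gigantic page order = %d, MAX_PAGE_ORDER = %d\n",
	       order, MAX_PAGE_ORDER);

	/*
	 * Mirrors the VM_WARN_ON(order <= MAX_PAGE_ORDER) now placed in
	 * hugetlb_cma_reserve(): an order within the buddy allocator's
	 * reach would not need a CMA reservation at all.
	 */
	if (order <= MAX_PAGE_ORDER)
		printf("order is allocatable by the page allocator; "
		       "a CMA reservation would be unnecessary\n");
	else
		printf("order exceeds the page allocator limit; "
		       "a CMA reservation is required\n");
	return 0;
}

With these example values the computed order is 18, so the warning stays
silent; it would fire only if a platform passed an order the page allocator
could already satisfy, making the CMA carve-out pointless.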