Re: [PATCH v2 5/6] mm/mm_init.c: remove unneeded calc_memmap_size()

2024-03-27 Thread Baoquan He
On 03/27/24 at 06:21pm, Mike Rapoport wrote:
> On Mon, Mar 25, 2024 at 10:56:45PM +0800, Baoquan He wrote:
> > Nobody calls calc_memmap_size() now.
> > 
> > Signed-off-by: Baoquan He 
> 
> Reviewed-by: Mike Rapoport (IBM) 
> 
> Looks like I replied to patch 6/6 twice by mistake and missed this one.

Thanks for your careful review.

> 
> > ---
> >  mm/mm_init.c | 20 --------------------
> >  1 file changed, 20 deletions(-)
> > 
> > diff --git a/mm/mm_init.c b/mm/mm_init.c
> > index 7f71e56e83f3..e269a724f70e 100644
> > --- a/mm/mm_init.c
> > +++ b/mm/mm_init.c
> > @@ -1331,26 +1331,6 @@ static void __init calculate_node_totalpages(struct pglist_data *pgdat,
> >  	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
> >  }
> >  
> > -static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
> > -					     unsigned long present_pages)
> > -{
> > -	unsigned long pages = spanned_pages;
> > -
> > -	/*
> > -	 * Provide a more accurate estimation if there are holes within
> > -	 * the zone and SPARSEMEM is in use. If there are holes within the
> > -	 * zone, each populated memory region may cost us one or two extra
> > -	 * memmap pages due to alignment because memmap pages for each
> > -	 * populated regions may not be naturally aligned on page boundary.
> > -	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
> > -	 */
> > -	if (spanned_pages > present_pages + (present_pages >> 4) &&
> > -			IS_ENABLED(CONFIG_SPARSEMEM))
> > -		pages = present_pages;
> > -
> > -	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
> > -}
> > -
> >  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> >  static void pgdat_init_split_queue(struct pglist_data *pgdat)
> >  {
> > -- 
> > 2.41.0
> > 
> 
> -- 
> Sincerely yours,
> Mike.
> 



Re: [PATCH v2 5/6] mm/mm_init.c: remove unneeded calc_memmap_size()

2024-03-27 Thread Mike Rapoport
On Mon, Mar 25, 2024 at 10:56:45PM +0800, Baoquan He wrote:
> Nobody calls calc_memmap_size() now.
> 
> Signed-off-by: Baoquan He 

Reviewed-by: Mike Rapoport (IBM) 

Looks like I replied to patch 6/6 twice by mistake and missed this one.

> ---
>  mm/mm_init.c | 20 --------------------
>  1 file changed, 20 deletions(-)
> 
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index 7f71e56e83f3..e269a724f70e 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -1331,26 +1331,6 @@ static void __init calculate_node_totalpages(struct pglist_data *pgdat,
>  	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
>  }
>  
> -static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
> -					     unsigned long present_pages)
> -{
> -	unsigned long pages = spanned_pages;
> -
> -	/*
> -	 * Provide a more accurate estimation if there are holes within
> -	 * the zone and SPARSEMEM is in use. If there are holes within the
> -	 * zone, each populated memory region may cost us one or two extra
> -	 * memmap pages due to alignment because memmap pages for each
> -	 * populated regions may not be naturally aligned on page boundary.
> -	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
> -	 */
> -	if (spanned_pages > present_pages + (present_pages >> 4) &&
> -			IS_ENABLED(CONFIG_SPARSEMEM))
> -		pages = present_pages;
> -
> -	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
> -}
> -
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>  static void pgdat_init_split_queue(struct pglist_data *pgdat)
>  {
> -- 
> 2.41.0
> 

-- 
Sincerely yours,
Mike.


[PATCH v2 5/6] mm/mm_init.c: remove unneeded calc_memmap_size()

2024-03-25 Thread Baoquan He
Nobody calls calc_memmap_size() now.

Signed-off-by: Baoquan He 
---
 mm/mm_init.c | 20 --------------------
 1 file changed, 20 deletions(-)

diff --git a/mm/mm_init.c b/mm/mm_init.c
index 7f71e56e83f3..e269a724f70e 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1331,26 +1331,6 @@ static void __init calculate_node_totalpages(struct pglist_data *pgdat,
 	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
 }
 
-static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
-					     unsigned long present_pages)
-{
-	unsigned long pages = spanned_pages;
-
-	/*
-	 * Provide a more accurate estimation if there are holes within
-	 * the zone and SPARSEMEM is in use. If there are holes within the
-	 * zone, each populated memory region may cost us one or two extra
-	 * memmap pages due to alignment because memmap pages for each
-	 * populated regions may not be naturally aligned on page boundary.
-	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
-	 */
-	if (spanned_pages > present_pages + (present_pages >> 4) &&
-			IS_ENABLED(CONFIG_SPARSEMEM))
-		pages = present_pages;
-
-	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
-}
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void pgdat_init_split_queue(struct pglist_data *pgdat)
 {
-- 
2.41.0
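

For readers wondering what the deleted heuristic did: when SPARSEMEM is
enabled and more than roughly 1/16 of a zone's span is holes, the
function estimated memmap size from present_pages rather than
spanned_pages, since SPARSEMEM only allocates memmap for populated
sections. A minimal userspace sketch follows; PAGE_SHIFT and the
64-byte struct page size are illustrative assumptions (both vary by
configuration), and the sparsemem parameter stands in for
IS_ENABLED(CONFIG_SPARSEMEM).

#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)		(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define STRUCT_PAGE_SIZE	64UL	/* assumed sizeof(struct page) */

/* Userspace re-creation of the deleted kernel helper. */
static unsigned long calc_memmap_size(unsigned long spanned_pages,
				      unsigned long present_pages,
				      int sparsemem)
{
	unsigned long pages = spanned_pages;

	/* Holes exceed ~1/16 of the span: trust present_pages instead. */
	if (spanned_pages > present_pages + (present_pages >> 4) && sparsemem)
		pages = present_pages;

	/* Pages of memmap needed, rounded up to a whole page. */
	return PAGE_ALIGN(pages * STRUCT_PAGE_SIZE) >> PAGE_SHIFT;
}

int main(void)
{
	/* A zone spanning 1M pages (4 GiB) with 256K pages of holes. */
	unsigned long spanned = 1UL << 20;
	unsigned long present = spanned - (1UL << 18);

	printf("flat:      %lu memmap pages\n",
	       calc_memmap_size(spanned, present, 0));
	printf("sparsemem: %lu memmap pages\n",
	       calc_memmap_size(spanned, present, 1));
	return 0;
}

With these numbers the holes (256K pages) exceed present_pages >> 4
(48K pages), so under SPARSEMEM the estimate drops from 16384 to 12288
memmap pages.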