On Wed, Feb 11, 2026 at 08:51:08PM -0800, JP Kobryn wrote:
> It would be useful to see a breakdown of allocations to understand which
> NUMA policies are driving them. For example, when investigating memory
> pressure, having policy-specific counts could show that allocations were
> bound to the affected node (via MPOL_BIND).
> 
> Add per-policy page allocation counters as new node stat items. These
> counters can provide correlation between a mempolicy and pressure on a
> given node.
> 
> Signed-off-by: JP Kobryn <[email protected]>
> Suggested-by: Johannes Weiner <[email protected]>

[...]

>  int mempolicy_set_node_perf(unsigned int node, struct access_coordinate *coords)
>  {
>       struct weighted_interleave_state *new_wi_state, *old_wi_state = NULL;
> @@ -2446,8 +2461,14 @@ static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
>  
>       nodemask = policy_nodemask(gfp, pol, ilx, &nid);
>  
> -     if (pol->mode == MPOL_PREFERRED_MANY)
> -             return alloc_pages_preferred_many(gfp, order, nid, nodemask);
> +     if (pol->mode == MPOL_PREFERRED_MANY) {
> +             page = alloc_pages_preferred_many(gfp, order, nid, nodemask);
> +             if (page)
> +                     __mod_node_page_state(page_pgdat(page),
> +                                     mpol_node_stat(MPOL_PREFERRED_MANY), 1 << order);

Here and in the two places below, please use mod_node_page_state() instead
of __mod_node_page_state(): the __foo() variants require preemption to be
disabled, or, if the given stat can be updated from IRQ context, IRQs to be
disabled. This code path does neither.
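
For the MPOL_PREFERRED_MANY branch, that would look roughly like the
sketch below (untested, and assuming the mpol_node_stat() helper this
patch introduces):

	if (pol->mode == MPOL_PREFERRED_MANY) {
		page = alloc_pages_preferred_many(gfp, order, nid, nodemask);
		if (page)
			/* non-__ variant handles preempt/IRQ safety itself */
			mod_node_page_state(page_pgdat(page),
					mpol_node_stat(MPOL_PREFERRED_MANY),
					1 << order);

		return page;
	}

The same substitution applies to the two __alloc_frozen_pages_noprof()
call sites further down.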

> +
> +             return page;
> +     }
>  
>       if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
>           /* filter "hugepage" allocation, unless from alloc_pages() */
> @@ -2472,6 +2493,9 @@ static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
>                       page = __alloc_frozen_pages_noprof(
>                               gfp | __GFP_THISNODE | __GFP_NORETRY, order,
>                               nid, NULL);
> +                     if (page)
> +                             __mod_node_page_state(page_pgdat(page),
> +                                             mpol_node_stat(pol->mode), 1 << order);
>                       if (page || !(gfp & __GFP_DIRECT_RECLAIM))
>                               return page;
>                       /*
> @@ -2484,6 +2508,8 @@ static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
>       }
>  
>       page = __alloc_frozen_pages_noprof(gfp, order, nid, nodemask);
> +     if (page)
> +             __mod_node_page_state(page_pgdat(page), mpol_node_stat(pol->mode), 1 << order);
>  
