The nodemasks in these structures may come from a variety of sources, including tasks and cpusets, and must never be modified by code that merely receives them from another context. Constify the nodemask pointers along the allocation, reclaim, OOM, and show_mem paths so the compiler enforces this.
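As a minimal sketch of what the const qualifier buys (the two helpers below are hypothetical, not part of this patch): read-only access through a const pointer still compiles cleanly, while an accidental write is caught at build time instead of silently corrupting the caller's mask:

  /* Reading the mask through a const pointer is fine. */
  static bool any_node_allowed(const nodemask_t *nodemask)
  {
  	return !nodes_empty(*nodemask);
  }

  static void buggy_callee(const nodemask_t *nodemask)
  {
  	/*
  	 * node_set() writes to the mask, so this now fails to build
  	 * (discarded const qualifier) rather than modifying a task's
  	 * or cpuset's nodemask behind its back.
  	 */
  	node_set(0, *nodemask);
  }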
Signed-off-by: Gregory Price <[email protected]>
---
 include/linux/cpuset.h | 4 ++--
 include/linux/mm.h     | 4 ++--
 include/linux/mmzone.h | 6 +++---
 include/linux/oom.h    | 2 +-
 include/linux/swap.h   | 2 +-
 kernel/cgroup/cpuset.c | 2 +-
 mm/internal.h          | 2 +-
 mm/mmzone.c            | 5 +++--
 mm/page_alloc.c        | 4 ++--
 mm/show_mem.c          | 9 ++++++---
 mm/vmscan.c            | 6 +++---
 11 files changed, 25 insertions(+), 21 deletions(-)

diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 2ddb256187b5..548eaf7ef8d0 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -80,7 +80,7 @@ extern bool cpuset_cpu_is_isolated(int cpu);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
-int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
+int cpuset_nodemask_valid_mems_allowed(const nodemask_t *nodemask);
 
 extern bool cpuset_current_node_allowed(int node, gfp_t gfp_mask);
 
@@ -219,7 +219,7 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
 #define cpuset_current_mems_allowed (node_states[N_MEMORY])
 static inline void cpuset_init_current_mems_allowed(void) {}
 
-static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
+static inline int cpuset_nodemask_valid_mems_allowed(const nodemask_t *nodemask)
 {
 	return 1;
 }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d16b33bacc32..1a874917eae6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3343,7 +3343,7 @@ extern int __meminit early_pfn_to_nid(unsigned long pfn);
 extern void mem_init(void);
 extern void __init mmap_init(void);
 
-extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
+extern void __show_mem(unsigned int flags, const nodemask_t *nodemask, int max_zone_idx);
 static inline void show_mem(void)
 {
 	__show_mem(0, NULL, MAX_NR_ZONES - 1);
@@ -3353,7 +3353,7 @@ extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
 
 extern __printf(3, 4)
-void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
+void warn_alloc(gfp_t gfp_mask, const nodemask_t *nodemask, const char *fmt, ...);
 
 extern void setup_per_cpu_pageset(void);
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7fb7331c5725..5c96b2c52817 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1725,7 +1725,7 @@ static inline int zonelist_node_idx(const struct zoneref *zoneref)
 
 struct zoneref *__next_zones_zonelist(struct zoneref *z,
 					enum zone_type highest_zoneidx,
-					nodemask_t *nodes);
+					const nodemask_t *nodes);
 
 /**
  * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
@@ -1744,7 +1744,7 @@
  */
 static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
 					enum zone_type highest_zoneidx,
-					nodemask_t *nodes)
+					const nodemask_t *nodes)
 {
 	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
 		return z;
@@ -1770,7 +1770,7 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
  */
 static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 					enum zone_type highest_zoneidx,
-					nodemask_t *nodes)
+					const nodemask_t *nodes)
 {
 	return next_zones_zonelist(zonelist->_zonerefs,
 							highest_zoneidx, nodes);
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 7b02bc1d0a7e..00da05d227e6 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -30,7 +30,7 @@ struct oom_control {
 	struct zonelist *zonelist;
 
 	/* Used to determine mempolicy */
-	nodemask_t *nodemask;
+	const nodemask_t *nodemask;
 
 	/* Memory cgroup in which oom is invoked, or NULL for global oom */
 	struct mem_cgroup *memcg;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index e818fbade1e2..f5154499bafd 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -381,7 +381,7 @@ extern void swap_setup(void);
 /* linux/mm/vmscan.c */
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
-					gfp_t gfp_mask, nodemask_t *mask);
+					gfp_t gfp_mask, const nodemask_t *mask);
 
 #define MEMCG_RECLAIM_MAY_SWAP		(1 << 1)
 #define MEMCG_RECLAIM_PROACTIVE		(1 << 2)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 52468d2c178a..cd3e2ae83d70 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -4238,7 +4238,7 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
  *
  * Are any of the nodes in the nodemask allowed in current->mems_allowed?
  */
-int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
+int cpuset_nodemask_valid_mems_allowed(const nodemask_t *nodemask)
 {
 	return nodes_intersects(*nodemask, current->mems_allowed);
 }
diff --git a/mm/internal.h b/mm/internal.h
index 1561fc2ff5b8..464e60dd7ba1 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -587,7 +587,7 @@ void page_alloc_sysctl_init(void);
  */
 struct alloc_context {
 	struct zonelist *zonelist;
-	nodemask_t *nodemask;
+	const nodemask_t *nodemask;
 	struct zoneref *preferred_zoneref;
 	int migratetype;
 
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 0c8f181d9d50..59dc3f2076a6 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -43,7 +43,8 @@ struct zone *next_zone(struct zone *zone)
 	return zone;
 }
 
-static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
+static inline int zref_in_nodemask(struct zoneref *zref,
+				   const nodemask_t *nodes)
 {
 #ifdef CONFIG_NUMA
 	return node_isset(zonelist_node_idx(zref), *nodes);
@@ -55,7 +56,7 @@ static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
 /* Returns the next zone at or below highest_zoneidx in a zonelist */
 struct zoneref *__next_zones_zonelist(struct zoneref *z,
 					enum zone_type highest_zoneidx,
-					nodemask_t *nodes)
+					const nodemask_t *nodes)
 {
 	/*
 	 * Find the next suitable zone to use for the allocation.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 600d9e981c23..fd5401fb5e00 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3924,7 +3924,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 	return NULL;
 }
 
-static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
+static void warn_alloc_show_mem(gfp_t gfp_mask, const nodemask_t *nodemask)
 {
 	unsigned int filter = SHOW_MEM_FILTER_NODES;
 
@@ -3943,7 +3943,7 @@ static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
 	__show_mem(filter, nodemask, gfp_zone(gfp_mask));
 }
 
-void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
+void warn_alloc(gfp_t gfp_mask, const nodemask_t *nodemask, const char *fmt, ...)
 {
 	struct va_format vaf;
 	va_list args;
diff --git a/mm/show_mem.c b/mm/show_mem.c
index 3a4b5207635d..24685b5c6dcf 100644
--- a/mm/show_mem.c
+++ b/mm/show_mem.c
@@ -116,7 +116,8 @@ void si_meminfo_node(struct sysinfo *val, int nid)
  * Determine whether the node should be displayed or not, depending on whether
  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
  */
-static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
+static bool show_mem_node_skip(unsigned int flags, int nid,
+			       const nodemask_t *nodemask)
 {
 	if (!(flags & SHOW_MEM_FILTER_NODES))
 		return false;
@@ -177,7 +178,8 @@ static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx)
  * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
  * cpuset.
  */
-static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
+static void show_free_areas(unsigned int filter, const nodemask_t *nodemask,
+			    int max_zone_idx)
 {
 	unsigned long free_pcp = 0;
 	int cpu, nid;
@@ -399,7 +401,8 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
 	show_swap_cache_info();
 }
 
-void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
+void __show_mem(unsigned int filter, const nodemask_t *nodemask,
+		int max_zone_idx)
 {
 	unsigned long total = 0, reserved = 0, highmem = 0;
 	struct zone *zone;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b2fc8b626d3d..03e7f5206ad9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -80,7 +80,7 @@ struct scan_control {
 	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
 	 * are scanned.
 	 */
-	nodemask_t *nodemask;
+	const nodemask_t *nodemask;
 
 	/*
 	 * The memory cgroup that hit its limit and as a result is the
@@ -6530,7 +6530,7 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
  * happens, the page allocator should not consider triggering the OOM killer.
  */
 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
-					nodemask_t *nodemask)
+					const nodemask_t *nodemask)
 {
 	struct zoneref *z;
 	struct zone *zone;
@@ -6610,7 +6610,7 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
 }
 
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
-				gfp_t gfp_mask, nodemask_t *nodemask)
+				gfp_t gfp_mask, const nodemask_t *nodemask)
 {
 	unsigned long nr_reclaimed;
 	struct scan_control sc = {
-- 
2.51.1

