A later commit will add support for live updating a memfd backed by HugeTLB. That commit needs access to these internal functions to prepare the folios and properly queue them to the hstate and the file. Move them out to a separate hugetlb-internal header.
There does exist include/linux/hugetlb.h, but that contains higher level routines. It also prefixes the function names to make it clear they belong to hugetlb. These are low-level routines that do not need to be exposed to the public API, and renaming them to prefix with hugetlb is going to cause a lot of code churn. So create mm/hugetlb_internal.h that contains these definitions. Signed-off-by: Pratyush Yadav <[email protected]> --- MAINTAINERS | 1 + mm/hugetlb.c | 33 +++++++++------------------------ mm/hugetlb_internal.h | 35 +++++++++++++++++++++++++++++++++++ 3 files changed, 45 insertions(+), 24 deletions(-) create mode 100644 mm/hugetlb_internal.h diff --git a/MAINTAINERS b/MAINTAINERS index 2722f98d0ed7..fc23a0381e19 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11540,6 +11540,7 @@ F: mm/hugetlb.c F: mm/hugetlb_cgroup.c F: mm/hugetlb_cma.c F: mm/hugetlb_cma.h +F: mm/hugetlb_internal.h F: mm/hugetlb_vmemmap.c F: mm/hugetlb_vmemmap.h F: tools/testing/selftests/cgroup/test_hugetlb_memcg.c diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 0455119716ec..0f818086bf4f 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -55,6 +55,8 @@ #include "hugetlb_cma.h" #include <linux/page-isolation.h> +#include "hugetlb_internal.h" + int hugetlb_max_hstate __read_mostly; unsigned int default_hstate_idx; struct hstate hstates[HUGE_MAX_HSTATE]; @@ -733,9 +735,8 @@ static int allocate_file_region_entries(struct resv_map *resv, * fail; region_chg will always allocate at least 1 entry and a region_add for * 1 page will only require at most 1 entry. */ -static long region_add(struct resv_map *resv, long f, long t, - long in_regions_needed, struct hstate *h, - struct hugetlb_cgroup *h_cg) +long region_add(struct resv_map *resv, long f, long t, long in_regions_needed, + struct hstate *h, struct hugetlb_cgroup *h_cg) { long add = 0, actual_regions_needed = 0; @@ -800,8 +801,7 @@ static long region_add(struct resv_map *resv, long f, long t, * zero. 
-ENOMEM is returned if a new file_region structure or cache entry * is needed and can not be allocated. */ -static long region_chg(struct resv_map *resv, long f, long t, - long *out_regions_needed) +long region_chg(struct resv_map *resv, long f, long t, long *out_regions_needed) { long chg = 0; @@ -836,8 +836,7 @@ static long region_chg(struct resv_map *resv, long f, long t, * routine. They are kept to make reading the calling code easier as * arguments will match the associated region_chg call. */ -static void region_abort(struct resv_map *resv, long f, long t, - long regions_needed) +void region_abort(struct resv_map *resv, long f, long t, long regions_needed) { spin_lock(&resv->lock); VM_BUG_ON(!resv->region_cache_count); @@ -1162,19 +1161,6 @@ void resv_map_release(struct kref *ref) kfree(resv_map); } -static inline struct resv_map *inode_resv_map(struct inode *inode) -{ - /* - * At inode evict time, i_mapping may not point to the original - * address space within the inode. This original address space - * contains the pointer to the resv_map. So, always use the - * address space embedded within the inode. - * The VERY common case is inode->mapping == &inode->i_data but, - * this may not be true for device special inodes. 
- */ - return (struct resv_map *)(&inode->i_data)->i_private_data; -} - static struct resv_map *vma_resv_map(struct vm_area_struct *vma) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); @@ -1887,14 +1873,14 @@ void free_huge_folio(struct folio *folio) /* * Must be called with the hugetlb lock held */ -static void account_new_hugetlb_folio(struct hstate *h, struct folio *folio) +void account_new_hugetlb_folio(struct hstate *h, struct folio *folio) { lockdep_assert_held(&hugetlb_lock); h->nr_huge_pages++; h->nr_huge_pages_node[folio_nid(folio)]++; } -static void init_new_hugetlb_folio(struct folio *folio) +void init_new_hugetlb_folio(struct folio *folio) { __folio_set_hugetlb(folio); INIT_LIST_HEAD(&folio->lru); @@ -2006,8 +1992,7 @@ static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h, return folio; } -static void prep_and_add_allocated_folios(struct hstate *h, - struct list_head *folio_list) +void prep_and_add_allocated_folios(struct hstate *h, struct list_head *folio_list) { unsigned long flags; struct folio *folio, *tmp_f; diff --git a/mm/hugetlb_internal.h b/mm/hugetlb_internal.h new file mode 100644 index 000000000000..edfb4eb75828 --- /dev/null +++ b/mm/hugetlb_internal.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2025 Pratyush Yadav <[email protected]> + */ +#ifndef __HUGETLB_INTERNAL_H +#define __HUGETLB_INTERNAL_H + +#include <linux/mm.h> +#include <linux/hugetlb.h> +#include <linux/hugetlb_cgroup.h> +#include <linux/list.h> + +void init_new_hugetlb_folio(struct folio *folio); +void account_new_hugetlb_folio(struct hstate *h, struct folio *folio); + +long region_chg(struct resv_map *resv, long f, long t, long *out_regions_needed); +long region_add(struct resv_map *resv, long f, long t, long in_regions_needed, + struct hstate *h, struct hugetlb_cgroup *h_cg); +void region_abort(struct resv_map *resv, long f, long t, long regions_needed); +void prep_and_add_allocated_folios(struct hstate *h, struct 
list_head *folio_list); + +static inline struct resv_map *inode_resv_map(struct inode *inode) +{ + /* + * At inode evict time, i_mapping may not point to the original + * address space within the inode. This original address space + * contains the pointer to the resv_map. So, always use the + * address space embedded within the inode. + * The VERY common case is inode->mapping == &inode->i_data but, + * this may not be true for device special inodes. + */ + return (struct resv_map *)(&inode->i_data)->i_private_data; +} + +#endif /* __HUGETLB_INTERNAL_H */ -- 2.43.0
