Add routines to support allocation of large order zone device folios,
along with helper functions for zone device folios: a check for whether
a folio is device private, and helpers for setting zone device data.
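
For illustration, driver-side allocation might look roughly like the
minimal sketch below. This is a sketch only, not part of this patch:
my_drv_alloc_pfn() and struct my_drv are made-up names standing in for
a driver's own device-memory allocator.

    /* Hypothetical allocation of a large order device private folio. */
    static struct folio *my_drv_alloc_folio(struct my_drv *drv,
					    unsigned int order)
    {
	    unsigned long pfn = my_drv_alloc_pfn(drv, 1UL << order);
	    struct folio *folio = page_folio(pfn_to_page(pfn));

	    /*
	     * Takes 1 << order references on the pgmap, sets the folio
	     * refcount to 1, locks the head page and, for order > 1,
	     * preps the compound page.
	     */
	    zone_device_folio_init(folio, order);
	    folio_page(folio, 0)->zone_device_data = drv;

	    return folio;
    }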
When large folios are used, the existing page_free() callback in pgmap
is called when the folio is freed; this is true for both PAGE_SIZE and
higher order pages. Unlike normal THP folios, zone device private large
folios do not support deferred split and scan.

Cc: Andrew Morton <a...@linux-foundation.org>
Cc: David Hildenbrand <da...@redhat.com>
Cc: Zi Yan <z...@nvidia.com>
Cc: Joshua Hahn <joshua.hah...@gmail.com>
Cc: Rakie Kim <rakie....@sk.com>
Cc: Byungchul Park <byungc...@sk.com>
Cc: Gregory Price <gou...@gourry.net>
Cc: Ying Huang <ying.hu...@linux.alibaba.com>
Cc: Alistair Popple <apop...@nvidia.com>
Cc: Oscar Salvador <osalva...@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoa...@oracle.com>
Cc: Baolin Wang <baolin.w...@linux.alibaba.com>
Cc: "Liam R. Howlett" <liam.howl...@oracle.com>
Cc: Nico Pache <npa...@redhat.com>
Cc: Ryan Roberts <ryan.robe...@arm.com>
Cc: Dev Jain <dev.j...@arm.com>
Cc: Barry Song <bao...@kernel.org>
Cc: Lyude Paul <ly...@redhat.com>
Cc: Danilo Krummrich <d...@kernel.org>
Cc: David Airlie <airl...@gmail.com>
Cc: Simona Vetter <sim...@ffwll.ch>
Cc: Ralph Campbell <rcampb...@nvidia.com>
Cc: Mika Penttilä <mpent...@redhat.com>
Cc: Matthew Brost <matthew.br...@intel.com>
Cc: Francois Dugast <francois.dug...@intel.com>
Signed-off-by: Balbir Singh <balb...@nvidia.com>
---
 include/linux/memremap.h | 10 +++++++++-
 mm/memremap.c            | 38 +++++++++++++++++++++++++-------------
 mm/rmap.c                |  6 +++++-
 3 files changed, 39 insertions(+), 15 deletions(-)

diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 4aa151914eab..a0723b35eeaa 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -199,7 +199,7 @@ static inline bool folio_is_fsdax(const struct folio *folio)
 }
 
 #ifdef CONFIG_ZONE_DEVICE
-void zone_device_page_init(struct page *page);
+void zone_device_folio_init(struct folio *folio, unsigned int order);
 void *memremap_pages(struct dev_pagemap *pgmap, int nid);
 void memunmap_pages(struct dev_pagemap *pgmap);
 void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
@@ -209,6 +209,14 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
 
 unsigned long memremap_compat_align(void);
+
+static inline void zone_device_page_init(struct page *page)
+{
+	struct folio *folio = page_folio(page);
+
+	zone_device_folio_init(folio, 0);
+}
+
 #else
 static inline void *devm_memremap_pages(struct device *dev,
 		struct dev_pagemap *pgmap)
diff --git a/mm/memremap.c b/mm/memremap.c
index b0ce0d8254bd..13e87dd743ad 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -427,20 +427,19 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap);
 void free_zone_device_folio(struct folio *folio)
 {
 	struct dev_pagemap *pgmap = folio->pgmap;
+	unsigned long nr = folio_nr_pages(folio);
+	int i;
 
 	if (WARN_ON_ONCE(!pgmap))
 		return;
 
 	mem_cgroup_uncharge(folio);
 
-	/*
-	 * Note: we don't expect anonymous compound pages yet. Once supported
-	 * and we could PTE-map them similar to THP, we'd have to clear
-	 * PG_anon_exclusive on all tail pages.
-	 */
 	if (folio_test_anon(folio)) {
-		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
-		__ClearPageAnonExclusive(folio_page(folio, 0));
+		for (i = 0; i < nr; i++)
+			__ClearPageAnonExclusive(folio_page(folio, i));
+	} else {
+		VM_WARN_ON_ONCE(folio_test_large(folio));
 	}
 
 	/*
@@ -464,11 +463,15 @@ void free_zone_device_folio(struct folio *folio)
 
 	switch (pgmap->type) {
 	case MEMORY_DEVICE_PRIVATE:
+		percpu_ref_put_many(&folio->pgmap->ref, nr);
+		pgmap->ops->page_free(&folio->page);
+		folio->page.mapping = NULL;
+		break;
 	case MEMORY_DEVICE_COHERENT:
 		if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free))
 			break;
-		pgmap->ops->page_free(folio_page(folio, 0));
-		put_dev_pagemap(pgmap);
+		pgmap->ops->page_free(&folio->page);
+		percpu_ref_put(&folio->pgmap->ref);
 		break;
 
 	case MEMORY_DEVICE_GENERIC:
@@ -491,14 +494,23 @@ void free_zone_device_folio(struct folio *folio)
 	}
 }
 
-void zone_device_page_init(struct page *page)
+void zone_device_folio_init(struct folio *folio, unsigned int order)
 {
+	struct page *page = folio_page(folio, 0);
+
+	VM_WARN_ON_ONCE(order > MAX_ORDER_NR_PAGES);
+
 	/*
 	 * Drivers shouldn't be allocating pages after calling
 	 * memunmap_pages().
 	 */
-	WARN_ON_ONCE(!percpu_ref_tryget_live(&page_pgmap(page)->ref));
-	set_page_count(page, 1);
+	WARN_ON_ONCE(!percpu_ref_tryget_many(&page_pgmap(page)->ref, 1 << order));
+	folio_set_count(folio, 1);
 	lock_page(page);
+
+	if (order > 1) {
+		prep_compound_page(page, order);
+		folio_set_large_rmappable(folio);
+	}
 }
-EXPORT_SYMBOL_GPL(zone_device_page_init);
+EXPORT_SYMBOL_GPL(zone_device_folio_init);
diff --git a/mm/rmap.c b/mm/rmap.c
index 568198e9efc2..b5837075b6e0 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1769,9 +1769,13 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 	 * the folio is unmapped and at least one page is still mapped.
 	 *
 	 * Check partially_mapped first to ensure it is a large folio.
+	 *
+	 * Device private folios do not support deferred splitting and
+	 * shrinker based scanning of the folios to free.
 	 */
 	if (partially_mapped && folio_test_anon(folio) &&
-	    !folio_test_partially_mapped(folio))
+	    !folio_test_partially_mapped(folio) &&
+	    !folio_is_device_private(folio))
 		deferred_split_folio(folio, true);
 
 	__folio_mod_stat(folio, -nr, -nr_pmdmapped);
-- 
2.50.1
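
To make the page_free() point in the commit message concrete, a
driver's callback might look like the minimal sketch below. This is an
assumption for illustration, not part of this patch: my_drv_free_pfns()
and struct my_drv are made-up names; page_free() itself receives the
head page whether the freed folio is PAGE_SIZE or higher order.

    static void my_drv_page_free(struct page *page)
    {
	    struct folio *folio = page_folio(page);
	    struct my_drv *drv = page->zone_device_data;

	    /* Return all folio_nr_pages() device pages to the driver. */
	    my_drv_free_pfns(drv, page_to_pfn(page), folio_nr_pages(folio));
    }

    static const struct dev_pagemap_ops my_drv_pgmap_ops = {
	    .page_free	= my_drv_page_free,
	    /* .migrate_to_ram is also required for MEMORY_DEVICE_PRIVATE */
    };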