This new helper ensures that all accesses to zone_device_data go through the correct API, whether or not the page is part of a folio.
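
As an illustration only (this snippet is not part of the diff below), a call
site that used to read the field directly now goes through the helper, which
resolves the folio first:

	/* Before: raw field access, not folio-aware */
	struct drm_pagemap_zdd *zdd = page->zone_device_data;

	/* After: page_folio() + folio_zone_device_data() via the new helper */
	struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(page);
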
v2:
- Move to drm_pagemap.h, stick to folio_zone_device_data (Matthew Brost)
- Return struct drm_pagemap_zdd * (Matthew Brost)

Suggested-by: Matthew Brost <[email protected]>
Signed-off-by: Francois Dugast <[email protected]>
---
 drivers/gpu/drm/drm_gpusvm.c  |  7 +++++--
 drivers/gpu/drm/drm_pagemap.c | 21 ++++++++++++---------
 include/drm/drm_pagemap.h     | 15 +++++++++++++++
 3 files changed, 32 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index aa9a0b60e727..585d913d3d19 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -1488,12 +1488,15 @@ int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
 		order = drm_gpusvm_hmm_pfn_to_order(pfns[i], i, npages);
 		if (is_device_private_page(page) ||
 		    is_device_coherent_page(page)) {
+			struct drm_pagemap_zdd *__zdd =
+				drm_pagemap_page_zone_device_data(page);
+
 			if (!ctx->allow_mixed &&
-			    zdd != page->zone_device_data && i > 0) {
+			    zdd != __zdd && i > 0) {
 				err = -EOPNOTSUPP;
 				goto err_unmap;
 			}
-			zdd = page->zone_device_data;
+			zdd = __zdd;
 			if (pagemap != page_pgmap(page)) {
 				if (i > 0) {
 					err = -EOPNOTSUPP;
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index f63d72004a71..db3795f03aca 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -252,7 +252,7 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
 		order = folio_order(folio);
 
 		if (is_device_private_page(page)) {
-			struct drm_pagemap_zdd *zdd = page->zone_device_data;
+			struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(page);
 			struct drm_pagemap *dpagemap = zdd->dpagemap;
 			struct drm_pagemap_addr addr;
 
@@ -323,7 +323,7 @@ static void drm_pagemap_migrate_unmap_pages(struct device *dev,
 			goto next;
 
 		if (is_zone_device_page(page)) {
-			struct drm_pagemap_zdd *zdd = page->zone_device_data;
+			struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(page);
 			struct drm_pagemap *dpagemap = zdd->dpagemap;
 
 			dpagemap->ops->device_unmap(dpagemap, dev, pagemap_addr[i]);
@@ -601,7 +601,8 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
 		pages[i] = NULL;
 
 		if (src_page && is_device_private_page(src_page)) {
-			struct drm_pagemap_zdd *src_zdd = src_page->zone_device_data;
+			struct drm_pagemap_zdd *src_zdd =
+				drm_pagemap_page_zone_device_data(src_page);
 
 			if (page_pgmap(src_page) == pagemap &&
 			    !mdetails->can_migrate_same_pagemap) {
@@ -721,8 +722,8 @@ static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
 			goto next;
 
 		if (fault_page) {
-			if (src_page->zone_device_data !=
-			    fault_page->zone_device_data)
+			if (drm_pagemap_page_zone_device_data(src_page) !=
+			    drm_pagemap_page_zone_device_data(fault_page))
 				goto next;
 		}
 
@@ -1063,7 +1064,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
 	void *buf;
 	int i, err = 0;
 
-	zdd = page->zone_device_data;
+	zdd = drm_pagemap_page_zone_device_data(page);
 	if (time_before64(get_jiffies_64(),
 			  zdd->devmem_allocation->timeslice_expiration))
 		return 0;
@@ -1146,7 +1147,9 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
  */
 static void drm_pagemap_folio_free(struct folio *folio)
 {
-	drm_pagemap_zdd_put(folio->page.zone_device_data);
+	struct page *page = folio_page(folio, 0);
+
+	drm_pagemap_zdd_put(drm_pagemap_page_zone_device_data(page));
 }
 
 /**
@@ -1162,7 +1165,7 @@ static void drm_pagemap_folio_free(struct folio *folio)
  */
 static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)
 {
-	struct drm_pagemap_zdd *zdd = vmf->page->zone_device_data;
+	struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(vmf->page);
 	int err;
 
 	err = __drm_pagemap_migrate_to_ram(vmf->vma,
@@ -1228,7 +1231,7 @@ EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);
  */
 struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
 {
-	struct drm_pagemap_zdd *zdd = page->zone_device_data;
+	struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(page);
 
 	return zdd->devmem_allocation->dpagemap;
 }
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index 46e9c58f09e0..736fb6cb7b33 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -4,6 +4,7 @@
 
 #include <linux/dma-direction.h>
 #include <linux/hmm.h>
+#include <linux/memremap.h>
 #include <linux/types.h>
 
 #define NR_PAGES(order) (1U << (order))
@@ -359,4 +360,18 @@ int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
 void drm_pagemap_destroy(struct drm_pagemap *dpagemap, bool is_atomic_or_reclaim);
 
 int drm_pagemap_reinit(struct drm_pagemap *dpagemap);
+
+/**
+ * drm_pagemap_page_zone_device_data() - Page to zone_device_data
+ * @page: Pointer to the page
+ *
+ * Return: Page's zone_device_data
+ */
+static inline struct drm_pagemap_zdd *drm_pagemap_page_zone_device_data(struct page *page)
+{
+	struct folio *folio = page_folio(page);
+
+	return folio_zone_device_data(folio);
+}
+
 #endif
-- 
2.43.0
