Hi,

thanks for reviewing.

On 27.01.26 at 15:45, Boris Brezillon wrote:
Hello Thomas,

On Tue, 27 Jan 2026 14:16:36 +0100
Thomas Zimmermann <[email protected]> wrote:

Gem-shmem operates on pages instead of I/O memory ranges, so use them
for mmap. This will allow for tracking page dirty/accessed flags. If
hugepage support is available, insert the page's folio if possible.
Otherwise fall back to mapping individual pages.

As the PFN is no longer required for hugepage mappings, simplify the
related code and make it depend on CONFIG_TRANSPARENT_HUGEPAGE. Prepare
for tracking folio status.

Signed-off-by: Thomas Zimmermann <[email protected]>
---
  drivers/gpu/drm/drm_gem_shmem_helper.c | 58 ++++++++++++++++----------
  1 file changed, 35 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 3871a6d92f77..b6ddabbfcc52 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -553,17 +553,18 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
  static bool drm_gem_shmem_try_map_pmd(struct vm_fault *vmf, unsigned long addr,
                                      struct page *page)
  {
-#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
-       unsigned long pfn = page_to_pfn(page);
-       unsigned long paddr = pfn << PAGE_SHIFT;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       phys_addr_t paddr = page_to_phys(page);
        bool aligned = (addr & ~PMD_MASK) == (paddr & ~PMD_MASK);
-       if (aligned &&
-           pmd_none(*vmf->pmd) &&
-           folio_test_pmd_mappable(page_folio(page))) {
-               pfn &= PMD_MASK >> PAGE_SHIFT;
-               if (vmf_insert_pfn_pmd(vmf, pfn, false) == VM_FAULT_NOPAGE)
-                       return true;
+       if (aligned && pmd_none(*vmf->pmd)) {
+               struct folio *folio = page_folio(page);
+
+               if (folio_test_pmd_mappable(folio)) {
+                       /* Read-only mapping; split upon write fault */
+                       if (vmf_insert_folio_pmd(vmf, folio, false) == VM_FAULT_NOPAGE)
+                               return true;
+               }
        }
  #endif
@@ -576,13 +577,10 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
        loff_t num_pages = obj->size >> PAGE_SHIFT;
-       vm_fault_t ret;
        struct page **pages = shmem->pages;
-       pgoff_t page_offset;
-       unsigned long pfn;
-
-       /* Offset to faulty address in the VMA. */
-       page_offset = vmf->pgoff - vma->vm_pgoff;
+       pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
+       struct page *page = pages[page_offset];
+       vm_fault_t ret;
  dma_resv_lock(shmem->base.resv, NULL);

@@ -590,21 +588,35 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
        if (page_offset >= num_pages ||
            drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
            shmem->madv < 0) {
                ret = VM_FAULT_SIGBUS;
-               goto out;
+               goto err_dma_resv_unlock;
        }
-       if (drm_gem_shmem_try_map_pmd(vmf, vmf->address, pages[page_offset])) {
-               ret = VM_FAULT_NOPAGE;
-               goto out;
+       page = pages[page_offset];
+       if (!page) {
+               ret = VM_FAULT_SIGBUS;
+               goto err_dma_resv_unlock;
        }
-       pfn = page_to_pfn(pages[page_offset]);
-       ret = vmf_insert_pfn(vma, vmf->address, pfn);
+       if (drm_gem_shmem_try_map_pmd(vmf, vmf->address, page)) {
+               ret = VM_FAULT_NOPAGE;
+       } else {
+               struct folio *folio = page_folio(page);
+
+               get_page(page);
+
+               folio_lock(folio);
+
+               vmf->page = page;
+               ret = VM_FAULT_LOCKED;
+       }
-out:
        dma_resv_unlock(shmem->base.resv);

        return ret;
+
+err_dma_resv_unlock:
+       dma_resv_unlock(shmem->base.resv);
+       return ret;
Why do we need an error path that's exactly the same as the success
path?

I just found the explicit roll-back code easier to work with, but the other style works just as well. Noted for the next iteration.
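
For reference, here's roughly what I mean by the two styles (a minimal
sketch with placeholder names -- ctx, lock(), unlock(), map_page() are
stand-ins, not code from the patch):

        /* Style A: one exit label shared by success and error paths. */
        static vm_fault_t fault_single_exit(struct ctx *c)
        {
                vm_fault_t ret;

                lock(c);
                if (!c->pages) {
                        ret = VM_FAULT_SIGBUS;
                        goto out;
                }
                ret = map_page(c);
        out:
                unlock(c);
                return ret;
        }

        /* Style B: explicit roll-back label; the success path unwinds
         * on its own before returning. */
        static vm_fault_t fault_explicit_rollback(struct ctx *c)
        {
                vm_fault_t ret;

                lock(c);
                if (!c->pages) {
                        ret = VM_FAULT_SIGBUS;
                        goto err_unlock;
                }
                ret = map_page(c);
                unlock(c);
                return ret;

        err_unlock:
                unlock(c);
                return ret;
        }

Both unlock on every path; style B just keeps the error unwind visually
separate from the success return, which is what I found easier to follow.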

Best regards
Thomas


  }

  static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
@@ -691,7 +703,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
        if (ret)
                return ret;

-       vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+       vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP);
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        if (shmem->map_wc)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
The rest looks reasonable, if everyone is happy with the less
restrictive aspect that !PFNMAP implies, as mentioned by Matthew.
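
If I read that concern correctly, the practical difference boils down to
roughly this (a sketch distilled from the two versions of the fault
handler, not exact code):

        /* With VM_PFNMAP: the core mm only sees a raw PFN, so the pages
         * are never refcounted through the mapping and can't be grabbed
         * via get_user_pages(). */
        ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));

        /* Without VM_PFNMAP: the fault handler hands back a locked page,
         * the core mm takes a reference and tracks dirty/accessed state,
         * and the pages become reachable through the usual page-based
         * paths (including GUP). */
        get_page(page);
        folio_lock(page_folio(page));
        vmf->page = page;
        ret = VM_FAULT_LOCKED;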

--
Thomas Zimmermann
Graphics Driver Developer
SUSE Software Solutions Germany GmbH
Frankenstr. 146, 90461 Nürnberg, Germany, www.suse.com
GF: Jochen Jaser, Andrew McDonald, Werner Knoblich, (HRB 36809, AG Nürnberg)

