On Mon,  9 Feb 2026 14:27:13 +0100
Thomas Zimmermann <[email protected]> wrote:

> The current mmap page-fault handler requires some changes before it
> can track folio access.
> 
> Move the call to folio_test_pmd_mappable() into the mmap page-fault
> handler, before the call to drm_gem_shmem_try_map_pmd(). The folio
> will become useful for tracking the access status.
> 
> Also rename drm_gem_shmem_try_map_pmd() to _try_insert_pfn_pmd() and
> pass only the page-fault state and the page-frame number. The new name
> and parameters make it similar to vmf_insert_pfn_pmd().
> 
> No functional changes. If PMD mapping fails or is not supported,
> insert a regular PFN as before.
> 
> Signed-off-by: Thomas Zimmermann <[email protected]>

Reviewed-by: Boris Brezillon <[email protected]>

> ---
>  drivers/gpu/drm/drm_gem_shmem_helper.c | 25 ++++++++++++-------------
>  1 file changed, 12 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index ab8e331005f9..c3a054899ba3 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -550,17 +550,14 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
>  
> -static vm_fault_t drm_gem_shmem_try_map_pmd(struct vm_fault *vmf, unsigned long addr,
> -                                         struct page *page)
> +static vm_fault_t drm_gem_shmem_try_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn)
>  {
>  #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
> -     unsigned long pfn = page_to_pfn(page);
>       unsigned long paddr = pfn << PAGE_SHIFT;
> -     bool aligned = (addr & ~PMD_MASK) == (paddr & ~PMD_MASK);
> +     bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);
>  
> -     if (aligned &&
> -         pmd_none(*vmf->pmd) &&
> -         folio_test_pmd_mappable(page_folio(page))) {
> +     if (aligned && pmd_none(*vmf->pmd)) {
> +             /* Read-only mapping; split upon write fault */
>               pfn &= PMD_MASK >> PAGE_SHIFT;
>               return vmf_insert_pfn_pmd(vmf, pfn, false);
>       }
> @@ -580,6 +577,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
>       struct page **pages = shmem->pages;
>       pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
>       struct page *page;
> +     struct folio *folio;
>       unsigned long pfn;
>  
>       dma_resv_lock(obj->resv, NULL);
> @@ -591,15 +589,16 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
>       page = pages[page_offset];
>       if (drm_WARN_ON_ONCE(dev, !page))
>               goto out;
> -
> -     ret = drm_gem_shmem_try_map_pmd(vmf, vmf->address, page);
> -     if (ret == VM_FAULT_NOPAGE)
> -             goto out;
> +     folio = page_folio(page);
>  
>       pfn = page_to_pfn(page);
> -     ret = vmf_insert_pfn(vma, vmf->address, pfn);
>  
> - out:
> +     if (folio_test_pmd_mappable(folio))
> +             ret = drm_gem_shmem_try_insert_pfn_pmd(vmf, pfn);
> +     if (ret != VM_FAULT_NOPAGE)
> +             ret = vmf_insert_pfn(vma, vmf->address, pfn);
> +
> +out:
>       dma_resv_unlock(obj->resv);
>  
>       return ret;
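
A side note on the arithmetic the helper relies on: a PMD-sized
insertion is only attempted when the faulting virtual address and the
physical address are congruent modulo the PMD size, and the pfn is then
rounded down to the PMD boundary. Below is a minimal userspace sketch
of that check, not kernel code; it assumes 4 KiB pages and 2 MiB PMDs,
and the helper name pmd_insertable() is made up for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative constants: 4 KiB pages, 2 MiB PMDs (x86-64-like). */
#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PMD_SIZE	(1ULL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

/*
 * Hypothetical stand-in for the alignment test used by the patch: a
 * PMD-sized mapping is only possible if the virtual address and the
 * physical address share the same offset within a PMD-sized region.
 */
static bool pmd_insertable(uint64_t vaddr, uint64_t pfn)
{
	uint64_t paddr = pfn << PAGE_SHIFT;

	return (vaddr & ~PMD_MASK) == (paddr & ~PMD_MASK);
}

int main(void)
{
	uint64_t vaddr = 0x7f1200200000ULL;             /* PMD-aligned VA */
	uint64_t pfn = 0x80000200000ULL >> PAGE_SHIFT;  /* PMD-aligned PA */

	if (pmd_insertable(vaddr, pfn)) {
		/* Round the pfn down to the start of the PMD-sized block. */
		pfn &= PMD_MASK >> PAGE_SHIFT;
		printf("PMD mapping possible, base pfn 0x%llx\n",
		       (unsigned long long)pfn);
	} else {
		printf("fall back to a single-page PFN insertion\n");
	}

	return 0;
}

Compiled with any C compiler, the program reports whether a PMD-sized
mapping of the example addresses would be possible, mirroring the
fallback to vmf_insert_pfn() in the fault handler above.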
