Add VM_PFNMAP to vm_flags in the mmap handler to ensure that
the mappings are managed without using struct page.

And, in the vm_fault handler, use vmf_insert_pfn to share the
page's pfn with userspace instead of directly sharing the page
(via struct page *).

Cc: David Hildenbrand <da...@redhat.com>
Cc: Daniel Vetter <daniel.vet...@ffwll.ch>
Cc: Mike Kravetz <mike.krav...@oracle.com>
Cc: Hugh Dickins <hu...@google.com>
Cc: Peter Xu <pet...@redhat.com>
Cc: Jason Gunthorpe <j...@nvidia.com>
Cc: Gerd Hoffmann <kra...@redhat.com>
Cc: Dongwon Kim <dongwon....@intel.com>
Cc: Junxiao Chang <junxiao.ch...@intel.com>
Suggested-by: David Hildenbrand <da...@redhat.com>
Acked-by: David Hildenbrand <da...@redhat.com>
Signed-off-by: Vivek Kasireddy <vivek.kasire...@intel.com>
---
 drivers/dma-buf/udmabuf.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index c40645999648..820c993c8659 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -35,12 +35,13 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
        struct vm_area_struct *vma = vmf->vma;
        struct udmabuf *ubuf = vma->vm_private_data;
        pgoff_t pgoff = vmf->pgoff;
+       unsigned long pfn;
 
        if (pgoff >= ubuf->pagecount)
                return VM_FAULT_SIGBUS;
-       vmf->page = ubuf->pages[pgoff];
-       get_page(vmf->page);
-       return 0;
+
+       pfn = page_to_pfn(ubuf->pages[pgoff]);
+       return vmf_insert_pfn(vma, vmf->address, pfn);
 }
 
 static const struct vm_operations_struct udmabuf_vm_ops = {
@@ -56,6 +57,7 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
 
        vma->vm_ops = &udmabuf_vm_ops;
        vma->vm_private_data = ubuf;
+       vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
        return 0;
 }
 
-- 
2.39.2

Reply via email to