1. Change vfio from get_user_pages_remote() to
pin_user_pages_remote().

2. Because all FOLL_PIN-acquired pages must be released via
put_user_page(), also convert the put_page() call to
put_user_pages_dirty_lock(), as sketched below.
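
As a minimal sketch of that pairing rule (assuming the
pin_user_pages_remote() and put_user_pages_dirty_lock() signatures used
in the diff below; the helper names here are hypothetical and not part
of this patch):

static int example_pin_one_page(struct mm_struct *mm, unsigned long vaddr,
				bool writable, struct page **pagep)
{
	unsigned int flags = FOLL_LONGTERM;
	int ret;

	if (writable)
		flags |= FOLL_WRITE;

	down_read(&mm->mmap_sem);
	/* FOLL_PIN is implied by the pin_user_pages*() entry points. */
	ret = pin_user_pages_remote(NULL, mm, vaddr, 1, flags,
				    pagep, NULL, NULL);
	up_read(&mm->mmap_sem);

	return ret == 1 ? 0 : (ret < 0 ? ret : -EFAULT);
}

static void example_unpin_one_page(struct page *page, bool dirty)
{
	/*
	 * FOLL_PIN-acquired pages must not be released with put_page();
	 * put_user_pages_dirty_lock() drops the pin and, if requested,
	 * marks the page dirty via set_page_dirty_lock().
	 */
	put_user_pages_dirty_lock(&page, 1, dirty);
}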

Note that this effectively changes the behavior of put_pfn() in
vfio_iommu_type1.c: it now ultimately calls set_page_dirty_lock()
instead of set_page_dirty(). This is probably more accurate.

As Christoph Hellwig put it, "set_page_dirty() is only safe if we are
dealing with a file backed page where we have reference on the inode it
hangs off." [1]

[1] https://lore.kernel.org/r/20190723153640.gb...@lst.de
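
For reference, set_page_dirty_lock() roughly amounts to the following
open-coded sketch (an illustration of the distinction, not a verbatim
copy of the mm/ implementation): the page lock is held across the
dirtying, which is what makes it safe without an extra reference on the
backing inode.

static void example_mark_dirty_locked(struct page *page)
{
	/* Hold the page lock so the page's mapping stays stable. */
	lock_page(page);
	set_page_dirty(page);
	unlock_page(page);
}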

Tested-by: Alex Williamson <alex.william...@redhat.com>
Acked-by: Alex Williamson <alex.william...@redhat.com>
Signed-off-by: John Hubbard <jhubb...@nvidia.com>
---
 drivers/vfio/vfio_iommu_type1.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index b800fc9a0251..18bfc2fc8e6d 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -309,9 +309,8 @@ static int put_pfn(unsigned long pfn, int prot)
 {
        if (!is_invalid_reserved_pfn(pfn)) {
                struct page *page = pfn_to_page(pfn);
-               if (prot & IOMMU_WRITE)
-                       SetPageDirty(page);
-               put_page(page);
+
+               put_user_pages_dirty_lock(&page, 1, prot & IOMMU_WRITE);
                return 1;
        }
        return 0;
@@ -329,7 +328,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
                flags |= FOLL_WRITE;
 
        down_read(&mm->mmap_sem);
-       ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags | FOLL_LONGTERM,
+       ret = pin_user_pages_remote(NULL, mm, vaddr, 1, flags | FOLL_LONGTERM,
                                    page, NULL, NULL);
        if (ret == 1) {
                *pfn = page_to_pfn(page[0]);
-- 
2.24.0
