A future change will remove device private pages from the physical
address space. This will mean that device private pages no longer have
a normal PFN and must be handled separately.

Prepare for this by adding a PVMW_DEVICE_PRIVATE flag to
page_vma_mapped_walk::flags. This indicates that
page_vma_mapped_walk::pfn contains a device private offset rather than a
normal pfn.

Once device private pages are removed from the physical address
space, this flag will be used to ensure a device private offset is
returned.

Signed-off-by: Jordan Niethe <[email protected]>
Signed-off-by: Alistair Popple <[email protected]>
---
v1:
  - Update for HMM huge page support
v2:
  - Move adding device_private param to check_pmd() until final patch
v3:
  - Track device private offset in pvmw::flags instead of pvmw::pfn
---
 include/linux/rmap.h | 24 ++++++++++++++++++++++--
 mm/page_vma_mapped.c |  4 ++--
 mm/rmap.c            |  4 ++--
 mm/vmscan.c          |  2 +-
 4 files changed, 27 insertions(+), 7 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index daa92a58585d..1b03297f13dc 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -921,6 +921,8 @@ struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr,
 #define PVMW_SYNC              (1 << 0)
 /* Look for migration entries rather than present PTEs */
 #define PVMW_MIGRATION         (1 << 1)
+/* pvmw::pfn is a device private offset */
+#define PVMW_DEVICE_PRIVATE    (1 << 2)
 
 /* Result flags */
 
@@ -939,14 +941,32 @@ struct page_vma_mapped_walk {
        unsigned int flags;
 };
 
+static inline unsigned long page_vma_walk_flags(const struct folio *folio,
+                                               unsigned long flags)
+{
+       if (folio_is_device_private(folio))
+               return flags | PVMW_DEVICE_PRIVATE;
+       return flags;
+}
+
+static inline unsigned long folio_page_vma_walk_pfn(const struct folio *folio)
+{
+       return folio_pfn(folio);
+}
+
+static inline struct folio *page_vma_walk_pfn_to_folio(struct page_vma_mapped_walk *pvmw)
+{
+       return pfn_folio(pvmw->pfn);
+}
+
 #define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags)    \
        struct page_vma_mapped_walk name = {                            \
-               .pfn = folio_pfn(_folio),                               \
+               .pfn = folio_page_vma_walk_pfn(_folio),                 \
                .nr_pages = folio_nr_pages(_folio),                     \
                .pgoff = folio_pgoff(_folio),                           \
                .vma = _vma,                                            \
                .address = _address,                                    \
-               .flags = _flags,                                        \
+               .flags = page_vma_walk_flags(_folio, _flags),           \
        }
 
 static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index b38a1d00c971..039a2d71e92f 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -350,10 +350,10 @@ unsigned long page_mapped_in_vma(const struct page *page,
 {
        const struct folio *folio = page_folio(page);
        struct page_vma_mapped_walk pvmw = {
-               .pfn = page_to_pfn(page),
+               .pfn = folio_page_vma_walk_pfn(folio),
                .nr_pages = 1,
                .vma = vma,
-               .flags = PVMW_SYNC,
+               .flags = page_vma_walk_flags(folio, PVMW_SYNC),
        };
 
        pvmw.address = vma_address(vma, page_pgoff(folio, page), 1);
diff --git a/mm/rmap.c b/mm/rmap.c
index f955f02d570e..7f12934725d1 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1871,7 +1871,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
         * if page table locking is skipped: use TTU_SYNC to wait for that.
         */
        if (flags & TTU_SYNC)
-               pvmw.flags = PVMW_SYNC;
+               pvmw.flags = page_vma_walk_flags(folio, PVMW_SYNC);
 
        /*
         * For THP, we have to assume the worse case ie pmd for invalidation.
@@ -2299,7 +2299,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
         * if page table locking is skipped: use TTU_SYNC to wait for that.
         */
        if (flags & TTU_SYNC)
-               pvmw.flags = PVMW_SYNC;
+               pvmw.flags = page_vma_walk_flags(folio, PVMW_SYNC);
 
        /*
         * For THP, we have to assume the worse case ie pmd for invalidation.
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 670fe9fae5ba..5d81939bf12a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4203,7 +4203,7 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
        pte_t *pte = pvmw->pte;
        unsigned long addr = pvmw->address;
        struct vm_area_struct *vma = pvmw->vma;
-       struct folio *folio = pfn_folio(pvmw->pfn);
+       struct folio *folio = page_vma_walk_pfn_to_folio(pvmw);
        struct mem_cgroup *memcg = folio_memcg(folio);
        struct pglist_data *pgdat = folio_pgdat(folio);
        struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
-- 
2.34.1