To create a new device private entry for a given struct page, the page
is first converted to its pfn, which is then passed to
make_writable_device_private_entry() (and friends).

A future change will remove device private pages from the physical
address space. Device private pages will then no longer have a pfn and
must be handled separately.

Prepare for this with a new set of helpers:

- make_readable_device_private_entry_from_page()
- make_writable_device_private_entry_from_page()

These helpers take a struct page and a flags value as parameters
instead of a pfn. This gives more flexibility to handle the swap offset
field differently for device private pages.
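
For illustration, a call-site conversion looks like the following
sketch (the wrapper function is hypothetical; passing 0 for flags
matches the conversions done in this patch):

    /*
     * Hypothetical caller: switch from the pfn-based helpers to the
     * new page-based helpers. Passing 0 for flags keeps the current
     * behaviour.
     */
    static swp_entry_t example_device_private_entry(struct page *page,
                                                    bool writable)
    {
            if (writable)
                    /* was: make_writable_device_private_entry(page_to_pfn(page)) */
                    return make_writable_device_private_entry_from_page(page, 0);

            /* was: make_readable_device_private_entry(page_to_pfn(page)) */
            return make_readable_device_private_entry_from_page(page, 0);
    }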

Signed-off-by: Jordan Niethe <[email protected]>
---
v1:
  - New to series
v2:
  - Add flag param
---
 include/linux/swapops.h | 24 ++++++++++++++++++++++++
 mm/huge_memory.c        | 14 ++++++--------
 mm/migrate.c            |  6 ++----
 mm/migrate_device.c     | 12 ++++--------
 4 files changed, 36 insertions(+), 20 deletions(-)

diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index bae76d3831fb..f7d85a451a2b 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -138,11 +138,23 @@ static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
        return swp_entry(SWP_DEVICE_READ, offset);
 }
 
+static inline swp_entry_t make_readable_device_private_entry_from_page(struct page *page,
+                                                                      pgoff_t flags)
+{
+       return swp_entry(SWP_DEVICE_READ, page_to_pfn(page) | flags);
+}
+
 static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
 {
        return swp_entry(SWP_DEVICE_WRITE, offset);
 }
 
+static inline swp_entry_t make_writable_device_private_entry_from_page(struct page *page,
+                                                                      pgoff_t flags)
+{
+       return swp_entry(SWP_DEVICE_WRITE, page_to_pfn(page) | flags);
+}
+
 static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
 {
        return swp_entry(SWP_DEVICE_EXCLUSIVE, offset);
@@ -191,11 +203,23 @@ static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
        return swp_entry(0, 0);
 }
 
+static inline swp_entry_t make_readable_device_private_entry_from_page(struct page *page,
+                                                                      pgoff_t flags)
+{
+       return swp_entry(0, 0);
+}
+
 static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
 {
        return swp_entry(0, 0);
 }
 
+static inline swp_entry_t make_writable_device_private_entry_from_page(struct page *page,
+                                                                      pgoff_t flags)
+{
+       return swp_entry(0, 0);
+}
+
 static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
 {
        return swp_entry(0, 0);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e3a448cdb34d..03f1f13bb24c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3219,11 +3219,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                         * is false.
                         */
                        if (write)
-                               swp_entry = make_writable_device_private_entry(
-                                                       page_to_pfn(page + i));
+                               swp_entry = make_writable_device_private_entry_from_page(
+                                                       page + i, 0);
                        else
-                               swp_entry = make_readable_device_private_entry(
-                                                       page_to_pfn(page + i));
+                               swp_entry = make_readable_device_private_entry_from_page(
+                                                       page + i, 0);
                        /*
                         * Young and dirty bits are not progated via swp_entry
                         */
@@ -4950,11 +4950,9 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
                swp_entry_t entry;
 
                if (pmd_write(pmde))
-                       entry = make_writable_device_private_entry(
-                                                       page_to_pfn(new));
+                       entry = make_writable_device_private_entry_from_page(new, 0);
                else
-                       entry = make_readable_device_private_entry(
-                                                       page_to_pfn(new));
+                       entry = make_readable_device_private_entry_from_page(new, 0);
                pmde = swp_entry_to_pmd(entry);
 
                if (pmd_swp_soft_dirty(*pvmw->pmd))
diff --git a/mm/migrate.c b/mm/migrate.c
index 5169f9717f60..6cc6c989ab6b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -399,11 +399,9 @@ static bool remove_migration_pte(struct folio *folio,
 
                if (unlikely(is_device_private_page(new))) {
                        if (pte_write(pte))
-                               entry = make_writable_device_private_entry(
-                                                       page_to_pfn(new));
+                               entry = make_writable_device_private_entry_from_page(new, 0);
                        else
-                               entry = make_readable_device_private_entry(
-                                                       page_to_pfn(new));
+                               entry = make_readable_device_private_entry_from_page(new, 0);
                        pte = softleaf_to_pte(entry);
                        if (pte_swp_soft_dirty(old_pte))
                                pte = pte_swp_mksoft_dirty(pte);
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index c876526ac6a3..0ca6f78df0e2 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -836,11 +836,9 @@ static int migrate_vma_insert_huge_pmd_page(struct migrate_vma *migrate,
                swp_entry_t swp_entry;
 
                if (vma->vm_flags & VM_WRITE)
-                       swp_entry = make_writable_device_private_entry(
-                                               page_to_pfn(page));
+                       swp_entry = make_writable_device_private_entry_from_page(page, 0);
                else
-                       swp_entry = make_readable_device_private_entry(
-                                               page_to_pfn(page));
+                       swp_entry = make_readable_device_private_entry_from_page(page, 0);
                entry = swp_entry_to_pmd(swp_entry);
        } else {
                if (folio_is_zone_device(folio) &&
@@ -1033,11 +1031,9 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
                swp_entry_t swp_entry;
 
                if (vma->vm_flags & VM_WRITE)
-                       swp_entry = make_writable_device_private_entry(
-                                               page_to_pfn(page));
+                       swp_entry = make_writable_device_private_entry_from_page(page, 0);
                else
-                       swp_entry = make_readable_device_private_entry(
-                                               page_to_pfn(page));
+                       swp_entry = make_readable_device_private_entry_from_page(page, 0);
                entry = swp_entry_to_pte(swp_entry);
        } else {
                if (folio_is_zone_device(folio) &&
-- 
2.34.1

