To create a new migration entry for a given struct page, the page is
first converted to its pfn, which is then passed to
make_readable_migration_entry() (and friends).
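For example, call sites today follow this pattern (an illustrative
sketch, mirroring call sites such as try_to_migrate_one()):

	entry = make_readable_migration_entry(page_to_pfn(page));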
A future change will remove device private pages from the physical
address space. This will mean that device private pages no longer have
a pfn and must be handled separately.

Prepare for this with a new set of helpers:

 - make_readable_migration_entry_from_page()
 - make_readable_exclusive_migration_entry_from_page()
 - make_writable_migration_entry_from_page()

These helpers take a struct page as a parameter instead of a pfn. This
will allow more flexibility for handling the swap offset field
differently for device private pages.

Signed-off-by: Jordan Niethe <[email protected]>
---
v1:
 - New to series
v2:
 - Add flags param

(An illustrative usage sketch of the new helpers is appended after the
diff.)
---
 include/linux/leafops.h | 14 ++++++++++++++
 include/linux/swapops.h | 33 +++++++++++++++++++++++++++++++++
 mm/huge_memory.c        | 29 +++++++++++++++++------------
 mm/hugetlb.c            | 15 +++++++++------
 mm/memory.c             |  5 +++--
 mm/migrate_device.c     | 12 ++++++------
 mm/mprotect.c           | 10 +++++++---
 mm/rmap.c               | 12 ++++++------
 8 files changed, 95 insertions(+), 35 deletions(-)

diff --git a/include/linux/leafops.h b/include/linux/leafops.h
index cfafe7a5e7b1..2fde8208da13 100644
--- a/include/linux/leafops.h
+++ b/include/linux/leafops.h
@@ -363,6 +363,20 @@ static inline unsigned long softleaf_to_pfn(softleaf_t entry)
 	return swp_offset(entry) & SWP_PFN_MASK;
 }
 
+/**
+ * softleaf_to_flags() - Obtain flags encoded within leaf entry.
+ * @entry: Leaf entry, softleaf_has_pfn(@entry) must return true.
+ *
+ * Returns: The flags associated with the leaf entry.
+ */
+static inline unsigned long softleaf_to_flags(softleaf_t entry)
+{
+	VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
+
+	/* Temporary until swp_entry_t eliminated. */
+	return swp_offset(entry) & (SWP_MIG_YOUNG | SWP_MIG_DIRTY);
+}
+
 /**
  * softleaf_to_page() - Obtains struct page for PFN encoded within leaf entry.
  * @entry: Leaf entry, softleaf_has_pfn(@entry) must return true.
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 8cfc966eae48..a9ad997bd5ec 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -173,16 +173,33 @@ static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
 	return swp_entry(SWP_MIGRATION_READ, offset);
 }
 
+static inline swp_entry_t make_readable_migration_entry_from_page(struct page *page, pgoff_t flags)
+{
+	return swp_entry(SWP_MIGRATION_READ, page_to_pfn(page) | flags);
+}
+
 static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
 {
 	return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
 }
 
+static inline swp_entry_t make_readable_exclusive_migration_entry_from_page(struct page *page,
+									     pgoff_t flags)
+{
+	return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, page_to_pfn(page) | flags);
+}
+
 static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
 {
 	return swp_entry(SWP_MIGRATION_WRITE, offset);
 }
 
+static inline swp_entry_t make_writable_migration_entry_from_page(struct page *page,
+								   pgoff_t flags)
+{
+	return swp_entry(SWP_MIGRATION_WRITE, page_to_pfn(page) | flags);
+}
+
 /*
  * Returns whether the host has large enough swap offset field to support
  * carrying over pgtable A/D bits for page migrations.  The result is
@@ -222,11 +239,27 @@ static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
 	return swp_entry(0, 0);
 }
 
+static inline swp_entry_t make_readable_migration_entry_from_page(struct page *page, pgoff_t flags)
+{
+	return swp_entry(0, 0);
+}
+
+static inline swp_entry_t make_writable_migration_entry_from_page(struct page *page, pgoff_t flags)
+{
+	return swp_entry(0, 0);
+}
+
 static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
 {
 	return swp_entry(0, 0);
 }
 
+static inline swp_entry_t make_readable_exclusive_migration_entry_from_page(struct page *page,
+									     pgoff_t flags)
+{
+	return swp_entry(0, 0);
+}
+
 static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
 {
 	return swp_entry(0, 0);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 40cf59301c21..e3a448cdb34d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1800,7 +1800,8 @@ static void copy_huge_non_present_pmd(
 	if (softleaf_is_migration_write(entry) ||
 	    softleaf_is_migration_read_exclusive(entry)) {
-		entry = make_readable_migration_entry(swp_offset(entry));
+		entry = make_readable_migration_entry_from_page(softleaf_to_page(entry),
+								softleaf_to_flags(entry));
 		pmd = swp_entry_to_pmd(entry);
 		if (pmd_swp_soft_dirty(*src_pmd))
 			pmd = pmd_swp_mksoft_dirty(pmd);
@@ -2524,9 +2525,13 @@ static void change_non_present_huge_pmd(struct mm_struct *mm,
 		 * just be safe and disable write
 		 */
 		if (folio_test_anon(folio))
-			entry = make_readable_exclusive_migration_entry(swp_offset(entry));
+			entry = make_readable_exclusive_migration_entry_from_page(
+					softleaf_to_page(entry),
+					softleaf_to_flags(entry));
 		else
-			entry = make_readable_migration_entry(swp_offset(entry));
+			entry = make_readable_migration_entry_from_page(
+					softleaf_to_page(entry),
+					softleaf_to_flags(entry));
 		newpmd = swp_entry_to_pmd(entry);
 		if (pmd_swp_soft_dirty(*pmd))
 			newpmd = pmd_swp_mksoft_dirty(newpmd);
@@ -3183,14 +3188,14 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
 		if (write)
-			swp_entry = make_writable_migration_entry(
-						page_to_pfn(page + i));
+			swp_entry = make_writable_migration_entry_from_page(
+						page + i, 0);
 		else if (anon_exclusive)
-			swp_entry = make_readable_exclusive_migration_entry(
-						page_to_pfn(page + i));
+			swp_entry = make_readable_exclusive_migration_entry_from_page(
+						page + i, 0);
 		else
-			swp_entry = make_readable_migration_entry(
-						page_to_pfn(page + i));
+			swp_entry = make_readable_migration_entry_from_page(
+						page + i, 0);
 		if (young)
 			swp_entry = make_migration_entry_young(swp_entry);
 		if (dirty)
@@ -4890,11 +4895,11 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 	if (pmd_dirty(pmdval))
 		folio_mark_dirty(folio);
 	if (pmd_write(pmdval))
-		entry = make_writable_migration_entry(page_to_pfn(page));
+		entry = make_writable_migration_entry_from_page(page, 0);
 	else if (anon_exclusive)
-		entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
+		entry = make_readable_exclusive_migration_entry_from_page(page, 0);
 	else
-		entry = make_readable_migration_entry(page_to_pfn(page));
+		entry = make_readable_migration_entry_from_page(page, 0);
 	if (pmd_young(pmdval))
 		entry = make_migration_entry_young(entry);
 	if (pmd_dirty(pmdval))
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 51273baec9e5..6a5e40d4cfc2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4939,8 +4939,9 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			 * COW mappings require pages in both
 			 * parent and child to be set to read.
 			 */
-			softleaf = make_readable_migration_entry(
-						swp_offset(softleaf));
+			softleaf = make_readable_migration_entry_from_page(
+						softleaf_to_page(softleaf),
+						softleaf_to_flags(softleaf));
 			entry = swp_entry_to_pte(softleaf);
 			if (userfaultfd_wp(src_vma) && uffd_wp)
 				entry = pte_swp_mkuffd_wp(entry);
@@ -6491,11 +6492,13 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
 		if (softleaf_is_migration_write(entry)) {
 			if (folio_test_anon(folio))
-				entry = make_readable_exclusive_migration_entry(
-							swp_offset(entry));
+				entry = make_readable_exclusive_migration_entry_from_page(
+							softleaf_to_page(entry),
+							softleaf_to_flags(entry));
 			else
-				entry = make_readable_migration_entry(
-							swp_offset(entry));
+				entry = make_readable_migration_entry_from_page(
+							softleaf_to_page(entry),
+							softleaf_to_flags(entry));
 			newpte = swp_entry_to_pte(entry);
 			pages++;
 		}
diff --git a/mm/memory.c b/mm/memory.c
index 2a55edc48a65..16493fbb3adb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -963,8 +963,9 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 			 * to be set to read. A previously exclusive entry is
 			 * now shared.
 			 */
-			entry = make_readable_migration_entry(
-						swp_offset(entry));
+			entry = make_readable_migration_entry_from_page(
+						softleaf_to_page(entry),
+						softleaf_to_flags(entry));
 			pte = softleaf_to_pte(entry);
 			if (pte_swp_soft_dirty(orig_pte))
 				pte = pte_swp_mksoft_dirty(pte);
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index a2baaa2a81f9..c876526ac6a3 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -432,14 +432,14 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 
 			/* Setup special migration page table entry */
 			if (mpfn & MIGRATE_PFN_WRITE)
-				entry = make_writable_migration_entry(
-							page_to_pfn(page));
+				entry = make_writable_migration_entry_from_page(
+							page, 0);
 			else if (anon_exclusive)
-				entry = make_readable_exclusive_migration_entry(
-							page_to_pfn(page));
+				entry = make_readable_exclusive_migration_entry_from_page(
+							page, 0);
 			else
-				entry = make_readable_migration_entry(
-							page_to_pfn(page));
+				entry = make_readable_migration_entry_from_page(
+							page, 0);
 			if (pte_present(pte)) {
 				if (pte_young(pte))
 					entry = make_migration_entry_young(entry);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 283889e4f1ce..adfe1b7a4a19 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -328,10 +328,14 @@ static long change_pte_range(struct mmu_gather *tlb,
 				 * just be safe and disable write
 				 */
 				if (folio_test_anon(folio))
-					entry = make_readable_exclusive_migration_entry(
-							swp_offset(entry));
+					entry = make_readable_exclusive_migration_entry_from_page(
+							softleaf_to_page(entry),
+							softleaf_to_flags(entry));
 				else
-					entry = make_readable_migration_entry(swp_offset(entry));
+					entry = make_readable_migration_entry_from_page(
+							softleaf_to_page(entry),
+							softleaf_to_flags(entry));
+
 				newpte = swp_entry_to_pte(entry);
 				if (pte_swp_soft_dirty(oldpte))
 					newpte = pte_swp_mksoft_dirty(newpte);
diff --git a/mm/rmap.c b/mm/rmap.c
index 79a2478b4aa9..6a63333f8722 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2539,14 +2539,14 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			 * pte is removed and then restart fault handling.
 			 */
 			if (writable)
-				entry = make_writable_migration_entry(
-							page_to_pfn(subpage));
+				entry = make_writable_migration_entry_from_page(
+							subpage, 0);
 			else if (anon_exclusive)
-				entry = make_readable_exclusive_migration_entry(
-							page_to_pfn(subpage));
+				entry = make_readable_exclusive_migration_entry_from_page(
+							subpage, 0);
 			else
-				entry = make_readable_migration_entry(
-							page_to_pfn(subpage));
+				entry = make_readable_migration_entry_from_page(
+							subpage, 0);
 			if (likely(pte_present(pteval))) {
 				if (pte_young(pteval))
 					entry = make_migration_entry_young(entry);
-- 
2.34.1
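
A minimal usage sketch of the new helpers (illustrative only, assuming
the signatures added above; these are not lines from the patch):

	/* Rewrite an existing migration entry, carrying its A/D flags over: */
	entry = make_readable_migration_entry_from_page(softleaf_to_page(entry),
							softleaf_to_flags(entry));

	/* Create a fresh writable entry when there are no flags to preserve: */
	entry = make_writable_migration_entry_from_page(page, 0);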
