Remove multiple similar inline functions for dealing with different
types of special swap entries.

Both migration and device private swap entries use the swap offset to
store a pfn. Instead of multiple inline functions to obtain a struct
page for each swap entry type, use a common function
pfn_swap_entry_to_page(). Also open-code the various entry_to_pfn()
functions, as this results in shorter code that is easier to understand.
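
For reference, the common helper and type check added by this patch
(quoted from the include/linux/swapops.h hunk below) are:

  static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
  {
          struct page *p = pfn_to_page(swp_offset(entry));

          /*
           * Any use of migration entries may only occur while the
           * corresponding page is locked
           */
          BUG_ON(is_migration_entry(entry) && !PageLocked(p));

          return p;
  }

  static inline bool is_pfn_swap_entry(swp_entry_t entry)
  {
          return is_migration_entry(entry) || is_device_private_entry(entry);
  }

Callers that need a struct page then test is_pfn_swap_entry() and call
the single helper rather than switching on the entry type; see for
example the fs/proc/task_mmu.c hunks below.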

Signed-off-by: Alistair Popple <apop...@nvidia.com>
Reviewed-by: Ralph Campbell <rcampb...@nvidia.com>
Reviewed-by: Christoph Hellwig <h...@lst.de>

---

v11:
* Rebased on mmotm

v9:
* Rebased on v5.13-rc2

v8:
* No changes

v7:
* Reworded commit message to include pfn_swap_entry_to_page()
* Added Christoph's Reviewed-by

v6:
* Removed redundant compound_head() call from inside PageLocked()
* Fixed a minor build issue for s390 reported by the kernel test robot

v4:
* Added pfn_swap_entry_to_page()
* Reinstated check that migration entries point to locked pages
* Removed #define swapcache_prepare which isn't needed for CONFIG_SWAP=0
  builds
---
 arch/s390/mm/pgtable.c  |  2 +-
 fs/proc/task_mmu.c      | 23 +++++---------
 include/linux/swap.h    |  4 +--
 include/linux/swapops.h | 69 ++++++++++++++---------------------------
 mm/hmm.c                |  5 ++-
 mm/huge_memory.c        |  6 ++--
 mm/memcontrol.c         |  2 +-
 mm/memory.c             | 10 +++---
 mm/migrate.c            |  6 ++--
 mm/page_vma_mapped.c    |  6 ++--
 10 files changed, 51 insertions(+), 82 deletions(-)

diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 18205f851c24..eec3a9d7176e 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -691,7 +691,7 @@ static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
        if (!non_swap_entry(entry))
                dec_mm_counter(mm, MM_SWAPENTS);
        else if (is_migration_entry(entry)) {
-               struct page *page = migration_entry_to_page(entry);
+               struct page *page = pfn_swap_entry_to_page(entry);
 
                dec_mm_counter(mm, mm_counter(page));
        }
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 95c8f1e8fea6..eb97468dfe4c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -514,10 +514,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
                        } else {
                                mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
                        }
-               } else if (is_migration_entry(swpent))
-                       page = migration_entry_to_page(swpent);
-               else if (is_device_private_entry(swpent))
-                       page = device_private_entry_to_page(swpent);
+               } else if (is_pfn_swap_entry(swpent))
+                       page = pfn_swap_entry_to_page(swpent);
        } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
                                                        && pte_none(*pte))) {
                page = xa_load(&vma->vm_file->f_mapping->i_pages,
@@ -549,7 +547,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
                swp_entry_t entry = pmd_to_swp_entry(*pmd);
 
                if (is_migration_entry(entry))
-                       page = migration_entry_to_page(entry);
+                       page = pfn_swap_entry_to_page(entry);
        }
        if (IS_ERR_OR_NULL(page))
                return;
@@ -694,10 +692,8 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
        } else if (is_swap_pte(*pte)) {
                swp_entry_t swpent = pte_to_swp_entry(*pte);
 
-               if (is_migration_entry(swpent))
-                       page = migration_entry_to_page(swpent);
-               else if (is_device_private_entry(swpent))
-                       page = device_private_entry_to_page(swpent);
+               if (is_pfn_swap_entry(swpent))
+                       page = pfn_swap_entry_to_page(swpent);
        }
        if (page) {
                int mapcount = page_mapcount(page);
@@ -1389,11 +1385,8 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
                        frame = swp_type(entry) |
                                (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
                flags |= PM_SWAP;
-               if (is_migration_entry(entry))
-                       page = migration_entry_to_page(entry);
-
-               if (is_device_private_entry(entry))
-                       page = device_private_entry_to_page(entry);
+               if (is_pfn_swap_entry(entry))
+                       page = pfn_swap_entry_to_page(entry);
        }
 
        if (page && !PageAnon(page))
@@ -1454,7 +1447,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
                        if (pmd_swp_uffd_wp(pmd))
                                flags |= PM_UFFD_WP;
                        VM_BUG_ON(!is_pmd_migration_entry(pmd));
-                       page = migration_entry_to_page(entry);
+                       page = pfn_swap_entry_to_page(entry);
                }
 #endif
 
diff --git a/include/linux/swap.h b/include/linux/swap.h
index ac9bd84c905e..df7cbb6b3d3e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -564,8 +564,8 @@ static inline void show_swap_cache_info(void)
 {
 }
 
-#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
-#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
+/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
+#define free_swap_and_cache(e) is_pfn_swap_entry(e)
 
 static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
 {
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 708fbeb21dd3..c24c79812bc1 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -128,16 +128,6 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
 {
        return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
 }
-
-static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
-{
-       return swp_offset(entry);
-}
-
-static inline struct page *device_private_entry_to_page(swp_entry_t entry)
-{
-       return pfn_to_page(swp_offset(entry));
-}
 #else /* CONFIG_DEVICE_PRIVATE */
 static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
 {
@@ -157,16 +147,6 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
 {
        return false;
 }
-
-static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
-{
-       return 0;
-}
-
-static inline struct page *device_private_entry_to_page(swp_entry_t entry)
-{
-       return NULL;
-}
 #endif /* CONFIG_DEVICE_PRIVATE */
 
 #ifdef CONFIG_MIGRATION
@@ -189,22 +169,6 @@ static inline int is_write_migration_entry(swp_entry_t entry)
        return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
 }
 
-static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
-{
-       return swp_offset(entry);
-}
-
-static inline struct page *migration_entry_to_page(swp_entry_t entry)
-{
-       struct page *p = pfn_to_page(swp_offset(entry));
-       /*
-        * Any use of migration entries may only occur while the
-        * corresponding page is locked
-        */
-       BUG_ON(!PageLocked(compound_head(p)));
-       return p;
-}
-
 static inline void make_migration_entry_read(swp_entry_t *entry)
 {
        *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
@@ -224,16 +188,6 @@ static inline int is_migration_entry(swp_entry_t swp)
        return 0;
 }
 
-static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
-{
-       return 0;
-}
-
-static inline struct page *migration_entry_to_page(swp_entry_t entry)
-{
-       return NULL;
-}
-
 static inline void make_migration_entry_read(swp_entry_t *entryp) { }
 static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                                        spinlock_t *ptl) { }
@@ -248,6 +202,29 @@ static inline int is_write_migration_entry(swp_entry_t entry)
 
 #endif
 
+static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
+{
+       struct page *p = pfn_to_page(swp_offset(entry));
+
+       /*
+        * Any use of migration entries may only occur while the
+        * corresponding page is locked
+        */
+       BUG_ON(is_migration_entry(entry) && !PageLocked(p));
+
+       return p;
+}
+
+/*
+ * A pfn swap entry is a special type of swap entry that always has a pfn stored
+ * in the swap offset. They are used to represent unaddressable device memory
+ * and to restrict access to a page undergoing migration.
+ */
+static inline bool is_pfn_swap_entry(swp_entry_t entry)
+{
+       return is_migration_entry(entry) || is_device_private_entry(entry);
+}
+
 struct page_vma_mapped_walk;
 
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
diff --git a/mm/hmm.c b/mm/hmm.c
index 943cb2ba4442..3b2dda71d0ed 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -214,7 +214,7 @@ static inline bool hmm_is_device_private_entry(struct hmm_range *range,
                swp_entry_t entry)
 {
        return is_device_private_entry(entry) &&
-               device_private_entry_to_page(entry)->pgmap->owner ==
+               pfn_swap_entry_to_page(entry)->pgmap->owner ==
                range->dev_private_owner;
 }
 
@@ -257,8 +257,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                        cpu_flags = HMM_PFN_VALID;
                        if (is_write_device_private_entry(entry))
                                cpu_flags |= HMM_PFN_WRITE;
-                       *hmm_pfn = device_private_entry_to_pfn(entry) |
-                                       cpu_flags;
+                       *hmm_pfn = swp_offset(entry) | cpu_flags;
                        return 0;
                }
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d513b0cd1161..327b8d9d8d2f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1643,7 +1643,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
                        VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
                        entry = pmd_to_swp_entry(orig_pmd);
-                       page = migration_entry_to_page(entry);
+                       page = pfn_swap_entry_to_page(entry);
                        flush_needed = 0;
                } else
                        WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
@@ -2012,7 +2012,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                        swp_entry_t entry;
 
                        entry = pmd_to_swp_entry(old_pmd);
-                       page = migration_entry_to_page(entry);
+                       page = pfn_swap_entry_to_page(entry);
                } else {
                        page = pmd_page(old_pmd);
                        if (!PageDirty(page) && pmd_dirty(old_pmd))
@@ -2066,7 +2066,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                swp_entry_t entry;
 
                entry = pmd_to_swp_entry(old_pmd);
-               page = migration_entry_to_page(entry);
+               page = pfn_swap_entry_to_page(entry);
                write = is_write_migration_entry(entry);
                young = false;
                soft_dirty = pmd_swp_soft_dirty(old_pmd);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index dce5154fbb8f..ae1f5d0cb581 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5537,7 +5537,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
         * as special swap entry in the CPU page table.
         */
        if (is_device_private_entry(ent)) {
-               page = device_private_entry_to_page(ent);
+               page = pfn_swap_entry_to_page(ent);
                /*
                 * MEMORY_DEVICE_PRIVATE means ZONE_DEVICE page and which have
                 * a refcount of 1 when free (unlike normal page)
diff --git a/mm/memory.c b/mm/memory.c
index a5af7d854776..a6c5132354a2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -729,7 +729,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                }
                rss[MM_SWAPENTS]++;
        } else if (is_migration_entry(entry)) {
-               page = migration_entry_to_page(entry);
+               page = pfn_swap_entry_to_page(entry);
 
                rss[mm_counter(page)]++;
 
@@ -748,7 +748,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                        set_pte_at(src_mm, addr, src_pte, pte);
                }
        } else if (is_device_private_entry(entry)) {
-               page = device_private_entry_to_page(entry);
+               page = pfn_swap_entry_to_page(entry);
 
                /*
                 * Update rss count even for unaddressable pages, as
@@ -1280,7 +1280,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 
                entry = pte_to_swp_entry(ptent);
                if (is_device_private_entry(entry)) {
-                       struct page *page = device_private_entry_to_page(entry);
+                       struct page *page = pfn_swap_entry_to_page(entry);
 
                        if (unlikely(details && details->check_mapping)) {
                                /*
@@ -1309,7 +1309,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                else if (is_migration_entry(entry)) {
                        struct page *page;
 
-                       page = migration_entry_to_page(entry);
+                       page = pfn_swap_entry_to_page(entry);
                        rss[mm_counter(page)]--;
                }
                if (unlikely(!free_swap_and_cache(entry)))
@@ -3372,7 +3372,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                        migration_entry_wait(vma->vm_mm, vmf->pmd,
                                             vmf->address);
                } else if (is_device_private_entry(entry)) {
-                       vmf->page = device_private_entry_to_page(entry);
+                       vmf->page = pfn_swap_entry_to_page(entry);
                        ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
                } else if (is_hwpoison_entry(entry)) {
                        ret = VM_FAULT_HWPOISON;
diff --git a/mm/migrate.c b/mm/migrate.c
index 8810c1421f5d..b4abb87249e1 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -296,7 +296,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
        if (!is_migration_entry(entry))
                goto out;
 
-       page = migration_entry_to_page(entry);
+       page = pfn_swap_entry_to_page(entry);
        page = compound_head(page);
 
        /*
@@ -337,7 +337,7 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
        ptl = pmd_lock(mm, pmd);
        if (!is_pmd_migration_entry(*pmd))
                goto unlock;
-       page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
+       page = pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd));
        if (!get_page_unless_zero(page))
                goto unlock;
        spin_unlock(ptl);
@@ -2289,7 +2289,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
                        if (!is_device_private_entry(entry))
                                goto next;
 
-                       page = device_private_entry_to_page(entry);
+                       page = pfn_swap_entry_to_page(entry);
                        if (!(migrate->flags &
                                MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
                            page->pgmap->owner != migrate->pgmap_owner)
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 3dd350a941aa..082e8073af56 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -96,7 +96,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
                if (!is_migration_entry(entry))
                        return false;
 
-               pfn = migration_entry_to_pfn(entry);
+               pfn = swp_offset(entry);
        } else if (is_swap_pte(*pvmw->pte)) {
                swp_entry_t entry;
 
@@ -105,7 +105,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
                if (!is_device_private_entry(entry))
                        return false;
 
-               pfn = device_private_entry_to_pfn(entry);
+               pfn = swp_offset(entry);
        } else {
                if (!pte_present(*pvmw->pte))
                        return false;
@@ -233,7 +233,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
                                        return not_found(pvmw);
                                entry = pmd_to_swp_entry(pmde);
                                if (!is_migration_entry(entry) ||
-                                   migration_entry_to_page(entry) != page)
+                                   pfn_swap_entry_to_page(entry) != page)
                                        return not_found(pvmw);
                                return true;
                        }
-- 
2.20.1
