We already read it, let's just forward it.

This patch is based on work by Ryan Roberts.

Signed-off-by: David Hildenbrand <da...@redhat.com>
---
 mm/memory.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 2aa2051ee51d3..185b4aff13d62 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -959,10 +959,9 @@ static inline void __copy_present_pte(struct vm_area_struct *dst_vma,
  */
 static inline int
 copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
-                pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
-                struct folio **prealloc)
+                pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,
+                int *rss, struct folio **prealloc)
 {
-       pte_t pte = ptep_get(src_pte);
        struct page *page;
        struct folio *folio;
 
@@ -1104,7 +1103,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                }
                /* copy_present_pte() will clear `*prealloc' if consumed */
                ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
-                                      addr, rss, &prealloc);
+                                      ptent, addr, rss, &prealloc);
                /*
                 * If we need a pre-allocated page for this pte, drop the
                 * locks, allocate, and try again.
-- 
2.43.0

Reply via email to