Same change as the previous patch, but for alloc_swap_folio():
pass vmf->address directly instead of ALIGN_DOWN(vmf->address, ...).

Signed-off-by: Michael S. Tsirkin <[email protected]>
Assisted-by: Claude:claude-opus-4-6
Assisted-by: cursor-agent:GPT-5.4-xhigh
---
 mm/memory.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 0824441a6ba1..74523bc00d8a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4734,8 +4734,7 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf)
        /* Try allocating the highest of the remaining orders. */
        gfp = vma_thp_gfp_mask(vma);
        while (orders) {
-               addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
-               folio = vma_alloc_folio(gfp, order, vma, addr);
+               folio = vma_alloc_folio(gfp, order, vma, vmf->address);
                if (folio) {
                        if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
                                                            gfp, entry))
-- 
MST


Reply via email to