On 23/03/2026 18:00, Ackerley Tng wrote:
"Kalyazin, Nikita" <[email protected]> writes:
From: Nikita Kalyazin <[email protected]>
This is to avoid excessive folio->page->address conversions when adding
helpers on top of set_direct_map_valid_noflush() in the next patch.
I can't take credit for what Sashiko [1] spotted.
Acked-by: David Hildenbrand (Arm) <[email protected]>
Signed-off-by: Nikita Kalyazin <[email protected]>
---
 arch/arm64/include/asm/set_memory.h     |  7 ++++---
 arch/arm64/mm/pageattr.c                | 19 +++++++++----------
 arch/loongarch/include/asm/set_memory.h |  7 ++++---
 arch/loongarch/mm/pageattr.c            | 25 +++++++++++--------------
 arch/riscv/include/asm/set_memory.h     |  7 ++++---
 arch/riscv/mm/pageattr.c                | 17 +++++++++--------
 arch/s390/include/asm/set_memory.h      |  7 ++++---
 arch/s390/mm/pageattr.c                 | 13 +++++++------
 arch/x86/include/asm/set_memory.h       |  7 ++++---
 arch/x86/mm/pat/set_memory.c            | 23 ++++++++++++-----------
 include/linux/set_memory.h              |  9 +++++----
 kernel/power/snapshot.c                 |  4 ++--
 mm/execmem.c                            |  6 ++++--
 mm/secretmem.c                          |  6 +++---
 mm/vmalloc.c                            | 11 +++++++----
 15 files changed, 89 insertions(+), 79 deletions(-)
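To illustrate the rationale in the commit message: with the page-based
signature a folio-aware caller has to go folio -> page, and the arch code
then does page -> address internally, whereas with the address-based
signature the caller can pass folio_address() straight through. A rough
sketch (the helper below is hypothetical, only here to show the difference):

/* Hypothetical folio-level helper, for illustration only. */

/* Old signature: folio -> page here, then page -> address inside the arch code. */
static int folio_set_direct_map_valid(struct folio *folio, bool valid)
{
	return set_direct_map_valid_noflush(folio_page(folio, 0),
					    folio_nr_pages(folio), valid);
}

/* New signature: the caller passes the linear address directly. */
static int folio_set_direct_map_valid(struct folio *folio, bool valid)
{
	return set_direct_map_valid_noflush(folio_address(folio),
					    folio_nr_pages(folio), valid);
}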
[...snip...]
diff --git a/arch/loongarch/mm/pageattr.c b/arch/loongarch/mm/pageattr.c
index f5e910b68229..9e08905d3624 100644
--- a/arch/loongarch/mm/pageattr.c
+++ b/arch/loongarch/mm/pageattr.c
@@ -198,32 +198,29 @@ bool kernel_page_present(struct page *page)
return pte_present(ptep_get(pte));
}
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(const void *addr)
{
- unsigned long addr = (unsigned long)page_address(page);
-
- if (addr < vm_map_base)
+ if ((unsigned long)addr < vm_map_base)
return 0;
- return __set_memory(addr, 1, PAGE_KERNEL, __pgprot(0));
+ return __set_memory((unsigned long)addr, 1, PAGE_KERNEL, __pgprot(0));
}
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(const void *addr)
{
- unsigned long addr = (unsigned long)page_address(page);
-
- if (addr < vm_map_base)
+ if ((unsigned long)addr < vm_map_base)
return 0;
- return __set_memory(addr, 1, __pgprot(0), __pgprot(_PAGE_PRESENT | _PAGE_VALID));
+ return __set_memory((unsigned long)addr, 1, __pgprot(0),
+ __pgprot(_PAGE_PRESENT | _PAGE_VALID));
}
-int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
+int set_direct_map_valid_noflush(const void *addr, unsigned long numpages,
+ bool valid)
{
- unsigned long addr = (unsigned long)page_address(page);
pgprot_t set, clear;
- if (addr < vm_map_base)
+ if ((unsigned long)addr < vm_map_base)
return 0;
if (valid) {
@@ -234,5 +231,5 @@ int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
clear = __pgprot(_PAGE_PRESENT | _PAGE_VALID);
}
- return __set_memory(addr, 1, set, clear);
+ return __set_memory((unsigned long)addr, 1, set, clear);
Sashiko also spotted that there is a hard-coded 1 here: the page count
passed to __set_memory() stays at 1 even though the function now takes
numpages. Before this change it was already hard-coded to 1, so I'm not
sure if this is a bug. Could this be addressed in a separate patch series?
Yes, I agree, it looks out of scope for this series.
}
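If the hard-coded 1 does turn out to be a bug, the eventual follow-up would
presumably just honour the caller's count, roughly along these lines
(untested sketch, clearly out of scope for this series):

int set_direct_map_valid_noflush(const void *addr, unsigned long numpages,
				 bool valid)
{
	pgprot_t set, clear;

	if ((unsigned long)addr < vm_map_base)
		return 0;

	if (valid) {
		set = PAGE_KERNEL;
		clear = __pgprot(0);
	} else {
		set = __pgprot(0);
		clear = __pgprot(_PAGE_PRESENT | _PAGE_VALID);
	}

	/* numpages instead of the hard-coded 1 */
	return __set_memory((unsigned long)addr, numpages, set, clear);
}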
[...snip...]
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 40581a720fe8..6aea1f470fd5 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2587,9 +2587,9 @@ int set_pages_rw(struct page *page, int numpages)
return set_memory_rw(addr, numpages);
}
-static int __set_pages_p(struct page *page, int numpages)
+static int __set_pages_p(const void *addr, int numpages)
{
- unsigned long tempaddr = (unsigned long) page_address(page);
+ unsigned long tempaddr = (unsigned long)addr;
struct cpa_data cpa = { .vaddr = &tempaddr,
.pgd = NULL,
.numpages = numpages,
@@ -2606,9 +2606,9 @@ static int __set_pages_p(struct page *page, int numpages)
return __change_page_attr_set_clr(&cpa, 1);
}
-static int __set_pages_np(struct page *page, int numpages)
+static int __set_pages_np(const void *addr, int numpages)
{
- unsigned long tempaddr = (unsigned long) page_address(page);
+ unsigned long tempaddr = (unsigned long)addr;
struct cpa_data cpa = { .vaddr = &tempaddr,
.pgd = NULL,
.numpages = numpages,
@@ -2625,22 +2625,23 @@ static int __set_pages_np(struct page *page, int numpages)
return __change_page_attr_set_clr(&cpa, 1);
}
I agree that in arch/x86/mm/pat/set_memory.c, __kernel_map_pages() has
calls to __set_pages_p() and __set_pages_np() that seem to have been
missed in this patch. Those calls still pass struct page *. Maybe
that's because __kernel_map_pages() is guarded by
CONFIG_DEBUG_PAGEALLOC, so if you were using an LSP-guided refactoring,
that call was missed.
Fixed, thanks!
Should probably try a grep to see what else needs replacing :)
[1] https://sashiko.dev/#/patchset/20260317141031.514-1-kalyazin%40amazon.com
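For completeness, the fixed-up call sites in __kernel_map_pages() would
presumably end up doing the page -> address conversion themselves, roughly
like this (sketch only, not quoted from the updated series):

	/* sketch: __kernel_map_pages() converting to a linear address itself */
	if (enable)
		__set_pages_p(page_address(page), numpages);
	else
		__set_pages_np(page_address(page), numpages);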
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(const void *addr)
{
- return __set_pages_np(page, 1);
+ return __set_pages_np(addr, 1);
}
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(const void *addr)
{
- return __set_pages_p(page, 1);
+ return __set_pages_p(addr, 1);
}
-int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
+int set_direct_map_valid_noflush(const void *addr, unsigned long numpages,
+ bool valid)
{
if (valid)
- return __set_pages_p(page, nr);
+ return __set_pages_p(addr, numpages);
- return __set_pages_np(page, nr);
+ return __set_pages_np(addr, numpages);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
[...snip...]