We want to get rid of nth_page(), and kfence init code is the last user.

Unfortunately, we might actually walk a PFN range where the pages are
not contiguous, because we might be allocating an area from memblock
that could span memory sections in problematic kernel configs (SPARSEMEM
without SPARSEMEM_VMEMMAP).
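
To illustrate (a sketch only, not part of this patch): with SPARSEMEM
and without SPARSEMEM_VMEMMAP the memmap is allocated per memory
section, so it is not virtually contiguous across section boundaries.
The helper below is hypothetical (kfence_pool_page() does not exist in
the tree) and only contrasts the two access patterns:

  static struct page *kfence_pool_page(unsigned long i)
  {
          unsigned long start_pfn = PHYS_PFN(virt_to_phys(__kfence_pool));

          /*
           * "virt_to_page(__kfence_pool) + i" may walk off the end of one
           * section's memmap if the pool crosses a section boundary; that
           * is what nth_page() used to handle. Translating each PFN
           * always resolves the correct memmap chunk.
           */
          return pfn_to_page(start_pfn + i);
  }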

We could check whether the page range is contiguous using
page_range_contiguous() and fail kfence init, or make kfence
incompatible with these problematic kernel configs.
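
For completeness, the rejected alternative might have looked roughly
like the sketch below; this is illustrative only, and both the exact
page_range_contiguous() signature (page, nr_pages) and the failure
path are assumptions, not code from this series:

          /* hypothetical check at the top of kfence_init_pool() */
          if (!page_range_contiguous(virt_to_page(__kfence_pool),
                                     KFENCE_POOL_SIZE / PAGE_SIZE))
                  return (unsigned long)__kfence_pool;    /* init failed */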

Let's keep it simple and just use pfn_to_page(), iterating over the PFNs.

Cc: Alexander Potapenko <gli...@google.com>
Cc: Marco Elver <el...@google.com>
Cc: Dmitry Vyukov <dvyu...@google.com>
Signed-off-by: David Hildenbrand <da...@redhat.com>
---
 mm/kfence/core.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 0ed3be100963a..793507c77f9e8 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -594,15 +594,15 @@ static void rcu_guarded_free(struct rcu_head *h)
  */
 static unsigned long kfence_init_pool(void)
 {
-       unsigned long addr;
-       struct page *pages;
+       unsigned long addr, pfn, start_pfn, end_pfn;
        int i;
 
        if (!arch_kfence_init_pool())
                return (unsigned long)__kfence_pool;
 
        addr = (unsigned long)__kfence_pool;
-       pages = virt_to_page(__kfence_pool);
+       start_pfn = PHYS_PFN(virt_to_phys(__kfence_pool));
+       end_pfn = start_pfn + KFENCE_POOL_SIZE / PAGE_SIZE;
 
        /*
         * Set up object pages: they must have PGTY_slab set to avoid freeing
@@ -612,12 +612,13 @@ static unsigned long kfence_init_pool(void)
         * fast-path in SLUB, and therefore need to ensure kfree() correctly
         * enters __slab_free() slow-path.
         */
-       for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
-               struct slab *slab = page_slab(nth_page(pages, i));
+       for (pfn = start_pfn, i = 0; pfn != end_pfn; pfn++, i++) {
+               struct slab *slab;
 
                if (!i || (i % 2))
                        continue;
 
+               slab = page_slab(pfn_to_page(pfn));
                __folio_set_slab(slab_folio(slab));
 #ifdef CONFIG_MEMCG
                slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts |
@@ -664,11 +665,13 @@ static unsigned long kfence_init_pool(void)
        return 0;
 
 reset_slab:
-       for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
-               struct slab *slab = page_slab(nth_page(pages, i));
+       for (pfn = start_pfn, i = 0; pfn != end_pfn; pfn++, i++) {
+               struct slab *slab;
 
                if (!i || (i % 2))
                        continue;
+
+               slab = page_slab(pfn_to_page(pfn));
 #ifdef CONFIG_MEMCG
                slab->obj_exts = 0;
 #endif
-- 
2.50.1

