When restoring a folio using kho_restore_folio(), no sanity checks are
done to make sure the folio actually came from a kexec handover. The
caller is trusted to pass in the right address. If the caller has a bug
and passes in a wrong address, an in-use folio might be "restored" and
returned, causing all sorts of memory corruption.

Harden the folio restore logic by stashing a magic number in
page->private along with the folio order. If the magic number does not
match, the folio won't be touched. page->private is an unsigned long.
The union kho_page_info splits it into two parts, with one holding the
order and the other holding the magic number.
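
For illustration only (not part of the patch), a minimal standalone C
sketch of the packing scheme, assuming a 64-bit unsigned long and
32-bit unsigned int as on typical kernel targets:

/*
 * Userspace illustration of packing two 32-bit values into one
 * unsigned long via a union, as the patch does for page->private.
 */
#include <assert.h>
#include <stdio.h>

#define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */

union kho_page_info {
	unsigned long page_private;
	struct {
		unsigned int order;
		unsigned int magic;
	};
};

int main(void)
{
	union kho_page_info info = { 0 };

	/* The union must fit in a single unsigned long (page->private). */
	static_assert(sizeof(union kho_page_info) == sizeof(unsigned long),
		      "kho_page_info must fit in an unsigned long");

	/* Serialization side: stash magic and order into one word. */
	info.magic = KHO_PAGE_MAGIC;
	info.order = 3;

	/* Restore side: unpack the word and validate it. */
	union kho_page_info check = { .page_private = info.page_private };

	if (check.magic != KHO_PAGE_MAGIC)
		printf("not a KHO-preserved page, refusing to restore\n");
	else
		printf("restoring folio of order %u\n", check.order);

	return 0;
}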

Signed-off-by: Pratyush Yadav <praty...@kernel.org>
---
 kernel/kexec_handover.c | 29 ++++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)

diff --git a/kernel/kexec_handover.c b/kernel/kexec_handover.c
index ecd1ac210dbd7..68eb3c28abe41 100644
--- a/kernel/kexec_handover.c
+++ b/kernel/kexec_handover.c
@@ -32,6 +32,22 @@
 #define PROP_PRESERVED_MEMORY_MAP "preserved-memory-map"
 #define PROP_SUB_FDT "fdt"
 
+#define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */
+
+/*
+ * KHO uses page->private, which is an unsigned long, to store page metadata.
+ * Use it to store both the magic and the order.
+ */
+union kho_page_info {
+       unsigned long page_private;
+       struct {
+               unsigned int order;
+               unsigned int magic;
+       };
+};
+
+static_assert(sizeof(union kho_page_info) == sizeof(((struct page *)0)->private));
+
 static bool kho_enable __ro_after_init;
 
 bool kho_is_enabled(void)
@@ -210,16 +226,16 @@ static void kho_restore_page(struct page *page, unsigned int order)
 struct folio *kho_restore_folio(phys_addr_t phys)
 {
        struct page *page = pfn_to_online_page(PHYS_PFN(phys));
-       unsigned long order;
+       union kho_page_info info;
 
        if (!page)
                return NULL;
 
-       order = page->private;
-       if (order > MAX_PAGE_ORDER)
+       info.page_private = page->private;
+       if (info.magic != KHO_PAGE_MAGIC || info.order > MAX_PAGE_ORDER)
                return NULL;
 
-       kho_restore_page(page, order);
+       kho_restore_page(page, info.order);
        return page_folio(page);
 }
 EXPORT_SYMBOL_GPL(kho_restore_folio);
@@ -341,10 +357,13 @@ static void __init deserialize_bitmap(unsigned int order,
                phys_addr_t phys =
                        elm->phys_start + (bit << (order + PAGE_SHIFT));
                struct page *page = phys_to_page(phys);
+               union kho_page_info info;
 
                memblock_reserve(phys, sz);
                memblock_reserved_mark_noinit(phys, sz);
-               page->private = order;
+               info.magic = KHO_PAGE_MAGIC;
+               info.order = order;
+               page->private = info.page_private;
        }
 }
 

base-commit: b320789d6883cc00ac78ce83bccbfe7ed58afcf0
-- 
2.47.3

