On 15/01/2026 10:54, Huacai Chen wrote:
Hi, Nikita,

Hi Huacai,


On Wed, Jan 14, 2026 at 9:45 PM Kalyazin, Nikita <[email protected]> wrote:

From: Nikita Kalyazin <[email protected]>

These allow guest_memfd to remove its memory from the direct map.
Only implement them for architectures that have a direct map.
In folio_zap_direct_map(), flush the TLB on architectures where
set_direct_map_valid_noflush() does not flush it internally.

The new helpers need to be accessible to KVM on architectures that
support guest_memfd (x86 and arm64).  Since arm64 does not support
building KVM as a module, only export them on x86.

Direct map removal gives guest_memfd the same protection that
memfd_secret provides, such as hardening against Spectre-like attacks
through in-kernel gadgets.

Signed-off-by: Nikita Kalyazin <[email protected]>
---
  arch/arm64/include/asm/set_memory.h     |  2 ++
  arch/arm64/mm/pageattr.c                | 12 ++++++++++++
  arch/loongarch/include/asm/set_memory.h |  2 ++
  arch/loongarch/mm/pageattr.c            | 16 ++++++++++++++++
  arch/riscv/include/asm/set_memory.h     |  2 ++
  arch/riscv/mm/pageattr.c                | 16 ++++++++++++++++
  arch/s390/include/asm/set_memory.h      |  2 ++
  arch/s390/mm/pageattr.c                 | 18 ++++++++++++++++++
  arch/x86/include/asm/set_memory.h       |  2 ++
  arch/x86/mm/pat/set_memory.c            | 20 ++++++++++++++++++++
  include/linux/set_memory.h              | 10 ++++++++++
  11 files changed, 102 insertions(+)
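
(For readers following the thread, a minimal sketch of how a guest_memfd-style
caller could use the new pair; the gmem_* names below are illustrative only and
not part of this series:)

#include <linux/mm.h>
#include <linux/set_memory.h>

/*
 * Illustrative only: zap the folio from the direct map once it has been
 * allocated, and restore the mapping before the memory is returned to
 * the page allocator.
 */
static int gmem_prepare_folio(struct folio *folio)
{
        /* Flushes the TLB where set_direct_map_valid_noflush() does not. */
        return folio_zap_direct_map(folio);
}

static void gmem_release_folio(struct folio *folio)
{
        /* Warn (once) if the direct map entry could not be restored. */
        WARN_ON_ONCE(folio_restore_direct_map(folio));
}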

diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h
index 90f61b17275e..d949f1deb701 100644
--- a/arch/arm64/include/asm/set_memory.h
+++ b/arch/arm64/include/asm/set_memory.h
@@ -14,6 +14,8 @@ int set_memory_valid(unsigned long addr, int numpages, int enable);
  int set_direct_map_invalid_noflush(struct page *page);
  int set_direct_map_default_noflush(struct page *page);
  int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
+int folio_zap_direct_map(struct folio *folio);
+int folio_restore_direct_map(struct folio *folio);
  bool kernel_page_present(struct page *page);

  int set_memory_encrypted(unsigned long addr, int numpages);
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index f0e784b963e6..a94eff324dda 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -357,6 +357,18 @@ int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
         return set_memory_valid(addr, nr, valid);
  }

+int folio_zap_direct_map(struct folio *folio)
+{
+       return set_direct_map_valid_noflush(folio_page(folio, 0),
+                                           folio_nr_pages(folio), false);
+}
+
+int folio_restore_direct_map(struct folio *folio)
+{
+       return set_direct_map_valid_noflush(folio_page(folio, 0),
+                                           folio_nr_pages(folio), true);
+}
+
  #ifdef CONFIG_DEBUG_PAGEALLOC
  /*
   * This is - apart from the return value - doing the same
diff --git a/arch/loongarch/include/asm/set_memory.h b/arch/loongarch/include/asm/set_memory.h
index 55dfaefd02c8..9bc80ac420a9 100644
--- a/arch/loongarch/include/asm/set_memory.h
+++ b/arch/loongarch/include/asm/set_memory.h
@@ -18,5 +18,7 @@ bool kernel_page_present(struct page *page);
  int set_direct_map_default_noflush(struct page *page);
  int set_direct_map_invalid_noflush(struct page *page);
  int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
+int folio_zap_direct_map(struct folio *folio);
+int folio_restore_direct_map(struct folio *folio);

  #endif /* _ASM_LOONGARCH_SET_MEMORY_H */
diff --git a/arch/loongarch/mm/pageattr.c b/arch/loongarch/mm/pageattr.c
index f5e910b68229..14bd322dd112 100644
--- a/arch/loongarch/mm/pageattr.c
+++ b/arch/loongarch/mm/pageattr.c
@@ -236,3 +236,19 @@ int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)

         return __set_memory(addr, 1, set, clear);
  }
+
+int folio_zap_direct_map(struct folio *folio)
+{
+       int ret;
+
+       ret = set_direct_map_valid_noflush(folio_page(folio, 0),
+                                          folio_nr_pages(folio), false);
+
+       return ret;
Why not use a single statement, the same as the ARM64 version?
The RISC-V version has the same problem.

No reason for them to be different; I will switch to the single-statement form in the next version (see the sketch after this hunk). Thank you!


Huacai

+}
+
+int folio_restore_direct_map(struct folio *folio)
+{
+       return set_direct_map_valid_noflush(folio_page(folio, 0),
+                                           folio_nr_pages(folio), true);
+}
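
(The consolidated form discussed above, mirroring the ARM64 version, would be a
sketch along these lines for both LoongArch and RISC-V:)

int folio_zap_direct_map(struct folio *folio)
{
        return set_direct_map_valid_noflush(folio_page(folio, 0),
                                            folio_nr_pages(folio), false);
}
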
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index 87389e93325a..16557b70c830 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -43,6 +43,8 @@ static inline int set_kernel_memory(char *startp, char *endp,
  int set_direct_map_invalid_noflush(struct page *page);
  int set_direct_map_default_noflush(struct page *page);
  int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
+int folio_zap_direct_map(struct folio *folio);
+int folio_restore_direct_map(struct folio *folio);
  bool kernel_page_present(struct page *page);

  #endif /* __ASSEMBLER__ */
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 3f76db3d2769..2c218868114b 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -401,6 +401,22 @@ int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
         return __set_memory((unsigned long)page_address(page), nr, set, clear);
  }

+int folio_zap_direct_map(struct folio *folio)
+{
+       int ret;
+
+       ret = set_direct_map_valid_noflush(folio_page(folio, 0),
+                                          folio_nr_pages(folio), false);
+
+       return ret;
+}
+
+int folio_restore_direct_map(struct folio *folio)
+{
+       return set_direct_map_valid_noflush(folio_page(folio, 0),
+                                           folio_nr_pages(folio), true);
+}
+
  #ifdef CONFIG_DEBUG_PAGEALLOC
  static int debug_pagealloc_set_page(pte_t *pte, unsigned long addr, void *data)
  {
diff --git a/arch/s390/include/asm/set_memory.h b/arch/s390/include/asm/set_memory.h
index 94092f4ae764..fc73652e5715 100644
--- a/arch/s390/include/asm/set_memory.h
+++ b/arch/s390/include/asm/set_memory.h
@@ -63,6 +63,8 @@ __SET_MEMORY_FUNC(set_memory_4k, SET_MEMORY_4K)
  int set_direct_map_invalid_noflush(struct page *page);
  int set_direct_map_default_noflush(struct page *page);
  int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
+int folio_zap_direct_map(struct folio *folio);
+int folio_restore_direct_map(struct folio *folio);
  bool kernel_page_present(struct page *page);

  #endif
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index d3ce04a4b248..df4a487b484d 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -412,6 +412,24 @@ int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
         return __set_memory((unsigned long)page_to_virt(page), nr, flags);
  }

+int folio_zap_direct_map(struct folio *folio)
+{
+       unsigned long addr = (unsigned long)folio_address(folio);
+       int ret;
+
+       ret = set_direct_map_valid_noflush(folio_page(folio, 0),
+                                          folio_nr_pages(folio), false);
+       flush_tlb_kernel_range(addr, addr + folio_size(folio));
+
+       return ret;
+}
+
+int folio_restore_direct_map(struct folio *folio)
+{
+       return set_direct_map_valid_noflush(folio_page(folio, 0),
+                                           folio_nr_pages(folio), true);
+}
+
  bool kernel_page_present(struct page *page)
  {
         unsigned long addr;
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index 61f56cdaccb5..7208af609121 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -90,6 +90,8 @@ int set_pages_rw(struct page *page, int numpages);
  int set_direct_map_invalid_noflush(struct page *page);
  int set_direct_map_default_noflush(struct page *page);
  int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
+int folio_zap_direct_map(struct folio *folio);
+int folio_restore_direct_map(struct folio *folio);
  bool kernel_page_present(struct page *page);

  extern int kernel_set_to_readonly;
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 6c6eb486f7a6..3f0fc30eb320 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2656,6 +2656,26 @@ int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
         return __set_pages_np(page, nr);
  }

+int folio_zap_direct_map(struct folio *folio)
+{
+       unsigned long addr = (unsigned long)folio_address(folio);
+       int ret;
+
+       ret = set_direct_map_valid_noflush(folio_page(folio, 0),
+                                          folio_nr_pages(folio), false);
+       flush_tlb_kernel_range(addr, addr + folio_size(folio));
+
+       return ret;
+}
+EXPORT_SYMBOL_FOR_MODULES(folio_zap_direct_map, "kvm");
+
+int folio_restore_direct_map(struct folio *folio)
+{
+       return set_direct_map_valid_noflush(folio_page(folio, 0),
+                                           folio_nr_pages(folio), true);
+}
+EXPORT_SYMBOL_FOR_MODULES(folio_restore_direct_map, "kvm");
+
  #ifdef CONFIG_DEBUG_PAGEALLOC
  void __kernel_map_pages(struct page *page, int numpages, int enable)
  {
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index 3030d9245f5a..8d1c8a7f7d79 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -40,6 +40,16 @@ static inline int set_direct_map_valid_noflush(struct page *page,
         return 0;
  }

+static inline int folio_zap_direct_map(struct folio *folio)
+{
+       return 0;
+}
+
+static inline int folio_restore_direct_map(struct folio *folio)
+{
+       return 0;
+}
+
  static inline bool kernel_page_present(struct page *page)
  {
         return true;
--
2.50.1



