+}
+
+int folio_restore_direct_map(struct folio *folio)
+{
+ return set_direct_map_valid_noflush(folio_page(folio, 0),
+ folio_nr_pages(folio), true);
+}
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index 87389e93325a..16557b70c830 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -43,6 +43,8 @@ static inline int set_kernel_memory(char *startp, char *endp,
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
+int folio_zap_direct_map(struct folio *folio);
+int folio_restore_direct_map(struct folio *folio);
bool kernel_page_present(struct page *page);

#endif /* __ASSEMBLER__ */
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 3f76db3d2769..2c218868114b 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -401,6 +401,22 @@ int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
return __set_memory((unsigned long)page_address(page), nr, set, clear);
}

+int folio_zap_direct_map(struct folio *folio)
+{
+ int ret;
+
+ ret = set_direct_map_valid_noflush(folio_page(folio, 0),
+ folio_nr_pages(folio), false);
+
+ return ret;
+}
+
+int folio_restore_direct_map(struct folio *folio)
+{
+ return set_direct_map_valid_noflush(folio_page(folio, 0),
+ folio_nr_pages(folio), true);
+}
+
#ifdef CONFIG_DEBUG_PAGEALLOC
static int debug_pagealloc_set_page(pte_t *pte, unsigned long addr, void *data)
{
diff --git a/arch/s390/include/asm/set_memory.h b/arch/s390/include/asm/set_memory.h
index 94092f4ae764..fc73652e5715 100644
--- a/arch/s390/include/asm/set_memory.h
+++ b/arch/s390/include/asm/set_memory.h
@@ -63,6 +63,8 @@ __SET_MEMORY_FUNC(set_memory_4k, SET_MEMORY_4K)
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
+int folio_zap_direct_map(struct folio *folio);
+int folio_restore_direct_map(struct folio *folio);
bool kernel_page_present(struct page *page);

#endif
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index d3ce04a4b248..df4a487b484d 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -412,6 +412,24 @@ int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
return __set_memory((unsigned long)page_to_virt(page), nr, flags);
}

+int folio_zap_direct_map(struct folio *folio)
+{
+ unsigned long addr = (unsigned long)folio_address(folio);
+ int ret;
+
+ ret = set_direct_map_valid_noflush(folio_page(folio, 0),
+ folio_nr_pages(folio), false);
+ flush_tlb_kernel_range(addr, addr + folio_size(folio));
+
+ return ret;
+}
+
+int folio_restore_direct_map(struct folio *folio)
+{
+ return set_direct_map_valid_noflush(folio_page(folio, 0),
+ folio_nr_pages(folio), true);
+}
+
bool kernel_page_present(struct page *page)
{
unsigned long addr;
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index 61f56cdaccb5..7208af609121 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -90,6 +90,8 @@ int set_pages_rw(struct page *page, int numpages);
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
+int folio_zap_direct_map(struct folio *folio);
+int folio_restore_direct_map(struct folio *folio);
bool kernel_page_present(struct page *page);

extern int kernel_set_to_readonly;
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 6c6eb486f7a6..3f0fc30eb320 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2656,6 +2656,26 @@ int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
return __set_pages_np(page, nr);
}

+int folio_zap_direct_map(struct folio *folio)
+{
+ unsigned long addr = (unsigned long)folio_address(folio);
+ int ret;
+
+ ret = set_direct_map_valid_noflush(folio_page(folio, 0),
+ folio_nr_pages(folio), false);
+ flush_tlb_kernel_range(addr, addr + folio_size(folio));
+
+ return ret;
+}
+EXPORT_SYMBOL_FOR_MODULES(folio_zap_direct_map, "kvm");
+
+int folio_restore_direct_map(struct folio *folio)
+{
+ return set_direct_map_valid_noflush(folio_page(folio, 0),
+ folio_nr_pages(folio), true);
+}
+EXPORT_SYMBOL_FOR_MODULES(folio_restore_direct_map, "kvm");
+
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index 3030d9245f5a..8d1c8a7f7d79 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -40,6 +40,16 @@ static inline int set_direct_map_valid_noflush(struct page *page,
return 0;
}

+static inline int folio_zap_direct_map(struct folio *folio)
+{
+ return 0;
+}
+
+static inline int folio_restore_direct_map(struct folio *folio)
+{
+ return 0;
+}
+
static inline bool kernel_page_present(struct page *page)
{
return true;
--
2.50.1