guest_memfd needs a way to unmap a folio from all userspace processes. This
is required as part of a folio's truncation process. The function
unmap_mapping_folio() provides exactly this functionality.

Move its declaration from the internal mm/internal.h to the public
include/linux/mm.h and export the symbol to KVM via EXPORT_SYMBOL_FOR_KVM().

unmap_mapping_folio() will be used by guest_memfd in a later patch to
implement a custom truncation function.

No functional change intended.

Signed-off-by: Ackerley Tng <[email protected]>
---
 include/linux/mm.h | 2 ++
 mm/internal.h      | 2 --
 mm/memory.c        | 2 ++
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7f04f1eaab15a..97fa861364590 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2690,6 +2690,7 @@ extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
 extern int fixup_user_fault(struct mm_struct *mm,
                            unsigned long address, unsigned int fault_flags,
                            bool *unlocked);
+void unmap_mapping_folio(struct folio *folio);
 void unmap_mapping_pages(struct address_space *mapping,
                pgoff_t start, pgoff_t nr, bool even_cows);
 void unmap_mapping_range(struct address_space *mapping,
@@ -2710,6 +2711,7 @@ static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
        BUG();
        return -EFAULT;
 }
+static inline void unmap_mapping_folio(struct folio *folio) { }
 static inline void unmap_mapping_pages(struct address_space *mapping,
                pgoff_t start, pgoff_t nr, bool even_cows) { }
 static inline void unmap_mapping_range(struct address_space *mapping,
diff --git a/mm/internal.h b/mm/internal.h
index f35dbcf99a86b..98351be76238b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -953,7 +953,6 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
 struct anon_vma *folio_anon_vma(const struct folio *folio);
 
 #ifdef CONFIG_MMU
-void unmap_mapping_folio(struct folio *folio);
 extern long populate_vma_page_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end, int *locked);
 extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
@@ -1131,7 +1130,6 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
        return fpin;
 }
 #else /* !CONFIG_MMU */
-static inline void unmap_mapping_folio(struct folio *folio) { }
 static inline void mlock_new_folio(struct folio *folio) { }
 static inline bool need_mlock_drain(int cpu) { return false; }
 static inline void mlock_drain_local(void) { }
diff --git a/mm/memory.c b/mm/memory.c
index da360a6eb8a48..983bb25517cb7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -78,6 +78,7 @@
 #include <linux/sched/sysctl.h>
 #include <linux/pgalloc.h>
 #include <linux/uaccess.h>
+#include <linux/kvm_types.h>
 
 #include <trace/events/kmem.h>
 
@@ -4244,6 +4245,7 @@ void unmap_mapping_folio(struct folio *folio)
                                         last_index, &details);
        i_mmap_unlock_read(mapping);
 }
+EXPORT_SYMBOL_FOR_KVM(unmap_mapping_folio);
 
 /**
  * unmap_mapping_pages() - Unmap pages from processes.
-- 
2.53.0.345.g96ddfc5eaa-goog


Reply via email to