For background, mm/userfaultfd.c provides a general mcopy_atomic
implementation. But some types of memory (e.g., hugetlb and shmem) need
a slightly different implementation, so they provide their own helpers
for this. In other words, userfaultfd is the only caller of these
helpers.

This patch achieves two things:

1. Don't spend time compiling code which will end up never being
referenced anyway (a small build time optimization).

2. In future patches (e.g. [1]), we plan to extend the signature of
these helpers with UFFD-specific state (e.g., enums or structs defined
conditionally in userfaultfd_k.h). Once this happens, this patch will be
needed to avoid build errors (or, we'd need to define more UFFD-only
stuff unconditionally, which seems messier to me).

Peter Xu suggested this be sent as a standalone patch, in the mailing
list discussion for [1].

[1] https://patchwork.kernel.org/project/linux-mm/list/?series=424091

Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
---
 include/linux/hugetlb.h | 4 ++++
 mm/hugetlb.c            | 2 ++
 2 files changed, 6 insertions(+)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ebca2ef02212..749701b5c153 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -134,11 +134,13 @@ void hugetlb_show_meminfo(void);
 unsigned long hugetlb_total_pages(void);
 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);
+#ifdef CONFIG_USERFAULTFD
 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
                                struct vm_area_struct *dst_vma,
                                unsigned long dst_addr,
                                unsigned long src_addr,
                                struct page **pagep);
+#endif
 int hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                struct vm_area_struct *vma,
                                                vm_flags_t vm_flags);
@@ -308,6 +310,7 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather 
*tlb,
        BUG();
 }
 
+#ifdef CONFIG_USERFAULTFD
 static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                                                pte_t *dst_pte,
                                                struct vm_area_struct *dst_vma,
@@ -318,6 +321,7 @@ static inline int hugetlb_mcopy_atomic_pte(struct mm_struct 
*dst_mm,
        BUG();
        return 0;
 }
+#endif
 
 static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
                                        unsigned long sz)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 18f6ee317900..821bfa9c0c80 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4615,6 +4615,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct 
vm_area_struct *vma,
        return ret;
 }
 
+#ifdef CONFIG_USERFAULTFD
 /*
  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
  * modifications for huge pages.
@@ -4745,6 +4746,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        put_page(page);
        goto out;
 }
+#endif
 
 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                         struct page **pages, struct vm_area_struct **vmas,
-- 
2.30.0.365.g02bc693789-goog

Reply via email to