For now, FAULT_FLAG_UNSHARE only applies to anonymous pages, which
implies a COW mapping. Let's hide FAULT_FLAG_UNSHARE early if we're not
dealing with a COW mapping, such that we treat it like a read fault as
documented and don't have to worry about the flag throughout all fault
handlers.
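
For reference, the COW-mapping test used below is the existing
is_cow_mapping() helper; it boils down to "private mapping that may be
written to", roughly:

    /* Private (!VM_SHARED) but potentially writable: a COW mapping. */
    static inline bool is_cow_mapping(vm_flags_t flags)
    {
            return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
    }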

While at it, centralize the check that FAULT_FLAG_UNSHARE and
FAULT_FLAG_WRITE are mutually exclusive, and simply drop the check that
either flag is set from the WP handlers.
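
A minimal sketch of what a WP handler can rely on afterwards (the line
below mirrors what do_wp_page() already does; it is illustrative, not
part of the patch):

    /*
     * sanitize_fault_flags() has already rejected UNSHARE+WRITE and
     * cleared UNSHARE for non-COW mappings, so this is all that is
     * needed to distinguish the unshare case from an ordinary write.
     */
    const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;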

Signed-off-by: David Hildenbrand <da...@redhat.com>
---
 mm/huge_memory.c |  3 ---
 mm/hugetlb.c     |  5 -----
 mm/memory.c      | 23 ++++++++++++++++++++---
 3 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1d47b3f7b877..7173756d6868 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1267,9 +1267,6 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
        vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
        VM_BUG_ON_VMA(!vma->anon_vma, vma);
 
-       VM_BUG_ON(unshare && (vmf->flags & FAULT_FLAG_WRITE));
-       VM_BUG_ON(!unshare && !(vmf->flags & FAULT_FLAG_WRITE));
-
        if (is_huge_zero_pmd(orig_pmd))
                goto fallback;
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index be572af75d9c..3672c7e06748 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5316,9 +5316,6 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long haddr = address & huge_page_mask(h);
        struct mmu_notifier_range range;
 
-       VM_BUG_ON(unshare && (flags & FOLL_WRITE));
-       VM_BUG_ON(!unshare && !(flags & FOLL_WRITE));
-
        /*
         * hugetlb does not support FOLL_FORCE-style write faults that keep the
         * PTE mapped R/O such as maybe_mkwrite() would do.
@@ -5328,8 +5325,6 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 
        /* Let's take out MAP_SHARED mappings first. */
        if (vma->vm_flags & VM_MAYSHARE) {
-               if (unlikely(unshare))
-                       return 0;
                set_huge_ptep_writable(vma, haddr, ptep);
                return 0;
        }
diff --git a/mm/memory.c b/mm/memory.c
index 78e2c58f6f31..fe131273217a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3343,9 +3343,6 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
        struct vm_area_struct *vma = vmf->vma;
        struct folio *folio;
 
-       VM_BUG_ON(unshare && (vmf->flags & FAULT_FLAG_WRITE));
-       VM_BUG_ON(!unshare && !(vmf->flags & FAULT_FLAG_WRITE));
-
        if (likely(!unshare)) {
                if (userfaultfd_pte_wp(vma, *vmf->pte)) {
                        pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -5150,6 +5147,22 @@ static void lru_gen_exit_fault(void)
 }
 #endif /* CONFIG_LRU_GEN */
 
+static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
+                                      unsigned int *flags)
+{
+       if (unlikely(*flags & FAULT_FLAG_UNSHARE)) {
+               if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE))
+                       return VM_FAULT_SIGSEGV;
+               /*
+                * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's
+                * just treat it like an ordinary read-fault otherwise.
+                */
+               if (!is_cow_mapping(vma->vm_flags))
+                       *flags &= ~FAULT_FLAG_UNSHARE;
+       }
+       return 0;
+}
+
 /*
  * By the time we get here, we already hold the mm semaphore
  *
@@ -5166,6 +5179,10 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
        count_vm_event(PGFAULT);
        count_memcg_event_mm(vma->vm_mm, PGFAULT);
 
+       ret = sanitize_fault_flags(vma, &flags);
+       if (ret)
+               return ret;
+
        if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
                                            flags & FAULT_FLAG_INSTRUCTION,
                                            flags & FAULT_FLAG_REMOTE))
-- 
2.38.1
