In preparation for adding an mmap_assert_locked() check in
__get_user_pages(), teach the mmap_assert_*locked() helpers that it's fine
to operate on an mm without locking in the middle of execve() as long as
that mm hasn't been installed on a process yet.
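
For context, the follow-up check this prepares for looks roughly like the
sketch below (illustrative only; the placement and the __get_user_pages()
signature are assumptions based on the current tree and may differ in the
actual follow-up patch):

  static long __get_user_pages(struct mm_struct *mm, unsigned long start,
                               unsigned long nr_pages, unsigned int gup_flags,
                               struct page **pages, struct vm_area_struct **vmas,
                               int *locked)
  {
          /*
           * Planned follow-up (not part of this patch): complain if the
           * caller walks the mm without holding mmap_lock.  Without the
           * relaxation below, this would fire on the execve() paths listed
           * next, which legitimately run without the lock.
           */
          mmap_assert_locked(mm);
          /* ... existing GUP logic unchanged ... */
  }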

Existing code paths that do this are (reverse callgraph; see the
get_arg_page() excerpt below the list):

  get_user_pages_remote
    get_arg_page
      copy_strings
      copy_string_kernel
      remove_arg_zero
    tomoyo_dump_page
      tomoyo_print_bprm
      tomoyo_scan_bprm
      tomoyo_environ
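
The exec side funnels through get_arg_page(); tomoyo_dump_page() performs
the same kind of unlocked GUP on bprm->mm.  Simplified excerpt of
get_arg_page() (abridged from fs/exec.c; the exact get_user_pages_remote()
argument list depends on the tree):

  static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                                   int write)
  {
          struct page *page;
          int ret;
          unsigned int gup_flags = FOLL_FORCE;

          if (write)
                  gup_flags |= FOLL_WRITE;

          /* (stack size checks and acct_arg_size() bookkeeping omitted) */

          /*
           * No mmap_lock is taken here: bprm->mm was freshly created by
           * execve() and hasn't been installed via exec_mmap() yet, so no
           * other task can reach it.  This is the case the relaxed
           * mmap_assert_*locked() helpers have to tolerate.
           */
          ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags,
                                      &page, NULL, NULL);
          if (ret <= 0)
                  return NULL;

          return page;
  }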

Signed-off-by: Jann Horn <ja...@google.com>
---
 fs/exec.c                 |  8 ++++++++
 include/linux/mm_types.h  |  9 +++++++++
 include/linux/mmap_lock.h | 16 ++++++++++++----
 3 files changed, 29 insertions(+), 4 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index a91003e28eaa..c02b0e8e1c0b 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1129,6 +1129,14 @@ static int exec_mmap(struct mm_struct *mm)
                }
        }

+#if defined(CONFIG_LOCKDEP) || defined(CONFIG_DEBUG_VM)
+       /*
+        * From here on, the mm may be accessed concurrently, and proper locking
+        * is required for things like get_user_pages_remote().
+        */
+       mm->mmap_lock_required = 1;
+#endif
+
        task_lock(tsk);
        active_mm = tsk->active_mm;
        membarrier_exec_mmap(mm);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index ed028af3cb19..89fee0d0d652 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -552,6 +552,15 @@ struct mm_struct {
                atomic_long_t hugetlb_usage;
 #endif
                struct work_struct async_put_work;
+#if defined(CONFIG_LOCKDEP) || defined(CONFIG_DEBUG_VM)
+               /*
+                * Notes whether this mm has been installed on a process yet.
+                * If not, only the task going through execve() can access this
+                * mm, and no locking is needed around get_user_pages_remote().
+                * This flag is only used for debug checks.
+                */
+               bool mmap_lock_required;
+#endif
        } __randomize_layout;

        /*
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 0707671851a8..c4fd874954d7 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -77,14 +77,22 @@ static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)

 static inline void mmap_assert_locked(struct mm_struct *mm)
 {
-       lockdep_assert_held(&mm->mmap_lock);
-       VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+#if defined(CONFIG_LOCKDEP) || defined(CONFIG_DEBUG_VM)
+       if (mm->mmap_lock_required) {
+               lockdep_assert_held(&mm->mmap_lock);
+               VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+       }
+#endif
 }

 static inline void mmap_assert_write_locked(struct mm_struct *mm)
 {
-       lockdep_assert_held_write(&mm->mmap_lock);
-       VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+#if defined(CONFIG_LOCKDEP) || defined(CONFIG_DEBUG_VM)
+       if (mm->mmap_lock_required) {
+               lockdep_assert_held_write(&mm->mmap_lock);
+               VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+       }
+#endif
 }

 #endif /* _LINUX_MMAP_LOCK_H */
-- 
2.28.0.709.gb0816b6eb0-goog
