On Thu 12-07-18 14:34:00, David Rientjes wrote:
[...]
> diff --git a/mm/oom_kill.c b/mm/oom_kill.c
> index 0fe4087d5151..e6328cef090f 100644
> --- a/mm/oom_kill.c
> +++ b/mm/oom_kill.c
> @@ -488,9 +488,11 @@ void __oom_reap_task_mm(struct mm_struct *mm)
>        * Tell all users of get_user/copy_from_user etc... that the content
>        * is no longer stable. No barriers really needed because unmapping
>        * should imply barriers already and the reader would hit a page fault
> -      * if it stumbled over a reaped memory.
> +      * if it stumbled over a reaped memory. If MMF_UNSTABLE is already set,
> +      * reaping has already occurred so there is nothing left to do.
>        */
> -     set_bit(MMF_UNSTABLE, &mm->flags);
> +     if (test_and_set_bit(MMF_UNSTABLE, &mm->flags))
> +             return;

This could lead to premature oom victim selection:
oom_reaper                      exiting victim
oom_reap_task                   exit_mmap
  __oom_reap_task_mm              __oom_reap_task_mm
                                    test_and_set_bit(MMF_UNSTABLE) # wins the race
  test_and_set_bit(MMF_UNSTABLE)
set_bit(MMF_OOM_SKIP) # new victim can be selected now.
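
To make the window explicit, here is a minimal sketch of the reaper side
with the proposed test_and_set_bit() change applied (function names follow
the kernel, bodies heavily abbreviated, illustrative only):

static void oom_reap_task(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->signal->oom_mm;

	/*
	 * If the exiting victim already set MMF_UNSTABLE in exit_mmap(),
	 * __oom_reap_task_mm() now returns before unmapping anything, so
	 * this "succeeds" without having freed any memory.
	 */
	oom_reap_task_mm(tsk, mm);

	/*
	 * ... yet the victim is declared done here, so select_bad_process()
	 * can pick a new victim while the exiting task is still releasing
	 * its address space.
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);
}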

Besides that, why should we back off in the first place? We can let the
two race without any problem AFAICS. We already have proper
synchronization between them due to mmap_sem and MMF_OOM_SKIP (a rough
sketch of the resulting ordering follows after the diff).

diff --git a/mm/mmap.c b/mm/mmap.c
index fc41c0543d7f..4642964f7741 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3073,9 +3073,7 @@ void exit_mmap(struct mm_struct *mm)
                 * which clears VM_LOCKED, otherwise the oom reaper cannot
                 * reliably test it.
                 */
-               mutex_lock(&oom_lock);
                __oom_reap_task_mm(mm);
-               mutex_unlock(&oom_lock);
 
                set_bit(MMF_OOM_SKIP, &mm->flags);
                down_write(&mm->mmap_sem);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 32e6f7becb40..f11108af122d 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -529,28 +529,9 @@ void __oom_reap_task_mm(struct mm_struct *mm)
 
 static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
 {
-       bool ret = true;
-
-       /*
-        * We have to make sure to not race with the victim exit path
-        * and cause premature new oom victim selection:
-        * oom_reap_task_mm             exit_mm
-        *   mmget_not_zero
-        *                                mmput
-        *                                  atomic_dec_and_test
-        *                                exit_oom_victim
-        *                              [...]
-        *                              out_of_memory
-        *                                select_bad_process
-        *                                  # no TIF_MEMDIE task selects new victim
-        *  unmap_page_range # frees some memory
-        */
-       mutex_lock(&oom_lock);
-
        if (!down_read_trylock(&mm->mmap_sem)) {
-               ret = false;
                trace_skip_task_reaping(tsk->pid);
-               goto unlock_oom;
+               return false;
        }
 
        /*
@@ -562,7 +543,7 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
        if (mm_has_blockable_invalidate_notifiers(mm)) {
                up_read(&mm->mmap_sem);
                schedule_timeout_idle(HZ);
-               goto unlock_oom;
+               return true;
        }
 
        /*
@@ -589,9 +570,7 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
        up_read(&mm->mmap_sem);
 
        trace_finish_task_reaping(tsk->pid);
-unlock_oom:
-       mutex_unlock(&oom_lock);
-       return ret;
+       return true;
 }
 
 #define MAX_OOM_REAP_RETRIES 10
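
For completeness, the exit_mmap() side with the above applied looks roughly
like this (heavily abbreviated, illustrative only; the real unmapping and
page table freeing happens after the quoted block):

void exit_mmap(struct mm_struct *mm)
{
	/* ... mmu_notifier_release(), munlock of VM_LOCKED vmas ... */

	if (unlikely(mm_is_oom_victim(mm))) {
		/* reap without oom_lock; MMF_UNSTABLE ordering is enough */
		__oom_reap_task_mm(mm);

		/* from now on nobody should consider this mm reapable */
		set_bit(MMF_OOM_SKIP, &mm->flags);

		/*
		 * Taking mmap_sem for write after setting MMF_OOM_SKIP
		 * guarantees the oom reaper will not run on this mm again
		 * once the lock is dropped, because oom_reap_task_mm()
		 * only works under down_read_trylock().
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}

	/* ... unmap_vmas(), free_pgtables(), remove_vma() loop ... */
}
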
-- 
Michal Hocko
SUSE Labs
