Hi,

Any comments on this?

On Tue, May 14, 2019 at 05:29:55PM +0300, Mike Rapoport wrote:
> When get_user_pages*() is called with pages = NULL, the processing of
> VM_FAULT_RETRY terminates early without actually retrying to fault in
> all the pages.
> 
> If the pages in the requested range belong to a VMA that has userfaultfd
> registered, handle_userfault() returns VM_FAULT_RETRY *after* user space
> has populated the page, but for the gup pre-fault case there is no actual
> retry and the caller gets no pages even though they are now present.
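> 
> (For context, a minimal user-space sketch of the kind of registration
> CRIU performs for post-copy restore; the helper name and error handling
> are illustrative, not CRIU code:
> 
> 	#include <fcntl.h>
> 	#include <unistd.h>
> 	#include <sys/ioctl.h>
> 	#include <sys/syscall.h>
> 	#include <linux/userfaultfd.h>
> 
> 	/* Register [addr, addr + len) so that missing-page faults are
> 	 * delivered to user space instead of filled by the kernel. */
> 	static int uffd_register(void *addr, unsigned long len)
> 	{
> 		int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
> 		struct uffdio_api api = { .api = UFFD_API };
> 		struct uffdio_register reg = {
> 			.range = { .start = (unsigned long)addr, .len = len },
> 			.mode  = UFFDIO_REGISTER_MODE_MISSING,
> 		};
> 
> 		if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api) < 0 ||
> 		    ioctl(uffd, UFFDIO_REGISTER, &reg) < 0)
> 			return -1;
> 		return uffd;
> 	}
> 
> Any fault in such a region goes through handle_userfault(), which is
> where the VM_FAULT_RETRY above comes from.)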
> 
> This issue was uncovered when running post-copy memory restore in CRIU
> after commit d9c9ce34ed5c ("x86/fpu: Fault-in user stack if
> copy_fpstate_to_sigframe() fails").
> 
> After that commit, copying the FPU state to the sigframe switched from
> copy_to_user() variants, which trigger a real page fault, to
> get_user_pages() with the pages parameter set to NULL.
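> 
> (Roughly, the fault-in path after that commit looks like the sketch
> below; buf_fx and size stand in for the actual sigframe pointer and
> xstate size, the v5.1-era get_user_pages_unlocked() signature is
> assumed, and the exact hunk in d9c9ce34ed5c may differ:
> 
> 	/* Pre-fault the user stack pages backing the sigframe.  With
> 	 * pages == NULL the caller only wants the pages faulted in,
> 	 * not pinned, so no put_page() is needed afterwards. */
> 	unsigned long addr = (unsigned long)buf_fx;
> 	int nr_pages = DIV_ROUND_UP(offset_in_page(addr) + size,
> 				    PAGE_SIZE);
> 
> 	ret = get_user_pages_unlocked(addr, nr_pages, NULL, FOLL_WRITE);
> 
> It is this pages == NULL path that returned early on VM_FAULT_RETRY.)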
> 
> In the post-copy mode of CRIU, the destination memory is managed with
> userfaultfd, and the lack of a retry in the pre-fault case of
> get_user_pages() causes the restored process to crash.
> 
> Making the pre-fault behavior of get_user_pages() the same as the "normal"
> one fixes the issue.
> 
> Fixes: d9c9ce34ed5c ("x86/fpu: Fault-in user stack if copy_fpstate_to_sigframe() fails")
> Signed-off-by: Mike Rapoport <[email protected]>
> ---
>  mm/gup.c | 15 ++++++++-------
>  1 file changed, 8 insertions(+), 7 deletions(-)
> 
> diff --git a/mm/gup.c b/mm/gup.c
> index 91819b8..c32ae5a 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -936,10 +936,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
>                       BUG_ON(ret >= nr_pages);
>               }
>  
> -             if (!pages)
> -                     /* If it's a prefault don't insist harder */
> -                     return ret;
> -
>               if (ret > 0) {
>                       nr_pages -= ret;
>                       pages_done += ret;
> @@ -955,8 +951,12 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
>                               pages_done = ret;
>                       break;
>               }
> -             /* VM_FAULT_RETRY triggered, so seek to the faulting offset */
> -             pages += ret;
> +             /*
> +              * VM_FAULT_RETRY triggered, so seek to the faulting offset.
> +              * For the prefault case (!pages) we only update counts.
> +              */
> +             if (likely(pages))
> +                     pages += ret;
>               start += ret << PAGE_SHIFT;
>  
>               /*
> @@ -979,7 +979,8 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
>               pages_done++;
>               if (!nr_pages)
>                       break;
> -             pages++;
> +             if (likely(pages))
> +                     pages++;
>               start += PAGE_SIZE;
>       }
>       if (lock_dropped && *locked) {
> -- 
> 2.7.4
> 

-- 
Sincerely yours,
Mike.
