Re: [PATCH] vfio/type1: Remove locked page accounting workqueue

2017-04-06 Thread Auger Eric
Hi Alex,

On 06/04/2017 16:43, Alex Williamson wrote:
> On Thu, 6 Apr 2017 10:23:59 +0200
> Auger Eric  wrote:
> 
>> Hi Alex,
>>
>> On 03/04/2017 22:02, Alex Williamson wrote:
>>> If the mmap_sem is contended then the vfio type1 IOMMU backend will
>>> defer locked page accounting updates to a workqueue task.  This has
>>> a few problems and depending on which side the user tries to play,
>>> they might be over-penalized for unmaps that haven't yet been
>>> accounted, or able to race the workqueue to enter more mappings
>>> than they're allowed.  It's not entirely clear what motivated this
>>> workqueue mechanism in the original vfio design, but it seems to
>>> introduce more problems than it solves, so remove it and update the
>>> callers to allow for failure.  We can also now recheck the limit
>>> under write lock to make sure we don't exceed it.
>>>
>>> Cc: sta...@vger.kernel.org
>>> Signed-off-by: Alex Williamson 
>>> ---
>>>
>>> Sergio had proposed a QEMU workaround for this:
>>> https://lists.nongnu.org/archive/html/qemu-devel/2017-04/msg00244.html
>>> Clearly the bug is in the kernel and I'm more inclined to fix it via
>>> stable releases.  I also considered adding a flag in the type1 info
>>> structure to indicate synchronous lock accounting, but then second
>>> guessed using that to advertise the defect, especially if the workaround
>>> is only to pause and try again.  Comments/suggestions?  Thanks,
>>>
>>> Alex
>>>
>>>  drivers/vfio/vfio_iommu_type1.c |   99 
>>> ++-
>>>  1 file changed, 45 insertions(+), 54 deletions(-)
>>>
>>> diff --git a/drivers/vfio/vfio_iommu_type1.c 
>>> b/drivers/vfio/vfio_iommu_type1.c
>>> index 32d2633092a3..d93a88748d14 100644
>>> --- a/drivers/vfio/vfio_iommu_type1.c
>>> +++ b/drivers/vfio/vfio_iommu_type1.c
>>> @@ -246,69 +246,45 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma 
>>> *dma, struct vfio_pfn *vpfn)
>>> return ret;
>>>  }
>>>  
>>> -struct vwork {
>>> -   struct mm_struct*mm;
>>> -   longnpage;
>>> -   struct work_struct  work;
>>> -};
>>> -
>>> -/* delayed decrement/increment for locked_vm */
>>> -static void vfio_lock_acct_bg(struct work_struct *work)
>>> +static int vfio_lock_acct(struct task_struct *task, long npage)
>>>  {
>>> -   struct vwork *vwork = container_of(work, struct vwork, work);
>>> -   struct mm_struct *mm;
>>> -
>>> -   mm = vwork->mm;
>>> -   down_write(>mmap_sem);
>>> -   mm->locked_vm += vwork->npage;
>>> -   up_write(>mmap_sem);
>>> -   mmput(mm);
>>> -   kfree(vwork);
>>> -}
>>> -
>>> -static void vfio_lock_acct(struct task_struct *task, long npage)
>>> -{
>>> -   struct vwork *vwork;
>>> struct mm_struct *mm;
>>> bool is_current;
>>> +   int ret;
>>>  
>>> if (!npage)
>>> -   return;
>>> +   return 0;
>>>  
>>> is_current = (task->mm == current->mm);
>>>  
>>> mm = is_current ? task->mm : get_task_mm(task);
>>> if (!mm)
>>> -   return; /* process exited */
>>> +   return -ESRCH; /* process exited */
>>>  
>>> -   if (down_write_trylock(>mmap_sem)) {
>>> +   ret = down_write_killable(>mmap_sem);
>>> +   if (ret)  
>> don't you miss mmput(mm) if (!is_current)?
> 
> Yes!  I'm going to change this to if (!ret) {...
>  
>>> +   return ret;
> 
> Remove this
> 
>>> +
>>> +   if (npage < 0) {
>>> mm->locked_vm += npage;
>>> -   up_write(>mmap_sem);
>>> -   if (!is_current)
>>> -   mmput(mm);
>>> -   return;
>>> -   }
>>> +   } else {
>>> +   unsigned long limit;
>>> +
>>> +   limit = is_current ? rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT :
>>> +   task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
>>>  
>>> -   if (is_current) {
>>> -   mm = get_task_mm(task);
>>> -   if (!mm)
>>> -   return;
>>> +   if (mm->locked_vm + npage <= limit)
>>> +   mm->locked_vm += npage;
>>> +   else
>>> +   ret = -ENOMEM;
>>> }
>>>  
>>> -   /*
>>> -* Couldn't get mmap_sem lock, so must setup to update
>>> -* mm->locked_vm later. If locked_vm were atomic, we
>>> -* wouldn't need this silliness
>>> -*/
>>> -   vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
>>> -   if (WARN_ON(!vwork)) {
>>> +   up_write(>mmap_sem);
> 
> And end the (!ret) branch here }
> 
> So if we don't get mmap_sem, we skip to here, mmput, and return the
> error.
> 
>>> +
>>> +   if (!is_current)
>>> mmput(mm);
>>> -   return;
>>> -   }
>>> -   INIT_WORK(>work, vfio_lock_acct_bg);
>>> -   vwork->mm = mm;
>>> -   vwork->npage = npage;
>>> -   schedule_work(>work);
>>> +
>>> +   return ret;
>>>  }
>>>  
>>>  /*
>>> @@ -405,7 +381,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned 
>>> long vaddr,
>>>  static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long 
>>> vaddr,
>>>

Re: [PATCH] vfio/type1: Remove locked page accounting workqueue

2017-04-06 Thread Auger Eric
Hi Alex,

On 06/04/2017 16:43, Alex Williamson wrote:
> On Thu, 6 Apr 2017 10:23:59 +0200
> Auger Eric  wrote:
> 
>> Hi Alex,
>>
>> On 03/04/2017 22:02, Alex Williamson wrote:
>>> If the mmap_sem is contented then the vfio type1 IOMMU backend will
>>> defer locked page accounting updates to a workqueue task.  This has
>>> a few problems and depending on which side the user tries to play,
>>> they might be over-penalized for unmaps that haven't yet been
>>> accounted, or able to race the workqueue to enter more mappings
>>> than they're allowed.  It's not entirely clear what motivated this
>>> workqueue mechanism in the original vfio design, but it seems to
>>> introduce more problems than it solves, so remove it and update the
>>> callers to allow for failure.  We can also now recheck the limit
>>> under write lock to make sure we don't exceed it.
>>>
>>> Cc: sta...@vger.kernel.org
>>> Signed-off-by: Alex Williamson 
>>> ---
>>>
>>> Sergio had proposed a QEMU workaround for this:
>>> https://lists.nongnu.org/archive/html/qemu-devel/2017-04/msg00244.html
>>> Clearly the bug is in the kernel and I'm more inclined to fix it via
>>> stable releases.  I also considered adding a flag in the type1 info
>>> structure to indicate synchronous lock accounting, but then second
>>> guessed using that to advertise the defect, especially if the workaround
>>> is only to pause and try again.  Comments/suggestions?  Thanks,
>>>
>>> Alex
>>>
>>>  drivers/vfio/vfio_iommu_type1.c |   99 
>>> ++-
>>>  1 file changed, 45 insertions(+), 54 deletions(-)
>>>
>>> diff --git a/drivers/vfio/vfio_iommu_type1.c 
>>> b/drivers/vfio/vfio_iommu_type1.c
>>> index 32d2633092a3..d93a88748d14 100644
>>> --- a/drivers/vfio/vfio_iommu_type1.c
>>> +++ b/drivers/vfio/vfio_iommu_type1.c
>>> @@ -246,69 +246,45 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma 
>>> *dma, struct vfio_pfn *vpfn)
>>> return ret;
>>>  }
>>>  
>>> -struct vwork {
>>> -   struct mm_struct*mm;
>>> -   longnpage;
>>> -   struct work_struct  work;
>>> -};
>>> -
>>> -/* delayed decrement/increment for locked_vm */
>>> -static void vfio_lock_acct_bg(struct work_struct *work)
>>> +static int vfio_lock_acct(struct task_struct *task, long npage)
>>>  {
>>> -   struct vwork *vwork = container_of(work, struct vwork, work);
>>> -   struct mm_struct *mm;
>>> -
>>> -   mm = vwork->mm;
>>> -   down_write(>mmap_sem);
>>> -   mm->locked_vm += vwork->npage;
>>> -   up_write(>mmap_sem);
>>> -   mmput(mm);
>>> -   kfree(vwork);
>>> -}
>>> -
>>> -static void vfio_lock_acct(struct task_struct *task, long npage)
>>> -{
>>> -   struct vwork *vwork;
>>> struct mm_struct *mm;
>>> bool is_current;
>>> +   int ret;
>>>  
>>> if (!npage)
>>> -   return;
>>> +   return 0;
>>>  
>>> is_current = (task->mm == current->mm);
>>>  
>>> mm = is_current ? task->mm : get_task_mm(task);
>>> if (!mm)
>>> -   return; /* process exited */
>>> +   return -ESRCH; /* process exited */
>>>  
>>> -   if (down_write_trylock(>mmap_sem)) {
>>> +   ret = down_write_killable(>mmap_sem);
>>> +   if (ret)  
>> don't you miss mmput(mm) if (!is_current)?
> 
> Yes!  I'm going to change this it if (!ret) {...
>  
>>> +   return ret;
> 
> Remove this
> 
>>> +
>>> +   if (npage < 0) {
>>> mm->locked_vm += npage;
>>> -   up_write(>mmap_sem);
>>> -   if (!is_current)
>>> -   mmput(mm);
>>> -   return;
>>> -   }
>>> +   } else {
>>> +   unsigned long limit;
>>> +
>>> +   limit = is_current ? rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT :
>>> +   task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
>>>  
>>> -   if (is_current) {
>>> -   mm = get_task_mm(task);
>>> -   if (!mm)
>>> -   return;
>>> +   if (mm->locked_vm + npage <= limit)
>>> +   mm->locked_vm += npage;
>>> +   else
>>> +   ret = -ENOMEM;
>>> }
>>>  
>>> -   /*
>>> -* Couldn't get mmap_sem lock, so must setup to update
>>> -* mm->locked_vm later. If locked_vm were atomic, we
>>> -* wouldn't need this silliness
>>> -*/
>>> -   vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
>>> -   if (WARN_ON(!vwork)) {
>>> +   up_write(>mmap_sem);
> 
> And end the (!ret) branch here }
> 
> So if we don't get mmap_sem, we skip to here, mmput, and return the
> error.
> 
>>> +
>>> +   if (!is_current)
>>> mmput(mm);
>>> -   return;
>>> -   }
>>> -   INIT_WORK(>work, vfio_lock_acct_bg);
>>> -   vwork->mm = mm;
>>> -   vwork->npage = npage;
>>> -   schedule_work(>work);
>>> +
>>> +   return ret;
>>>  }
>>>  
>>>  /*
>>> @@ -405,7 +381,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned 
>>> long vaddr,
>>>  static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long 
>>> vaddr,
>>>   long npage, unsigned 

Re: [PATCH] vfio/type1: Remove locked page accounting workqueue

2017-04-06 Thread Alex Williamson
On Thu, 6 Apr 2017 10:23:59 +0200
Auger Eric  wrote:

> Hi Alex,
> 
> On 03/04/2017 22:02, Alex Williamson wrote:
> > If the mmap_sem is contented then the vfio type1 IOMMU backend will
> > defer locked page accounting updates to a workqueue task.  This has
> > a few problems and depending on which side the user tries to play,
> > they might be over-penalized for unmaps that haven't yet been
> > accounted, or able to race the workqueue to enter more mappings
> > than they're allowed.  It's not entirely clear what motivated this
> > workqueue mechanism in the original vfio design, but it seems to
> > introduce more problems than it solves, so remove it and update the
> > callers to allow for failure.  We can also now recheck the limit
> > under write lock to make sure we don't exceed it.
> > 
> > Cc: sta...@vger.kernel.org
> > Signed-off-by: Alex Williamson 
> > ---
> > 
> > Sergio had proposed a QEMU workaround for this:
> > https://lists.nongnu.org/archive/html/qemu-devel/2017-04/msg00244.html
> > Clearly the bug is in the kernel and I'm more inclined to fix it via
> > stable releases.  I also considered adding a flag in the type1 info
> > structure to indicate synchronous lock accounting, but then second
> > guessed using that to advertise the defect, especially if the workaround
> > is only to pause and try again.  Comments/suggestions?  Thanks,
> > 
> > Alex
> > 
> >  drivers/vfio/vfio_iommu_type1.c |   99 
> > ++-
> >  1 file changed, 45 insertions(+), 54 deletions(-)
> > 
> > diff --git a/drivers/vfio/vfio_iommu_type1.c 
> > b/drivers/vfio/vfio_iommu_type1.c
> > index 32d2633092a3..d93a88748d14 100644
> > --- a/drivers/vfio/vfio_iommu_type1.c
> > +++ b/drivers/vfio/vfio_iommu_type1.c
> > @@ -246,69 +246,45 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma 
> > *dma, struct vfio_pfn *vpfn)
> > return ret;
> >  }
> >  
> > -struct vwork {
> > -   struct mm_struct*mm;
> > -   longnpage;
> > -   struct work_struct  work;
> > -};
> > -
> > -/* delayed decrement/increment for locked_vm */
> > -static void vfio_lock_acct_bg(struct work_struct *work)
> > +static int vfio_lock_acct(struct task_struct *task, long npage)
> >  {
> > -   struct vwork *vwork = container_of(work, struct vwork, work);
> > -   struct mm_struct *mm;
> > -
> > -   mm = vwork->mm;
> > -   down_write(>mmap_sem);
> > -   mm->locked_vm += vwork->npage;
> > -   up_write(>mmap_sem);
> > -   mmput(mm);
> > -   kfree(vwork);
> > -}
> > -
> > -static void vfio_lock_acct(struct task_struct *task, long npage)
> > -{
> > -   struct vwork *vwork;
> > struct mm_struct *mm;
> > bool is_current;
> > +   int ret;
> >  
> > if (!npage)
> > -   return;
> > +   return 0;
> >  
> > is_current = (task->mm == current->mm);
> >  
> > mm = is_current ? task->mm : get_task_mm(task);
> > if (!mm)
> > -   return; /* process exited */
> > +   return -ESRCH; /* process exited */
> >  
> > -   if (down_write_trylock(>mmap_sem)) {
> > +   ret = down_write_killable(>mmap_sem);
> > +   if (ret)  
> don't you miss mmput(mm) if (!is_current)?

Yes!  I'm going to change this to if (!ret) {...
 
> > +   return ret;

Remove this

> > +
> > +   if (npage < 0) {
> > mm->locked_vm += npage;
> > -   up_write(>mmap_sem);
> > -   if (!is_current)
> > -   mmput(mm);
> > -   return;
> > -   }
> > +   } else {
> > +   unsigned long limit;
> > +
> > +   limit = is_current ? rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT :
> > +   task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
> >  
> > -   if (is_current) {
> > -   mm = get_task_mm(task);
> > -   if (!mm)
> > -   return;
> > +   if (mm->locked_vm + npage <= limit)
> > +   mm->locked_vm += npage;
> > +   else
> > +   ret = -ENOMEM;
> > }
> >  
> > -   /*
> > -* Couldn't get mmap_sem lock, so must setup to update
> > -* mm->locked_vm later. If locked_vm were atomic, we
> > -* wouldn't need this silliness
> > -*/
> > -   vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
> > -   if (WARN_ON(!vwork)) {
> > +   up_write(>mmap_sem);

And end the (!ret) branch here }

So if we don't get mmap_sem, we skip to here, mmput, and return the
error.

> > +
> > +   if (!is_current)
> > mmput(mm);
> > -   return;
> > -   }
> > -   INIT_WORK(>work, vfio_lock_acct_bg);
> > -   vwork->mm = mm;
> > -   vwork->npage = npage;
> > -   schedule_work(>work);
> > +
> > +   return ret;
> >  }
> >  
> >  /*
> > @@ -405,7 +381,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned 
> > long vaddr,
> >  static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long 
> > vaddr,
> >   long npage, unsigned long *pfn_base)
> >  {
> > -   

Re: [PATCH] vfio/type1: Remove locked page accounting workqueue

2017-04-06 Thread Alex Williamson
On Thu, 6 Apr 2017 10:23:59 +0200
Auger Eric  wrote:

> Hi Alex,
> 
> On 03/04/2017 22:02, Alex Williamson wrote:
> > If the mmap_sem is contented then the vfio type1 IOMMU backend will
> > defer locked page accounting updates to a workqueue task.  This has
> > a few problems and depending on which side the user tries to play,
> > they might be over-penalized for unmaps that haven't yet been
> > accounted, or able to race the workqueue to enter more mappings
> > than they're allowed.  It's not entirely clear what motivated this
> > workqueue mechanism in the original vfio design, but it seems to
> > introduce more problems than it solves, so remove it and update the
> > callers to allow for failure.  We can also now recheck the limit
> > under write lock to make sure we don't exceed it.
> > 
> > Cc: sta...@vger.kernel.org
> > Signed-off-by: Alex Williamson 
> > ---
> > 
> > Sergio had proposed a QEMU workaround for this:
> > https://lists.nongnu.org/archive/html/qemu-devel/2017-04/msg00244.html
> > Clearly the bug is in the kernel and I'm more inclined to fix it via
> > stable releases.  I also considered adding a flag in the type1 info
> > structure to indicate synchronous lock accounting, but then second
> > guessed using that to advertise the defect, especially if the workaround
> > is only to pause and try again.  Comments/suggestions?  Thanks,
> > 
> > Alex
> > 
> >  drivers/vfio/vfio_iommu_type1.c |   99 
> > ++-
> >  1 file changed, 45 insertions(+), 54 deletions(-)
> > 
> > diff --git a/drivers/vfio/vfio_iommu_type1.c 
> > b/drivers/vfio/vfio_iommu_type1.c
> > index 32d2633092a3..d93a88748d14 100644
> > --- a/drivers/vfio/vfio_iommu_type1.c
> > +++ b/drivers/vfio/vfio_iommu_type1.c
> > @@ -246,69 +246,45 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma 
> > *dma, struct vfio_pfn *vpfn)
> > return ret;
> >  }
> >  
> > -struct vwork {
> > -   struct mm_struct*mm;
> > -   longnpage;
> > -   struct work_struct  work;
> > -};
> > -
> > -/* delayed decrement/increment for locked_vm */
> > -static void vfio_lock_acct_bg(struct work_struct *work)
> > +static int vfio_lock_acct(struct task_struct *task, long npage)
> >  {
> > -   struct vwork *vwork = container_of(work, struct vwork, work);
> > -   struct mm_struct *mm;
> > -
> > -   mm = vwork->mm;
> > -   down_write(>mmap_sem);
> > -   mm->locked_vm += vwork->npage;
> > -   up_write(>mmap_sem);
> > -   mmput(mm);
> > -   kfree(vwork);
> > -}
> > -
> > -static void vfio_lock_acct(struct task_struct *task, long npage)
> > -{
> > -   struct vwork *vwork;
> > struct mm_struct *mm;
> > bool is_current;
> > +   int ret;
> >  
> > if (!npage)
> > -   return;
> > +   return 0;
> >  
> > is_current = (task->mm == current->mm);
> >  
> > mm = is_current ? task->mm : get_task_mm(task);
> > if (!mm)
> > -   return; /* process exited */
> > +   return -ESRCH; /* process exited */
> >  
> > -   if (down_write_trylock(>mmap_sem)) {
> > +   ret = down_write_killable(>mmap_sem);
> > +   if (ret)  
> don't you miss mmput(mm) if (!is_current)?

Yes!  I'm going to change this it if (!ret) {...
 
> > +   return ret;

Remove this

> > +
> > +   if (npage < 0) {
> > mm->locked_vm += npage;
> > -   up_write(>mmap_sem);
> > -   if (!is_current)
> > -   mmput(mm);
> > -   return;
> > -   }
> > +   } else {
> > +   unsigned long limit;
> > +
> > +   limit = is_current ? rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT :
> > +   task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
> >  
> > -   if (is_current) {
> > -   mm = get_task_mm(task);
> > -   if (!mm)
> > -   return;
> > +   if (mm->locked_vm + npage <= limit)
> > +   mm->locked_vm += npage;
> > +   else
> > +   ret = -ENOMEM;
> > }
> >  
> > -   /*
> > -* Couldn't get mmap_sem lock, so must setup to update
> > -* mm->locked_vm later. If locked_vm were atomic, we
> > -* wouldn't need this silliness
> > -*/
> > -   vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
> > -   if (WARN_ON(!vwork)) {
> > +   up_write(>mmap_sem);

And end the (!ret) branch here }

So if we don't get mmap_sem, we skip to here, mmput, and return the
error.

> > +
> > +   if (!is_current)
> > mmput(mm);
> > -   return;
> > -   }
> > -   INIT_WORK(>work, vfio_lock_acct_bg);
> > -   vwork->mm = mm;
> > -   vwork->npage = npage;
> > -   schedule_work(>work);
> > +
> > +   return ret;
> >  }
> >  
> >  /*
> > @@ -405,7 +381,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned 
> > long vaddr,
> >  static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long 
> > vaddr,
> >   long npage, unsigned long *pfn_base)
> >  {
> > -   unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> 

Re: [PATCH] vfio/type1: Remove locked page accounting workqueue

2017-04-06 Thread Auger Eric
Hi Alex,

On 03/04/2017 22:02, Alex Williamson wrote:
> If the mmap_sem is contented then the vfio type1 IOMMU backend will
> defer locked page accounting updates to a workqueue task.  This has
> a few problems and depending on which side the user tries to play,
> they might be over-penalized for unmaps that haven't yet been
> accounted, or able to race the workqueue to enter more mappings
> than they're allowed.  It's not entirely clear what motivated this
> workqueue mechanism in the original vfio design, but it seems to
> introduce more problems than it solves, so remove it and update the
> callers to allow for failure.  We can also now recheck the limit
> under write lock to make sure we don't exceed it.
> 
> Cc: sta...@vger.kernel.org
> Signed-off-by: Alex Williamson 
> ---
> 
> Sergio had proposed a QEMU workaround for this:
> https://lists.nongnu.org/archive/html/qemu-devel/2017-04/msg00244.html
> Clearly the bug is in the kernel and I'm more inclined to fix it via
> stable releases.  I also considered adding a flag in the type1 info
> structure to indicate synchronous lock accounting, but then second
> guessed using that to advertise the defect, especially if the workaround
> is only to pause and try again.  Comments/suggestions?  Thanks,
> 
> Alex
> 
>  drivers/vfio/vfio_iommu_type1.c |   99 
> ++-
>  1 file changed, 45 insertions(+), 54 deletions(-)
> 
> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> index 32d2633092a3..d93a88748d14 100644
> --- a/drivers/vfio/vfio_iommu_type1.c
> +++ b/drivers/vfio/vfio_iommu_type1.c
> @@ -246,69 +246,45 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, 
> struct vfio_pfn *vpfn)
>   return ret;
>  }
>  
> -struct vwork {
> - struct mm_struct*mm;
> - longnpage;
> - struct work_struct  work;
> -};
> -
> -/* delayed decrement/increment for locked_vm */
> -static void vfio_lock_acct_bg(struct work_struct *work)
> +static int vfio_lock_acct(struct task_struct *task, long npage)
>  {
> - struct vwork *vwork = container_of(work, struct vwork, work);
> - struct mm_struct *mm;
> -
> - mm = vwork->mm;
> - down_write(>mmap_sem);
> - mm->locked_vm += vwork->npage;
> - up_write(>mmap_sem);
> - mmput(mm);
> - kfree(vwork);
> -}
> -
> -static void vfio_lock_acct(struct task_struct *task, long npage)
> -{
> - struct vwork *vwork;
>   struct mm_struct *mm;
>   bool is_current;
> + int ret;
>  
>   if (!npage)
> - return;
> + return 0;
>  
>   is_current = (task->mm == current->mm);
>  
>   mm = is_current ? task->mm : get_task_mm(task);
>   if (!mm)
> - return; /* process exited */
> + return -ESRCH; /* process exited */
>  
> - if (down_write_trylock(>mmap_sem)) {
> + ret = down_write_killable(>mmap_sem);
> + if (ret)
don't you miss mmput(mm) if (!is_current)?

> + return ret;
> +
> + if (npage < 0) {
>   mm->locked_vm += npage;
> - up_write(>mmap_sem);
> - if (!is_current)
> - mmput(mm);
> - return;
> - }
> + } else {
> + unsigned long limit;
> +
> + limit = is_current ? rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT :
> + task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
>  
> - if (is_current) {
> - mm = get_task_mm(task);
> - if (!mm)
> - return;
> + if (mm->locked_vm + npage <= limit)
> + mm->locked_vm += npage;
> + else
> + ret = -ENOMEM;
>   }
>  
> - /*
> -  * Couldn't get mmap_sem lock, so must setup to update
> -  * mm->locked_vm later. If locked_vm were atomic, we
> -  * wouldn't need this silliness
> -  */
> - vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
> - if (WARN_ON(!vwork)) {
> + up_write(>mmap_sem);
> +
> + if (!is_current)
>   mmput(mm);
> - return;
> - }
> - INIT_WORK(>work, vfio_lock_acct_bg);
> - vwork->mm = mm;
> - vwork->npage = npage;
> - schedule_work(>work);
> +
> + return ret;
>  }
>  
>  /*
> @@ -405,7 +381,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned 
> long vaddr,
>  static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
> long npage, unsigned long *pfn_base)
>  {
> - unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
> + unsigned long pfn = 0, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
>   bool lock_cap = capable(CAP_IPC_LOCK);
>   long ret, pinned = 0, lock_acct = 0;
>   bool rsvd;
> @@ -442,8 +418,6 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, 
> unsigned long vaddr,
>   /* Lock all the consecutive pages from pfn_base 

Re: [PATCH] vfio/type1: Remove locked page accounting workqueue

2017-04-06 Thread Auger Eric
Hi Alex,

On 03/04/2017 22:02, Alex Williamson wrote:
> If the mmap_sem is contented then the vfio type1 IOMMU backend will
> defer locked page accounting updates to a workqueue task.  This has
> a few problems and depending on which side the user tries to play,
> they might be over-penalized for unmaps that haven't yet been
> accounted, or able to race the workqueue to enter more mappings
> than they're allowed.  It's not entirely clear what motivated this
> workqueue mechanism in the original vfio design, but it seems to
> introduce more problems than it solves, so remove it and update the
> callers to allow for failure.  We can also now recheck the limit
> under write lock to make sure we don't exceed it.
> 
> Cc: sta...@vger.kernel.org
> Signed-off-by: Alex Williamson 
> ---
> 
> Sergio had proposed a QEMU workaround for this:
> https://lists.nongnu.org/archive/html/qemu-devel/2017-04/msg00244.html
> Clearly the bug is in the kernel and I'm more inclined to fix it via
> stable releases.  I also considered adding a flag in the type1 info
> structure to indicate synchronous lock accounting, but then second
> guessed using that to advertise the defect, especially if the workaround
> is only to pause and try again.  Comments/suggestions?  Thanks,
> 
> Alex
> 
>  drivers/vfio/vfio_iommu_type1.c |   99 
> ++-
>  1 file changed, 45 insertions(+), 54 deletions(-)
> 
> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> index 32d2633092a3..d93a88748d14 100644
> --- a/drivers/vfio/vfio_iommu_type1.c
> +++ b/drivers/vfio/vfio_iommu_type1.c
> @@ -246,69 +246,45 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, 
> struct vfio_pfn *vpfn)
>   return ret;
>  }
>  
> -struct vwork {
> - struct mm_struct*mm;
> - longnpage;
> - struct work_struct  work;
> -};
> -
> -/* delayed decrement/increment for locked_vm */
> -static void vfio_lock_acct_bg(struct work_struct *work)
> +static int vfio_lock_acct(struct task_struct *task, long npage)
>  {
> - struct vwork *vwork = container_of(work, struct vwork, work);
> - struct mm_struct *mm;
> -
> - mm = vwork->mm;
> - down_write(>mmap_sem);
> - mm->locked_vm += vwork->npage;
> - up_write(>mmap_sem);
> - mmput(mm);
> - kfree(vwork);
> -}
> -
> -static void vfio_lock_acct(struct task_struct *task, long npage)
> -{
> - struct vwork *vwork;
>   struct mm_struct *mm;
>   bool is_current;
> + int ret;
>  
>   if (!npage)
> - return;
> + return 0;
>  
>   is_current = (task->mm == current->mm);
>  
>   mm = is_current ? task->mm : get_task_mm(task);
>   if (!mm)
> - return; /* process exited */
> + return -ESRCH; /* process exited */
>  
> - if (down_write_trylock(>mmap_sem)) {
> + ret = down_write_killable(>mmap_sem);
> + if (ret)
don't you miss mmput(mm) if (!is_current)?

> + return ret;
> +
> + if (npage < 0) {
>   mm->locked_vm += npage;
> - up_write(>mmap_sem);
> - if (!is_current)
> - mmput(mm);
> - return;
> - }
> + } else {
> + unsigned long limit;
> +
> + limit = is_current ? rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT :
> + task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
>  
> - if (is_current) {
> - mm = get_task_mm(task);
> - if (!mm)
> - return;
> + if (mm->locked_vm + npage <= limit)
> + mm->locked_vm += npage;
> + else
> + ret = -ENOMEM;
>   }
>  
> - /*
> -  * Couldn't get mmap_sem lock, so must setup to update
> -  * mm->locked_vm later. If locked_vm were atomic, we
> -  * wouldn't need this silliness
> -  */
> - vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
> - if (WARN_ON(!vwork)) {
> + up_write(>mmap_sem);
> +
> + if (!is_current)
>   mmput(mm);
> - return;
> - }
> - INIT_WORK(>work, vfio_lock_acct_bg);
> - vwork->mm = mm;
> - vwork->npage = npage;
> - schedule_work(>work);
> +
> + return ret;
>  }
>  
>  /*
> @@ -405,7 +381,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned 
> long vaddr,
>  static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
> long npage, unsigned long *pfn_base)
>  {
> - unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
> + unsigned long pfn = 0, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
>   bool lock_cap = capable(CAP_IPC_LOCK);
>   long ret, pinned = 0, lock_acct = 0;
>   bool rsvd;
> @@ -442,8 +418,6 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, 
> unsigned long vaddr,
>   /* Lock all the consecutive pages from pfn_base */
>   for (vaddr += 

[PATCH] vfio/type1: Remove locked page accounting workqueue

2017-04-03 Thread Alex Williamson
If the mmap_sem is contended then the vfio type1 IOMMU backend will
defer locked page accounting updates to a workqueue task.  This has
a few problems and depending on which side the user tries to play,
they might be over-penalized for unmaps that haven't yet been
accounted, or able to race the workqueue to enter more mappings
than they're allowed.  It's not entirely clear what motivated this
workqueue mechanism in the original vfio design, but it seems to
introduce more problems than it solves, so remove it and update the
callers to allow for failure.  We can also now recheck the limit
under write lock to make sure we don't exceed it.

Cc: sta...@vger.kernel.org
Signed-off-by: Alex Williamson 
---

Sergio had proposed a QEMU workaround for this:
https://lists.nongnu.org/archive/html/qemu-devel/2017-04/msg00244.html
Clearly the bug is in the kernel and I'm more inclined to fix it via
stable releases.  I also considered adding a flag in the type1 info
structure to indicate synchronous lock accounting, but then second
guessed using that to advertise the defect, especially if the workaround
is only to pause and try again.  Comments/suggestions?  Thanks,

Alex

 drivers/vfio/vfio_iommu_type1.c |   99 ++-
 1 file changed, 45 insertions(+), 54 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 32d2633092a3..d93a88748d14 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -246,69 +246,45 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, 
struct vfio_pfn *vpfn)
return ret;
 }
 
-struct vwork {
-   struct mm_struct*mm;
-   longnpage;
-   struct work_struct  work;
-};
-
-/* delayed decrement/increment for locked_vm */
-static void vfio_lock_acct_bg(struct work_struct *work)
+static int vfio_lock_acct(struct task_struct *task, long npage)
 {
-   struct vwork *vwork = container_of(work, struct vwork, work);
-   struct mm_struct *mm;
-
-   mm = vwork->mm;
-   down_write(&mm->mmap_sem);
-   mm->locked_vm += vwork->npage;
-   up_write(&mm->mmap_sem);
-   mmput(mm);
-   kfree(vwork);
-}
-
-static void vfio_lock_acct(struct task_struct *task, long npage)
-{
-   struct vwork *vwork;
struct mm_struct *mm;
bool is_current;
+   int ret;
 
if (!npage)
-   return;
+   return 0;
 
is_current = (task->mm == current->mm);
 
mm = is_current ? task->mm : get_task_mm(task);
if (!mm)
-   return; /* process exited */
+   return -ESRCH; /* process exited */
 
-   if (down_write_trylock(&mm->mmap_sem)) {
+   ret = down_write_killable(&mm->mmap_sem);
+   if (ret)
+   return ret;
+
+   if (npage < 0) {
mm->locked_vm += npage;
-   up_write(&mm->mmap_sem);
-   if (!is_current)
-   mmput(mm);
-   return;
-   }
+   } else {
+   unsigned long limit;
+
+   limit = is_current ? rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT :
+   task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
-   if (is_current) {
-   mm = get_task_mm(task);
-   if (!mm)
-   return;
+   if (mm->locked_vm + npage <= limit)
+   mm->locked_vm += npage;
+   else
+   ret = -ENOMEM;
}
 
-   /*
-* Couldn't get mmap_sem lock, so must setup to update
-* mm->locked_vm later. If locked_vm were atomic, we
-* wouldn't need this silliness
-*/
-   vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
-   if (WARN_ON(!vwork)) {
+   up_write(&mm->mmap_sem);
+
+   if (!is_current)
mmput(mm);
-   return;
-   }
-   INIT_WORK(&vwork->work, vfio_lock_acct_bg);
-   vwork->mm = mm;
-   vwork->npage = npage;
-   schedule_work(&vwork->work);
+
+   return ret;
 }
 
 /*
@@ -405,7 +381,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned 
long vaddr,
 static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
  long npage, unsigned long *pfn_base)
 {
-   unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+   unsigned long pfn = 0, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
bool lock_cap = capable(CAP_IPC_LOCK);
long ret, pinned = 0, lock_acct = 0;
bool rsvd;
@@ -442,8 +418,6 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, 
unsigned long vaddr,
/* Lock all the consecutive pages from pfn_base */
for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
 pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
-   unsigned long pfn = 0;
-
ret = vaddr_get_pfn(current->mm, vaddr, 

[PATCH] vfio/type1: Remove locked page accounting workqueue

2017-04-03 Thread Alex Williamson
If the mmap_sem is contended then the vfio type1 IOMMU backend will
defer locked page accounting updates to a workqueue task.  This has
a few problems and depending on which side the user tries to play,
they might be over-penalized for unmaps that haven't yet been
accounted, or able to race the workqueue to enter more mappings
than they're allowed.  It's not entirely clear what motivated this
workqueue mechanism in the original vfio design, but it seems to
introduce more problems than it solves, so remove it and update the
callers to allow for failure.  We can also now recheck the limit
under write lock to make sure we don't exceed it.

Cc: stable@vger.kernel.org
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
---

Sergio had proposed a QEMU workaround for this:
https://lists.nongnu.org/archive/html/qemu-devel/2017-04/msg00244.html
Clearly the bug is in the kernel and I'm more inclined to fix it via
stable releases.  I also considered adding a flag in the type1 info
structure to indicate synchronous lock accounting, but then second
guessed using that to advertise the defect, especially if the workaround
is only to pause and try again.  Comments/suggestions?  Thanks,

Alex

 drivers/vfio/vfio_iommu_type1.c |   99 ++-
 1 file changed, 45 insertions(+), 54 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 32d2633092a3..d93a88748d14 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -246,69 +246,45 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, 
struct vfio_pfn *vpfn)
return ret;
 }
 
-struct vwork {
-   struct mm_struct*mm;
-   longnpage;
-   struct work_struct  work;
-};
-
-/* delayed decrement/increment for locked_vm */
-static void vfio_lock_acct_bg(struct work_struct *work)
+static int vfio_lock_acct(struct task_struct *task, long npage)
 {
-   struct vwork *vwork = container_of(work, struct vwork, work);
-   struct mm_struct *mm;
-
-   mm = vwork->mm;
-   down_write(&mm->mmap_sem);
-   mm->locked_vm += vwork->npage;
-   up_write(&mm->mmap_sem);
-   mmput(mm);
-   kfree(vwork);
-}
-
-static void vfio_lock_acct(struct task_struct *task, long npage)
-{
-   struct vwork *vwork;
struct mm_struct *mm;
bool is_current;
+   int ret;
 
if (!npage)
-   return;
+   return 0;
 
is_current = (task->mm == current->mm);
 
mm = is_current ? task->mm : get_task_mm(task);
if (!mm)
-   return; /* process exited */
+   return -ESRCH; /* process exited */
 
-   if (down_write_trylock(>mmap_sem)) {
+   ret = down_write_killable(&mm->mmap_sem);
+   if (ret)
+   return ret;
+
+   if (npage < 0) {
mm->locked_vm += npage;
-   up_write(>mmap_sem);
-   if (!is_current)
-   mmput(mm);
-   return;
-   }
+   } else {
+   unsigned long limit;
+
+   limit = is_current ? rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT :
+   task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
-   if (is_current) {
-   mm = get_task_mm(task);
-   if (!mm)
-   return;
+   if (mm->locked_vm + npage <= limit)
+   mm->locked_vm += npage;
+   else
+   ret = -ENOMEM;
}
 
-   /*
-* Couldn't get mmap_sem lock, so must setup to update
-* mm->locked_vm later. If locked_vm were atomic, we
-* wouldn't need this silliness
-*/
-   vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
-   if (WARN_ON(!vwork)) {
+   up_write(&mm->mmap_sem);
+
+   if (!is_current)
mmput(mm);
-   return;
-   }
-   INIT_WORK(&vwork->work, vfio_lock_acct_bg);
-   vwork->mm = mm;
-   vwork->npage = npage;
-   schedule_work(&vwork->work);
+
+   return ret;
 }
 
 /*
@@ -405,7 +381,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned 
long vaddr,
 static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
  long npage, unsigned long *pfn_base)
 {
-   unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+   unsigned long pfn = 0, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
bool lock_cap = capable(CAP_IPC_LOCK);
long ret, pinned = 0, lock_acct = 0;
bool rsvd;
@@ -442,8 +418,6 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, 
unsigned long vaddr,
/* Lock all the consecutive pages from pfn_base */
for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
 pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
-   unsigned long pfn = 0;
-
ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
if