Re: [PATCH V2 9/9] vhost: do not return -EAGIAN for non blocking invalidation too early

2019-07-31 Thread Jason Wang



On 2019/7/31 下午5:59, Stefano Garzarella wrote:

A little typo in the title: s/EAGIAN/EAGAIN

Thanks,
Stefano



Right, will fix if need respin or Michael can help to fix.

Thanks




On Wed, Jul 31, 2019 at 04:46:55AM -0400, Jason Wang wrote:

Instead of returning -EAGAIN unconditionally, we'd better do that only
when we're sure the range overlaps with the metadata area.

Reported-by: Jason Gunthorpe 
Fixes: 7f466032dc9e ("vhost: access vq metadata through kernel virtual address")
Signed-off-by: Jason Wang 
---
  drivers/vhost/vhost.c | 32 +++-
  1 file changed, 19 insertions(+), 13 deletions(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index fc2da8a0c671..96c6aeb1871f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -399,16 +399,19 @@ static void inline vhost_vq_sync_access(struct 
vhost_virtqueue *vq)
smp_mb();
  }
  
-static void vhost_invalidate_vq_start(struct vhost_virtqueue *vq,

- int index,
- unsigned long start,
- unsigned long end)
+static int vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
+int index,
+unsigned long start,
+unsigned long end,
+bool blockable)
  {
	struct vhost_uaddr *uaddr = &vq->uaddrs[index];
struct vhost_map *map;
  
  	if (!vhost_map_range_overlap(uaddr, start, end))

-   return;
+   return 0;
+   else if (!blockable)
+   return -EAGAIN;
  
  	spin_lock(&vq->mmu_lock);

++vq->invalidate_count;
@@ -423,6 +426,8 @@ static void vhost_invalidate_vq_start(struct 
vhost_virtqueue *vq,
vhost_set_map_dirty(vq, map, index);
vhost_map_unprefetch(map);
}
+
+   return 0;
  }
  
  static void vhost_invalidate_vq_end(struct vhost_virtqueue *vq,

@@ -443,18 +448,19 @@ static int vhost_invalidate_range_start(struct 
mmu_notifier *mn,
  {
struct vhost_dev *dev = container_of(mn, struct vhost_dev,
 mmu_notifier);
-   int i, j;
-
-   if (!mmu_notifier_range_blockable(range))
-   return -EAGAIN;
+   bool blockable = mmu_notifier_range_blockable(range);
+   int i, j, ret;
  
  	for (i = 0; i < dev->nvqs; i++) {

struct vhost_virtqueue *vq = dev->vqs[i];
  
-		for (j = 0; j < VHOST_NUM_ADDRS; j++)

-   vhost_invalidate_vq_start(vq, j,
- range->start,
- range->end);
+   for (j = 0; j < VHOST_NUM_ADDRS; j++) {
+   ret = vhost_invalidate_vq_start(vq, j,
+   range->start,
+   range->end, blockable);
+   if (ret)
+   return ret;
+   }
}
  
  	return 0;

--
2.18.1



Re: [PATCH V2 9/9] vhost: do not return -EAGIAN for non blocking invalidation too early

2019-07-31 Thread Stefano Garzarella
A little typo in the title: s/EAGIAN/EAGAIN

Thanks,
Stefano

On Wed, Jul 31, 2019 at 04:46:55AM -0400, Jason Wang wrote:
> Instead of returning -EAGAIN unconditionally, we'd better do that only
> when we're sure the range overlaps with the metadata area.
> 
> Reported-by: Jason Gunthorpe 
> Fixes: 7f466032dc9e ("vhost: access vq metadata through kernel virtual 
> address")
> Signed-off-by: Jason Wang 
> ---
>  drivers/vhost/vhost.c | 32 +++-
>  1 file changed, 19 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> index fc2da8a0c671..96c6aeb1871f 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -399,16 +399,19 @@ static void inline vhost_vq_sync_access(struct 
> vhost_virtqueue *vq)
>   smp_mb();
>  }
>  
> -static void vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
> -   int index,
> -   unsigned long start,
> -   unsigned long end)
> +static int vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
> +  int index,
> +  unsigned long start,
> +  unsigned long end,
> +  bool blockable)
>  {
>   struct vhost_uaddr *uaddr = &vq->uaddrs[index];
>   struct vhost_map *map;
>  
>   if (!vhost_map_range_overlap(uaddr, start, end))
> - return;
> + return 0;
> + else if (!blockable)
> + return -EAGAIN;
>  
>   spin_lock(&vq->mmu_lock);
>   ++vq->invalidate_count;
> @@ -423,6 +426,8 @@ static void vhost_invalidate_vq_start(struct 
> vhost_virtqueue *vq,
>   vhost_set_map_dirty(vq, map, index);
>   vhost_map_unprefetch(map);
>   }
> +
> + return 0;
>  }
>  
>  static void vhost_invalidate_vq_end(struct vhost_virtqueue *vq,
> @@ -443,18 +448,19 @@ static int vhost_invalidate_range_start(struct 
> mmu_notifier *mn,
>  {
>   struct vhost_dev *dev = container_of(mn, struct vhost_dev,
>mmu_notifier);
> - int i, j;
> -
> - if (!mmu_notifier_range_blockable(range))
> - return -EAGAIN;
> + bool blockable = mmu_notifier_range_blockable(range);
> + int i, j, ret;
>  
>   for (i = 0; i < dev->nvqs; i++) {
>   struct vhost_virtqueue *vq = dev->vqs[i];
>  
> - for (j = 0; j < VHOST_NUM_ADDRS; j++)
> - vhost_invalidate_vq_start(vq, j,
> -   range->start,
> -   range->end);
> + for (j = 0; j < VHOST_NUM_ADDRS; j++) {
> + ret = vhost_invalidate_vq_start(vq, j,
> + range->start,
> + range->end, blockable);
> + if (ret)
> + return ret;
> + }
>   }
>  
>   return 0;
> -- 
> 2.18.1
> 

-- 


[PATCH V2 9/9] vhost: do not return -EAGIAN for non blocking invalidation too early

2019-07-31 Thread Jason Wang
Instead of returning -EAGAIN unconditionally, we'd better do that only
when we're sure the range overlaps with the metadata area.

Reported-by: Jason Gunthorpe 
Fixes: 7f466032dc9e ("vhost: access vq metadata through kernel virtual address")
Signed-off-by: Jason Wang 
---
 drivers/vhost/vhost.c | 32 +++-
 1 file changed, 19 insertions(+), 13 deletions(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index fc2da8a0c671..96c6aeb1871f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -399,16 +399,19 @@ static void inline vhost_vq_sync_access(struct 
vhost_virtqueue *vq)
smp_mb();
 }
 
-static void vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
- int index,
- unsigned long start,
- unsigned long end)
+static int vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
+int index,
+unsigned long start,
+unsigned long end,
+bool blockable)
 {
	struct vhost_uaddr *uaddr = &vq->uaddrs[index];
struct vhost_map *map;
 
if (!vhost_map_range_overlap(uaddr, start, end))
-   return;
+   return 0;
+   else if (!blockable)
+   return -EAGAIN;
 
	spin_lock(&vq->mmu_lock);
++vq->invalidate_count;
@@ -423,6 +426,8 @@ static void vhost_invalidate_vq_start(struct 
vhost_virtqueue *vq,
vhost_set_map_dirty(vq, map, index);
vhost_map_unprefetch(map);
}
+
+   return 0;
 }
 
 static void vhost_invalidate_vq_end(struct vhost_virtqueue *vq,
@@ -443,18 +448,19 @@ static int vhost_invalidate_range_start(struct 
mmu_notifier *mn,
 {
struct vhost_dev *dev = container_of(mn, struct vhost_dev,
 mmu_notifier);
-   int i, j;
-
-   if (!mmu_notifier_range_blockable(range))
-   return -EAGAIN;
+   bool blockable = mmu_notifier_range_blockable(range);
+   int i, j, ret;
 
for (i = 0; i < dev->nvqs; i++) {
struct vhost_virtqueue *vq = dev->vqs[i];
 
-   for (j = 0; j < VHOST_NUM_ADDRS; j++)
-   vhost_invalidate_vq_start(vq, j,
- range->start,
- range->end);
+   for (j = 0; j < VHOST_NUM_ADDRS; j++) {
+   ret = vhost_invalidate_vq_start(vq, j,
+   range->start,
+   range->end, blockable);
+   if (ret)
+   return ret;
+   }
}
 
return 0;
-- 
2.18.1