[PATCH v3 08/18] drm/ttm: use gem vma_node

2019-06-28 Thread Gerd Hoffmann
Drop vma_node from ttm_buffer_object, use the gem struct
(base.vma_node) instead.

Signed-off-by: Gerd Hoffmann 
Reviewed-by: Christian König 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 2 +-
 drivers/gpu/drm/qxl/qxl_object.h   | 2 +-
 drivers/gpu/drm/radeon/radeon_object.h | 2 +-
 drivers/gpu/drm/virtio/virtgpu_drv.h   | 2 +-
 include/drm/ttm/ttm_bo_api.h   | 4 ----
 drivers/gpu/drm/drm_gem_vram_helper.c  | 5 +----
 drivers/gpu/drm/nouveau/nouveau_display.c  | 2 +-
 drivers/gpu/drm/nouveau/nouveau_gem.c  | 2 +-
 drivers/gpu/drm/ttm/ttm_bo.c   | 8 ++++----
 drivers/gpu/drm/ttm/ttm_bo_util.c  | 2 +-
 drivers/gpu/drm/ttm/ttm_bo_vm.c| 9 +++++----
 drivers/gpu/drm/virtio/virtgpu_prime.c | 3 ---
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 4 ++--
 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c| 4 ++--
 14 files changed, 21 insertions(+), 30 deletions(-)
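The per-driver conversion is mechanical; a minimal sketch of the resulting
accessor pattern (generic helper name; assumes the drm_gem_object embedded
as 'base', as introduced earlier in this series):

/* mmap offsets now come from the vma_node of the embedded GEM object */
static inline u64 bo_mmap_offset(struct ttm_buffer_object *bo)
{
	return drm_vma_node_offset_addr(&bo->base.vma_node);
}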

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index a80a9972ad16..a68d85bd8fab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -191,7 +191,7 @@ static inline unsigned amdgpu_bo_gpu_page_alignment(struct 
amdgpu_bo *bo)
  */
 static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
 {
-   return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+   return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }
 
 /**
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index b812d4ae9d0d..8ae54ba7857c 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -60,7 +60,7 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
 
 static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
 {
-   return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+   return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }
 
 static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
diff --git a/drivers/gpu/drm/radeon/radeon_object.h 
b/drivers/gpu/drm/radeon/radeon_object.h
index 9ffd8215d38a..e5554bf9140e 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -116,7 +116,7 @@ static inline unsigned radeon_bo_gpu_page_alignment(struct 
radeon_bo *bo)
  */
 static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
 {
-   return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+   return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }
 
 extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 9e2d3062b01d..7146ba00fd5b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -396,7 +396,7 @@ static inline void virtio_gpu_object_unref(struct 
virtio_gpu_object **bo)
 
 static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
 {
-   return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+   return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }
 
 static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index fa050f0328ab..7ffc50a3303d 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -152,7 +152,6 @@ struct ttm_tt;
  * @ddestroy: List head for the delayed destroy list.
  * @swap: List head for swap LRU list.
  * @moving: Fence set when BO is moving
- * @vma_node: Address space manager node.
  * @offset: The current GPU offset, which can have different meanings
  * depending on the memory type. For SYSTEM type memory, it should be 0.
  * @cur_placement: Hint of current placement.
@@ -219,9 +218,6 @@ struct ttm_buffer_object {
 */
 
struct dma_fence *moving;
-
-   struct drm_vma_offset_node vma_node;
-
unsigned priority;
 
/**
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c 
b/drivers/gpu/drm/drm_gem_vram_helper.c
index 61d9520cc15f..2e474dee30df 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -163,7 +163,7 @@ EXPORT_SYMBOL(drm_gem_vram_put);
  */
 u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
 {
-   return drm_vma_node_offset_addr(&gbo->bo.vma_node);
+   return drm_vma_node_offset_addr(&gbo->bo.base.vma_node);
 }
 EXPORT_SYMBOL(drm_gem_vram_mmap_offset);
 
@@ -633,9 +633,6 @@ EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_vunmap);
 int drm_gem_vram_driver_gem_prime_mmap(struct drm_gem_object *gem,
   struct vm_area_struct *vma)
 {
-   struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
-
-   gbo->bo.base.vma_node.vm_node.start = gbo->bo.vma_node.vm_node.start;
return drm_gem_prime_mmap(gem, vma);
 }
 EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_mmap);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c 

[PATCH v3 03/18] drm/qxl: use embedded gem object

2019-06-28 Thread Gerd Hoffmann
Drop drm_gem_object from qxl_bo, use the
ttm_buffer_object.base instead.

Signed-off-by: Gerd Hoffmann 
Acked-by: Christian König 
---
 drivers/gpu/drm/qxl/qxl_drv.h |  6 +++---
 drivers/gpu/drm/qxl/qxl_object.h  |  4 ++--
 drivers/gpu/drm/qxl/qxl_cmd.c |  4 ++--
 drivers/gpu/drm/qxl/qxl_debugfs.c |  2 +-
 drivers/gpu/drm/qxl/qxl_display.c |  8 ++++----
 drivers/gpu/drm/qxl/qxl_gem.c |  2 +-
 drivers/gpu/drm/qxl/qxl_object.c  | 20 ++++++++++----------
 drivers/gpu/drm/qxl/qxl_release.c |  2 +-
 drivers/gpu/drm/qxl/qxl_ttm.c |  4 ++--
 9 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 2896bb6fdbf4..b80d4a4361cd 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -72,12 +72,13 @@ extern int qxl_max_ioctls;
QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)
 
 struct qxl_bo {
+   struct ttm_buffer_object        tbo;
+
/* Protected by gem.mutex */
struct list_head                list;
/* Protected by tbo.reserved */
struct ttm_place                placements[3];
struct ttm_placement            placement;
-   struct ttm_buffer_object        tbo;
struct ttm_bo_kmap_obj          kmap;
unsigned int pin_count;
void                            *kptr;
@@ -85,7 +86,6 @@ struct qxl_bo {
int type;
 
/* Constant after initialization */
-   struct drm_gem_object   gem_base;
unsigned int is_primary:1; /* is this now a primary surface */
unsigned int is_dumb:1;
struct qxl_bo *shadow;
@@ -94,7 +94,7 @@ struct qxl_bo {
uint32_t surface_id;
struct qxl_release *surf_create;
 };
-#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
+#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, tbo.base)
 #define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
 
 struct qxl_gem {
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 255b914e2a7b..b812d4ae9d0d 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -34,7 +34,7 @@ static inline int qxl_bo_reserve(struct qxl_bo *bo, bool 
no_wait)
r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
-   struct drm_device *ddev = bo->gem_base.dev;
+   struct drm_device *ddev = bo->tbo.base.dev;
 
dev_err(ddev->dev, "%p reserve failed\n", bo);
}
@@ -71,7 +71,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 
*mem_type,
r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
-   struct drm_device *ddev = bo->gem_base.dev;
+   struct drm_device *ddev = bo->tbo.base.dev;
 
dev_err(ddev->dev, "%p reserve failed for wait\n",
bo);
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 0a2e51af1230..498000899bfd 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -375,7 +375,7 @@ void qxl_io_destroy_primary(struct qxl_device *qdev)
 {
wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
qdev->primary_bo->is_primary = false;
-   drm_gem_object_put_unlocked(&qdev->primary_bo->gem_base);
+   drm_gem_object_put_unlocked(&qdev->primary_bo->tbo.base);
qdev->primary_bo = NULL;
 }
 
@@ -402,7 +402,7 @@ void qxl_io_create_primary(struct qxl_device *qdev, struct 
qxl_bo *bo)
wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
qdev->primary_bo = bo;
qdev->primary_bo->is_primary = true;
-   drm_gem_object_get(&qdev->primary_bo->gem_base);
+   drm_gem_object_get(&qdev->primary_bo->tbo.base);
 }
 
 void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c 
b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 118422549828..013b938986c7 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -66,7 +66,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
rcu_read_unlock();
 
seq_printf(m, "size %ld, pc %d, num releases %d\n",
-  (unsigned long)bo->gem_base.size,
+  (unsigned long)bo->tbo.base.size,
   bo->pin_count, rel);
}
return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_display.c 
b/drivers/gpu/drm/qxl/qxl_display.c
index 8b319ebbb0fb..93e31d062854 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -794,7 +794,7 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
qdev->dumb_shadow_bo->surf.height != surf.height) {

[PATCH v3 16/18] drm/qxl: switch driver from bo->resv to bo->base.resv

2019-06-28 Thread Gerd Hoffmann
Signed-off-by: Gerd Hoffmann 
Acked-by: Christian König 
---
 drivers/gpu/drm/qxl/qxl_debugfs.c | 2 +-
 drivers/gpu/drm/qxl/qxl_release.c | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c 
b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 013b938986c7..f30460782f05 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -61,7 +61,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
int rel;
 
rcu_read_lock();
-   fobj = rcu_dereference(bo->tbo.resv->fence);
+   fobj = rcu_dereference(bo->tbo.base.resv->fence);
rel = fobj ? fobj->shared_count : 0;
rcu_read_unlock();
 
diff --git a/drivers/gpu/drm/qxl/qxl_release.c 
b/drivers/gpu/drm/qxl/qxl_release.c
index 32126e8836b3..1b7be82c8e68 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -234,7 +234,7 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
return ret;
}
 
-   ret = reservation_object_reserve_shared(bo->tbo.resv, 1);
+   ret = reservation_object_reserve_shared(bo->tbo.base.resv, 1);
if (ret)
return ret;
 
@@ -454,9 +454,9 @@ void qxl_release_fence_buffer_objects(struct qxl_release 
*release)
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
 
-   reservation_object_add_shared_fence(bo->resv, &release->base);
+   reservation_object_add_shared_fence(bo->base.resv,
+   &release->base);
ttm_bo_add_to_lru(bo);
-   reservation_object_unlock(bo->resv);
+   reservation_object_unlock(bo->base.resv);
}
spin_unlock(&glob->lru_lock);
ww_acquire_fini(&release->ticket);
-- 
2.18.1


[PATCH v3 17/18] drm/virtio: switch driver from bo->resv to bo->base.resv

2019-06-28 Thread Gerd Hoffmann
Signed-off-by: Gerd Hoffmann 
Acked-by: Christian König 
---
 drivers/gpu/drm/virtio/virtgpu_ioctl.c | 4 ++--
 drivers/gpu/drm/virtio/virtgpu_plane.c | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c 
b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index ac60be9b5c19..4adfced8df2c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -394,7 +394,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct 
drm_device *dev,
(vgdev, qobj->hw_res_handle,
 vfpriv->ctx_id, offset, args->level,
 &box, fence);
-   reservation_object_add_excl_fence(qobj->tbo.resv,
+   reservation_object_add_excl_fence(qobj->tbo.base.resv,
  &fence->f);

dma_fence_put(&fence->f);
@@ -448,7 +448,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct 
drm_device *dev, void *data,
(vgdev, qobj,
 vfpriv ? vfpriv->ctx_id : 0, offset,
 args->level, &box, fence);
-   reservation_object_add_excl_fence(qobj->tbo.resv,
+   reservation_object_add_excl_fence(qobj->tbo.base.resv,
  &fence->f);
dma_fence_put(&fence->f);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c 
b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 024c2aa0c929..328e28081d9f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -210,7 +210,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane 
*plane,
 0, 0, vgfb->fence);
ret = virtio_gpu_object_reserve(bo, false);
if (!ret) {
-   reservation_object_add_excl_fence(bo->tbo.resv,
+   reservation_object_add_excl_fence(bo->tbo.base.resv,
  &vgfb->fence->f);
dma_fence_put(&vgfb->fence->f);
vgfb->fence = NULL;
-- 
2.18.1


Re: [PATCH v4 02/12] drm/virtio: switch virtio_gpu_wait_ioctl() to gem helper.

2019-06-28 Thread Gerd Hoffmann
On Wed, Jun 26, 2019 at 04:55:20PM -0700, Chia-I Wu wrote:
> On Wed, Jun 19, 2019 at 11:07 PM Gerd Hoffmann  wrote:
> >
> > Use drm_gem_reservation_object_wait() in virtio_gpu_wait_ioctl().
> > This also makes the ioctl run lockless.
> The userspace has a BO cache to avoid freeing BOs immediately but to
> reuse them on next allocations.  The BO cache checks if a BO is busy
> before reuse, and I am seeing a big negative perf impact because of
> slow virtio_gpu_wait_ioctl.  I wonder if this helps.

Could help indeed (assuming it checks with NOWAIT).

How many objects does userspace typically check in one go?  Maybe it
makes sense to add an ioctl which checks a list, to reduce the
system-call overhead.
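For reference, a hypothetical userspace helper doing such a busy check
against the existing single-BO ioctl (uses only the UAPI from
virtgpu_drm.h; EINTR retry and error handling trimmed):

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <libdrm/virtgpu_drm.h>   /* install path may vary */

/* 1 = still busy, 0 = idle (safe to reuse), negative errno on error */
static int virtgpu_bo_busy(int drm_fd, uint32_t handle)
{
	struct drm_virtgpu_3d_wait wait;

	memset(&wait, 0, sizeof(wait));
	wait.handle = handle;
	wait.flags = VIRTGPU_WAIT_NOWAIT;

	if (ioctl(drm_fd, DRM_IOCTL_VIRTGPU_WAIT, &wait) == 0)
		return 0;
	return errno == EBUSY ? 1 : -errno;
}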

> > +   if (args->flags & VIRTGPU_WAIT_NOWAIT) {
> > +   obj = drm_gem_object_lookup(file, args->handle);
> Don't we need a NULL check here?

Yes, we do.  Will fix.

thanks,
  Gerd



Re: [PATCH v4 08/12] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing

2019-06-28 Thread Gerd Hoffmann
  Hi,

> > --- a/drivers/gpu/drm/virtio/virtgpu_drv.h
> > +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
> > @@ -120,9 +120,9 @@ struct virtio_gpu_vbuffer {
> >
> > char *resp_buf;
> > int resp_size;
> > -
> > virtio_gpu_resp_cb resp_cb;
> >
> > +   struct virtio_gpu_object_array *objs;
> This can use a comment (e.g., objects referenced by the vbuffer)

IMHO this is obvious ...

> >  void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
> >void *data, uint32_t data_size,
> > -  uint32_t ctx_id, struct virtio_gpu_fence *fence);
> > +  uint32_t ctx_id, struct virtio_gpu_fence *fence,
> > +  struct virtio_gpu_object_array *objs);
> Can we keep fence, which is updated, as the last parameter?

Fixed.

> > +   if (buflist) {
> > +   for (i = 0; i < exbuf->num_bo_handles; i++)
> > +   reservation_object_add_excl_fence(buflist->objs[i]->resv,
> > +                                     &out_fence->f);
> > +   drm_gem_unlock_reservations(buflist->objs, buflist->nents,
> > +                               &ticket);
> > +   }
> We used to unlock after virtio_gpu_cmd_submit.
> 
> I guess, the fence is considered signaled (because its seqno is still
> 0) until after virtio_gpu_cmd_submit.  We probably don't want other
> processes to see the semi-initialized fence.

Good point.  Fixed.

> >  out_memdup:
> > kfree(buf);
> >  out_unresv:
> > -   ttm_eu_backoff_reservation(&ticket, &validate_list);
> > -out_free:
> > -   virtio_gpu_unref_list(&validate_list);
> Keeping out_free to free buflist seems just fine.

We don't need the separate label though ...

> > +   drm_gem_unlock_reservations(buflist->objs, buflist->nents, &ticket);
> >  out_unused_fd:
> > kvfree(bo_handles);
> > -   kvfree(buflist);
> > +   if (buflist)
> > +   virtio_gpu_array_put_free(buflist);

... and the buflist is released here if needed.

But we need if (buflist) for drm_gem_unlock_reservations too.  Fixed.

> > -
> > -   list_del(&entry->list);
> > -   free_vbuf(vgdev, entry);
> > }
> > wake_up(&vgdev->ctrlq.ack_queue);
> >
> > if (fence_id)
> > virtio_gpu_fence_event_process(vgdev, fence_id);
> > +
> > +   list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
> > +   if (entry->objs)
> > +   virtio_gpu_array_put_free(entry->objs);
> > +   list_del(&entry->list);
> We are clearing the list.  I guess list_del is not needed.
> > +   free_vbuf(vgdev, entry);

This just shuffles around the code.  Dropping list_del() is unrelated
and should be a separate patch.

Besides that, I'm not sure it can actually be dropped.  free_vbuf() will
not kfree() the vbuf but keeps it cached on a freelist instead.

cheers,
  Gerd



[PATCH v5 05/12] drm/virtio: drop no_wait argument from virtio_gpu_object_reserve

2019-06-28 Thread Gerd Hoffmann
All callers pass no_wait = false.

Signed-off-by: Gerd Hoffmann 
Reviewed-by: Daniel Vetter 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h   | 5 ++---
 drivers/gpu/drm/virtio/virtgpu_gem.c   | 4 ++--
 drivers/gpu/drm/virtio/virtgpu_ioctl.c | 4 ++--
 3 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 2cd96256ba37..06cc0e961df6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -398,12 +398,11 @@ static inline u64 virtio_gpu_object_mmap_offset(struct 
virtio_gpu_object *bo)
return drm_vma_node_offset_addr(&bo->tbo.vma_node);
 }
 
-static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
-bool no_wait)
+static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo)
 {
int r;
 
-   r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
+   r = ttm_bo_reserve(&bo->tbo, true, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
struct virtio_gpu_device *qdev =
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c 
b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 1e49e08dd545..9c9ad3b14080 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -140,7 +140,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
if (!vgdev->has_virgl_3d)
return 0;
 
-   r = virtio_gpu_object_reserve(qobj, false);
+   r = virtio_gpu_object_reserve(qobj);
if (r)
return r;
 
@@ -161,7 +161,7 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
if (!vgdev->has_virgl_3d)
return;
 
-   r = virtio_gpu_object_reserve(qobj, false);
+   r = virtio_gpu_object_reserve(qobj);
if (r)
return;
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c 
b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index c06dde541491..0caff3fa623e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -375,7 +375,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct 
drm_device *dev,
 
qobj = gem_to_virtio_gpu_obj(gobj);
 
-   ret = virtio_gpu_object_reserve(qobj, false);
+   ret = virtio_gpu_object_reserve(qobj);
if (ret)
goto out;
 
@@ -425,7 +425,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct 
drm_device *dev, void *data,
 
qobj = gem_to_virtio_gpu_obj(gobj);
 
-   ret = virtio_gpu_object_reserve(qobj, false);
+   ret = virtio_gpu_object_reserve(qobj);
if (ret)
goto out;
 
-- 
2.18.1



[PATCH v5 12/12] drm/virtio: remove virtio_gpu_alloc_object

2019-06-28 Thread Gerd Hoffmann
Thin wrapper around virtio_gpu_object_create(),
but calling that directly works equally well.

Signed-off-by: Gerd Hoffmann 
Acked-by: Daniel Vetter 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h   |  4 ----
 drivers/gpu/drm/virtio/virtgpu_gem.c   | 23 ++++-------------------
 drivers/gpu/drm/virtio/virtgpu_ioctl.c |  6 +++---
 3 files changed, 7 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 78f4d6211812..8b60bc39d00b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -228,10 +228,6 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
   struct drm_file *file);
 void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
 struct drm_file *file);
-struct virtio_gpu_object*
-virtio_gpu_alloc_object(struct drm_device *dev,
-   struct virtio_gpu_object_params *params,
-   struct virtio_gpu_fence *fence);
 int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c 
b/drivers/gpu/drm/virtio/virtgpu_gem.c
index fc4aba8f7bf8..61153fbedbd5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -26,35 +26,20 @@
 #include 
 #include "virtgpu_drv.h"
 
-struct virtio_gpu_object*
-virtio_gpu_alloc_object(struct drm_device *dev,
-   struct virtio_gpu_object_params *params,
-   struct virtio_gpu_fence *fence)
-{
-   struct virtio_gpu_device *vgdev = dev->dev_private;
-   struct virtio_gpu_object *obj;
-   int ret;
-
-   ret = virtio_gpu_object_create(vgdev, params, &obj, fence);
-   if (ret)
-   return ERR_PTR(ret);
-
-   return obj;
-}
-
 int virtio_gpu_gem_create(struct drm_file *file,
  struct drm_device *dev,
  struct virtio_gpu_object_params *params,
  struct drm_gem_object **obj_p,
  uint32_t *handle_p)
 {
+   struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_object *obj;
int ret;
u32 handle;
 
-   obj = virtio_gpu_alloc_object(dev, params, NULL);
-   if (IS_ERR(obj))
-   return PTR_ERR(obj);
+   ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
+   if (ret < 0)
+   return ret;
 
ret = drm_gem_handle_create(file, &obj->base.base, &handle);
if (ret) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c 
b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 43380786bb96..fa9fb2d50b56 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -266,10 +266,10 @@ static int virtio_gpu_resource_create_ioctl(struct 
drm_device *dev, void *data,
fence = virtio_gpu_fence_alloc(vgdev);
if (!fence)
return -ENOMEM;
-   qobj = virtio_gpu_alloc_object(dev, &params, fence);
+   ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
dma_fence_put(&fence->f);
-   if (IS_ERR(qobj))
-   return PTR_ERR(qobj);
+   if (ret < 0)
+   return ret;
obj = &qobj->base.base;

ret = drm_gem_handle_create(file_priv, obj, &handle);
-- 
2.18.1



[PATCH v5 07/12] drm/virtio: add virtio_gpu_object_array & helpers

2019-06-28 Thread Gerd Hoffmann
Some helper functions to manage an array of gem objects.

v5: some small optimizations (Chia-I Wu).
v4: make them virtio-private instead of generic helpers.

Signed-off-by: Gerd Hoffmann 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h | 10 ++++++++++
 drivers/gpu/drm/virtio/virtgpu_gem.c | 45 
 2 files changed, 55 insertions(+)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 07f6001ea91e..98d646789d23 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -84,6 +84,11 @@ struct virtio_gpu_object {
 #define gem_to_virtio_gpu_obj(gobj) \
container_of((gobj), struct virtio_gpu_object, gem_base)
 
+struct virtio_gpu_object_array {
+   u32 nents;
+   struct drm_gem_object *objs[];
+};
+
 struct virtio_gpu_vbuffer;
 struct virtio_gpu_device;
 
@@ -251,6 +256,11 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
  struct drm_device *dev,
  uint32_t handle, uint64_t *offset_p);
 
+struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
+struct virtio_gpu_object_array*
+virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 
nents);
+void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs);
+
 /* virtio vg */
 int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
 void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c 
b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 9c9ad3b14080..74ba3f0db4f1 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -169,3 +169,48 @@ void virtio_gpu_gem_object_close(struct drm_gem_object 
*obj,
qobj->hw_res_handle);
virtio_gpu_object_unreserve(qobj);
 }
+
+struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
+{
+   struct virtio_gpu_object_array *objs;
+   size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents;
+
+   objs = kmalloc(size, GFP_KERNEL);
+   return objs;
+}
+
+static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
+{
+   kfree(objs);
+}
+
+struct virtio_gpu_object_array*
+virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 
nents)
+{
+   struct virtio_gpu_object_array *objs;
+   u32 i;
+
+   objs = virtio_gpu_array_alloc(nents);
+   if (!objs)
+   return NULL;
+
+   for (i = 0; i < nents; i++) {
+   objs->nents = i;
+   objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
+   if (!objs->objs[i]) {
+   virtio_gpu_array_put_free(objs);
+   return NULL;
+   }
+   }
+   objs->nents = i;
+   return objs;
+}
+
+void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
+{
+   u32 i;
+
+   for (i = 0; i < objs->nents; i++)
+   drm_gem_object_put_unlocked(objs->objs[i]);
+   virtio_gpu_array_free(objs);
+}
-- 
2.18.1



Re: [PATCH v4 11/12] drm/virtio: switch from ttm to gem shmem helpers

2019-06-28 Thread Gerd Hoffmann
> >  static inline struct virtio_gpu_object*
> >  virtio_gpu_object_ref(struct virtio_gpu_object *bo)

> The last users of these two helpers are removed with this patch.  We
> can remove them.

patch 12/12 does that.

> > +   bo = gem_to_virtio_gpu_obj(&shmem_obj->base);
> > +   bo->base.base.funcs = _gpu_gem_funcs;
> Move this to virtio_gpu_create_object.

Fixed.

> > +   ret = drm_gem_shmem_pin(>base.base);
> The bo is attached for its entire lifetime, at least currently.  Maybe
> we can use drm_gem_shmem_get_pages_sgt (and get rid of obj->pages).

Already checked this.
We can't due to the iommu quirks.

cheers,
  Gerd



[PATCH v5 10/12] drm/virtio: drop virtio_gpu_object_list_validate/virtio_gpu_unref_list

2019-06-28 Thread Gerd Hoffmann
No users left.

Signed-off-by: Gerd Hoffmann 
Acked-by: Daniel Vetter 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h   |  3 ---
 drivers/gpu/drm/virtio/virtgpu_ioctl.c | 39 ---------------------------------------
 2 files changed, 42 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index c4b266b6f731..c6c0a66ccf6b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -222,9 +222,6 @@ struct virtio_gpu_fpriv {
 /* virtio_ioctl.c */
 #define DRM_VIRTIO_NUM_IOCTLS 10
 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
-int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
-   struct list_head *head);
-void virtio_gpu_unref_list(struct list_head *head);
 
 /* virtio_kms.c */
 int virtio_gpu_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c 
b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index ae6830aa38c9..688b0b70d3d3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -54,45 +54,6 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void 
*data,
 &virtio_gpu_map->offset);
 }
 
-int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
-   struct list_head *head)
-{
-   struct ttm_operation_ctx ctx = { false, false };
-   struct ttm_validate_buffer *buf;
-   struct ttm_buffer_object *bo;
-   struct virtio_gpu_object *qobj;
-   int ret;
-
-   ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true);
-   if (ret != 0)
-   return ret;
-
-   list_for_each_entry(buf, head, head) {
-   bo = buf->bo;
-   qobj = container_of(bo, struct virtio_gpu_object, tbo);
ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
-   if (ret) {
-   ttm_eu_backoff_reservation(ticket, head);
-   return ret;
-   }
-   }
-   return 0;
-}
-
-void virtio_gpu_unref_list(struct list_head *head)
-{
-   struct ttm_validate_buffer *buf;
-   struct ttm_buffer_object *bo;
-   struct virtio_gpu_object *qobj;
-
-   list_for_each_entry(buf, head, head) {
-   bo = buf->bo;
-   qobj = container_of(bo, struct virtio_gpu_object, tbo);
-
drm_gem_object_put_unlocked(&qobj->gem_base);
-   }
-}
-
 /*
  * Usage of execbuffer:
  * Relocations need to take into account the full VIRTIO_GPUDrawable size.
-- 
2.18.1



[PATCH v5 11/12] drm/virtio: switch from ttm to gem shmem helpers

2019-06-28 Thread Gerd Hoffmann
virtio-gpu basically needs a sg_table for the bo, to tell the host where
the backing pages for the object are.  So the gem shmem helpers are a
perfect fit.  Some drm_gem_object_funcs need thin wrappers to update the
host state, but otherwise the helpers handle everything just fine.

Once the fencing was sorted, the switch was surprisingly easy and for the
most part just meant removing the ttm code.

v5: move drm_gem_object_funcs hookup (Chia-I Wu).
v4: fix drm_gem_object_funcs name.
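A hypothetical sketch of one such thin wrapper (free path; uses
gem_to_virtio_gpu_obj() and virtio_gpu_cmd_unref_resource() as declared
in this series, plus drm_gem_shmem_free_object() from the shmem helpers;
not the literal driver code):

static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;

	if (bo->created)
		/* tell the host to drop its side of the resource first */
		virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
	/* then let the shmem helper release the pages and the GEM object */
	drm_gem_shmem_free_object(obj);
}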

Signed-off-by: Gerd Hoffmann 
Acked-by: Daniel Vetter 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h|  52 +---
 drivers/gpu/drm/virtio/virtgpu_drv.c|  20 +-
 drivers/gpu/drm/virtio/virtgpu_gem.c|  16 +-
 drivers/gpu/drm/virtio/virtgpu_ioctl.c  |  19 +-
 drivers/gpu/drm/virtio/virtgpu_kms.c|   9 -
 drivers/gpu/drm/virtio/virtgpu_object.c | 148 
 drivers/gpu/drm/virtio/virtgpu_prime.c  |  37 ---
 drivers/gpu/drm/virtio/virtgpu_ttm.c| 304 
 drivers/gpu/drm/virtio/virtgpu_vq.c |  24 +-
 drivers/gpu/drm/virtio/Kconfig  |   2 +-
 drivers/gpu/drm/virtio/Makefile |   2 +-
 11 files changed, 82 insertions(+), 551 deletions(-)
 delete mode 100644 drivers/gpu/drm/virtio/virtgpu_ttm.c

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index c6c0a66ccf6b..78f4d6211812 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -33,14 +33,11 @@
 
 #include 
 #include 
+#include <drm/drm_gem_shmem_helper.h>
 #include 
 #include 
 #include 
 #include 
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_placement.h>
 
 #define DRIVER_NAME "virtio_gpu"
 #define DRIVER_DESC "virtio GPU"
@@ -68,21 +65,16 @@ struct virtio_gpu_object_params {
 };
 
 struct virtio_gpu_object {
-   struct drm_gem_object gem_base;
+   struct drm_gem_shmem_object base;
uint32_t hw_res_handle;
 
struct sg_table *pages;
uint32_t mapped;
-   void *vmap;
bool dumb;
-   struct ttm_place                placement_code;
-   struct ttm_placement            placement;
-   struct ttm_buffer_object        tbo;
-   struct ttm_bo_kmap_obj  kmap;
bool created;
 };
 #define gem_to_virtio_gpu_obj(gobj) \
-   container_of((gobj), struct virtio_gpu_object, gem_base)
+   container_of((gobj), struct virtio_gpu_object, base.base)
 
 struct virtio_gpu_object_array {
u32 nents;
@@ -152,10 +144,6 @@ struct virtio_gpu_framebuffer {
 #define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
 
-struct virtio_gpu_mman {
-   struct ttm_bo_device            bdev;
-};
-
 struct virtio_gpu_queue {
struct virtqueue *vq;
spinlock_t qlock;
@@ -184,8 +172,6 @@ struct virtio_gpu_device {
 
struct virtio_device *vdev;
 
-   struct virtio_gpu_mman mman;
-
struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
uint32_t num_scanouts;
 
@@ -350,11 +336,6 @@ struct drm_plane *virtio_gpu_plane_init(struct 
virtio_gpu_device *vgdev,
enum drm_plane_type type,
int index);
 
-/* virtio_gpu_ttm.c */
-int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev);
-void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
-int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
-
 /* virtio_gpu_fence.c */
 bool virtio_fence_signaled(struct dma_fence *f);
 struct virtio_gpu_fence *virtio_gpu_fence_alloc(
@@ -366,58 +347,47 @@ void virtio_gpu_fence_event_process(struct 
virtio_gpu_device *vdev,
u64 last_seq);
 
 /* virtio_gpu_object */
+struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
+   size_t size);
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 struct virtio_gpu_object_params *params,
 struct virtio_gpu_object **bo_ptr,
 struct virtio_gpu_fence *fence);
-void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
-int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
-int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
-  struct virtio_gpu_object *bo);
-void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
 
 /* virtgpu_prime.c */
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt);
-void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
-void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
-  struct vm_area_struct *vma);
 
 static inline struct virtio_gpu_object*
 virtio_gpu_object_ref(struct virtio_gpu_object 

[PATCH v5 08/12] drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing

2019-06-28 Thread Gerd Hoffmann
Use gem reservation helpers and direct reservation_object_* calls
instead of ttm.

v5: fix fencing (Chia-I Wu).
v3: Also attach the array of gem objects to the virtio command buffer,
so we can drop the object references in the completion callback.  Needed
because ttm fence helpers grab a reference for us, but gem helpers
don't.
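Condensed from the diffs in patches 7 and 8, the resulting reference
lifecycle looks roughly like this (fragments, not the literal code):

/* submit: one reference per object, array parked in the vbuffer */
buflist = virtio_gpu_array_from_handles(drm_file, bo_handles,
					exbuf->num_bo_handles);
vbuf->objs = buflist;

/* completion (dequeue work): drop the references taken at submit */
if (entry->objs)
	virtio_gpu_array_put_free(entry->objs);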

Signed-off-by: Gerd Hoffmann 
Acked-by: Daniel Vetter 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h   |  6 ++++--
 drivers/gpu/drm/virtio/virtgpu_ioctl.c | 62 +++---
 drivers/gpu/drm/virtio/virtgpu_vq.c| 17 ---
 3 files changed, 43 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 98d646789d23..356d27132388 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -120,9 +120,9 @@ struct virtio_gpu_vbuffer {
 
char *resp_buf;
int resp_size;
-
virtio_gpu_resp_cb resp_cb;
 
+   struct virtio_gpu_object_array *objs;
struct list_head list;
 };
 
@@ -311,7 +311,9 @@ void virtio_gpu_cmd_context_detach_resource(struct 
virtio_gpu_device *vgdev,
uint32_t resource_id);
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
   void *data, uint32_t data_size,
-  uint32_t ctx_id, struct virtio_gpu_fence *fence);
+  uint32_t ctx_id,
+  struct virtio_gpu_object_array *objs,
+  struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
  uint32_t resource_id, uint32_t ctx_id,
  uint64_t offset, uint32_t level,
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c 
b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 0caff3fa623e..ae6830aa38c9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -105,14 +105,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device 
*dev, void *data,
struct drm_virtgpu_execbuffer *exbuf = data;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
-   struct drm_gem_object *gobj;
struct virtio_gpu_fence *out_fence;
-   struct virtio_gpu_object *qobj;
int ret;
uint32_t *bo_handles = NULL;
void __user *user_bo_handles = NULL;
-   struct list_head validate_list;
-   struct ttm_validate_buffer *buflist = NULL;
+   struct virtio_gpu_object_array *buflist = NULL;
int i;
struct ww_acquire_ctx ticket;
struct sync_file *sync_file;
@@ -155,15 +152,10 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device 
*dev, void *data,
return out_fence_fd;
}
 
-   INIT_LIST_HEAD(&validate_list);
if (exbuf->num_bo_handles) {
-
bo_handles = kvmalloc_array(exbuf->num_bo_handles,
-  sizeof(uint32_t), GFP_KERNEL);
-   buflist = kvmalloc_array(exbuf->num_bo_handles,
-  sizeof(struct ttm_validate_buffer),
-  GFP_KERNEL | __GFP_ZERO);
-   if (!bo_handles || !buflist) {
+   sizeof(uint32_t), GFP_KERNEL);
+   if (!bo_handles) {
ret = -ENOMEM;
goto out_unused_fd;
}
@@ -175,25 +167,22 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device 
*dev, void *data,
goto out_unused_fd;
}
 
-   for (i = 0; i < exbuf->num_bo_handles; i++) {
-   gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
-   if (!gobj) {
-   ret = -ENOENT;
-   goto out_unused_fd;
-   }
-
-   qobj = gem_to_virtio_gpu_obj(gobj);
-   buflist[i].bo = &qobj->tbo;
-
-   list_add(&buflist[i].head, &validate_list);
+   buflist = virtio_gpu_array_from_handles(drm_file, bo_handles,
+   exbuf->num_bo_handles);
+   if (!buflist) {
+   ret = -ENOENT;
+   goto out_unused_fd;
}
kvfree(bo_handles);
bo_handles = NULL;
}
 
-   ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
-   if (ret)
-   goto out_free;
+   if (buflist) {
+   ret = drm_gem_lock_reservations(buflist->objs, buflist->nents,
+   &ticket);
+   if (ret)
+   goto out_unused_fd;
+   }
 
buf = 

[PATCH v5 03/12] drm/virtio: simplify cursor updates

2019-06-28 Thread Gerd Hoffmann
No need to do the reservation dance;
we can just wait on the fence directly.

Signed-off-by: Gerd Hoffmann 
Reviewed-by: Daniel Vetter 
---
 drivers/gpu/drm/virtio/virtgpu_plane.c | 13 +++----------
 1 file changed, 3 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c 
b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 024c2aa0c929..4b805bf466d3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -184,7 +184,6 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane 
*plane,
struct virtio_gpu_framebuffer *vgfb;
struct virtio_gpu_object *bo = NULL;
uint32_t handle;
-   int ret = 0;
 
if (plane->state->crtc)
output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
@@ -208,15 +207,9 @@ static void virtio_gpu_cursor_plane_update(struct 
drm_plane *plane,
 cpu_to_le32(plane->state->crtc_w),
 cpu_to_le32(plane->state->crtc_h),
 0, 0, vgfb->fence);
-   ret = virtio_gpu_object_reserve(bo, false);
-   if (!ret) {
-   reservation_object_add_excl_fence(bo->tbo.resv,
- &vgfb->fence->f);
-   dma_fence_put(&vgfb->fence->f);
-   vgfb->fence = NULL;
-   virtio_gpu_object_unreserve(bo);
-   virtio_gpu_object_wait(bo, false);
-   }
+   dma_fence_wait(&vgfb->fence->f, true);
+   dma_fence_put(&vgfb->fence->f);
+   vgfb->fence = NULL;
}
 
if (plane->state->fb != old_state->fb) {
-- 
2.18.1



[PATCH v5 09/12] drm/virtio: rework virtio_gpu_object_create fencing

2019-06-28 Thread Gerd Hoffmann
Use gem reservation helpers and direct reservation_object_* calls
instead of ttm.

v5: fix fencing (Chia-I Wu).
v3: Because we now use the gem reservation object, it is initialized and
ready for use before calling ttm_bo_init, so we can also drop the tricky
fence logic which checks whether the command is still in flight.  We can
simply fence our object before submitting the virtio command and be done
with it.

Signed-off-by: Gerd Hoffmann 
Acked-by: Daniel Vetter 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h|  2 ++
 drivers/gpu/drm/virtio/virtgpu_object.c | 55 ++---
 drivers/gpu/drm/virtio/virtgpu_vq.c |  4 ++++
 3 files changed, 27 insertions(+), 34 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 356d27132388..c4b266b6f731 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -267,6 +267,7 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+   struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
   uint32_t resource_id);
@@ -329,6 +330,7 @@ void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
  struct virtio_gpu_object *bo,
  struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
  struct virtio_gpu_fence *fence);
 void virtio_gpu_ctrl_ack(struct virtqueue *vq);
 void virtio_gpu_cursor_ack(struct virtqueue *vq);
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c 
b/drivers/gpu/drm/virtio/virtgpu_object.c
index 82bfbf983fd2..fa0ea22c68b0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -97,7 +97,9 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 struct virtio_gpu_object **bo_ptr,
 struct virtio_gpu_fence *fence)
 {
+   struct virtio_gpu_object_array *objs = NULL;
struct virtio_gpu_object *bo;
+   struct ww_acquire_ctx ticket;
size_t acc_size;
int ret;
 
@@ -123,12 +125,29 @@ int virtio_gpu_object_create(struct virtio_gpu_device 
*vgdev,
}
bo->dumb = params->dumb;
 
+   if (fence) {
+   objs = virtio_gpu_array_alloc(1);
+   objs->objs[0] = &bo->gem_base;
+   drm_gem_object_get(objs->objs[0]);
+
+   ret = drm_gem_lock_reservations(objs->objs, objs->nents,
+   &ticket);
+   if (ret == 0)
+   reservation_object_add_excl_fence(objs->objs[0]->resv,
+ &fence->f);
+   }
+
if (params->virgl) {
-   virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence);
+   virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
+ objs, fence);
} else {
-   virtio_gpu_cmd_create_resource(vgdev, bo, params, fence);
+   virtio_gpu_cmd_create_resource(vgdev, bo, params,
+  objs, fence);
}
 
+   if (fence)
+   drm_gem_unlock_reservations(objs->objs, objs->nents, &ticket);
+
virtio_gpu_init_ttm_placement(bo);
ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
  ttm_bo_type_device, &bo->placement, 0,
@@ -139,38 +158,6 @@ int virtio_gpu_object_create(struct virtio_gpu_device 
*vgdev,
if (ret != 0)
return ret;
 
-   if (fence) {
-   struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
-   struct list_head validate_list;
-   struct ttm_validate_buffer mainbuf;
-   struct ww_acquire_ctx ticket;
-   unsigned long irq_flags;
-   bool signaled;
-
-   INIT_LIST_HEAD(_list);
-   memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
-
-   /* use a gem reference since unref list undoes them */
-   drm_gem_object_get(&bo->gem_base);
-   mainbuf.bo = &bo->tbo;
-   list_add(&mainbuf.head, &validate_list);
-
-   ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
-   if (ret == 0) {
-   spin_lock_irqsave(&drv->lock, irq_flags);
-   signaled = virtio_fence_signaled(&fence->f);
-   if (!signaled)
-   /* virtio create command still in flight */
-   

[PATCH v5 02/12] drm/virtio: switch virtio_gpu_wait_ioctl() to gem helper.

2019-06-28 Thread Gerd Hoffmann
Use drm_gem_reservation_object_wait() in virtio_gpu_wait_ioctl().
This also makes the ioctl run lockless.

v5: handle lookup failure.
v2: use reservation_object_test_signaled_rcu for VIRTGPU_WAIT_NOWAIT.

Signed-off-by: Gerd Hoffmann 
Reviewed-by: Daniel Vetter 
---
 drivers/gpu/drm/virtio/virtgpu_ioctl.c | 28 +++++++++++++---------------
 1 file changed, 13 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c 
b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 1b50c34a29dc..c06dde541491 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -464,23 +464,21 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, 
void *data,
struct drm_file *file)
 {
struct drm_virtgpu_3d_wait *args = data;
-   struct drm_gem_object *gobj = NULL;
-   struct virtio_gpu_object *qobj = NULL;
+   struct drm_gem_object *obj;
+   long timeout = 15 * HZ;
int ret;
-   bool nowait = false;
 
-   gobj = drm_gem_object_lookup(file, args->handle);
-   if (gobj == NULL)
-   return -ENOENT;
-
-   qobj = gem_to_virtio_gpu_obj(gobj);
-
-   if (args->flags & VIRTGPU_WAIT_NOWAIT)
-   nowait = true;
-   ret = virtio_gpu_object_wait(qobj, nowait);
-
-   drm_gem_object_put_unlocked(gobj);
-   return ret;
+   if (args->flags & VIRTGPU_WAIT_NOWAIT) {
+   obj = drm_gem_object_lookup(file, args->handle);
+   if (obj == NULL)
+   return -ENOENT;
+   ret = reservation_object_test_signaled_rcu(obj->resv, true);
+   drm_gem_object_put_unlocked(obj);
+   return ret ? 0 : -EBUSY;
+   }
+
+   return drm_gem_reservation_object_wait(file, args->handle,
+  true, timeout);
 }
 
 static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
-- 
2.18.1



[PATCH v5 01/12] drm/virtio: pass gem reservation object to ttm init

2019-06-28 Thread Gerd Hoffmann
With this, gem and ttm will use the same reservation object,
so mixing and matching ttm / gem reservation helpers should
work fine.

Signed-off-by: Gerd Hoffmann 
Reviewed-by: Daniel Vetter 
---
 drivers/gpu/drm/virtio/virtgpu_object.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c 
b/drivers/gpu/drm/virtio/virtgpu_object.c
index b2da31310d24..242766d644a7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -132,7 +132,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device 
*vgdev,
virtio_gpu_init_ttm_placement(bo);
ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
  ttm_bo_type_device, &bo->placement, 0,
- true, acc_size, NULL, NULL,
+ true, acc_size, NULL,
+ bo->gem_base.resv,
  &virtio_gpu_ttm_bo_destroy);
/* ttm_bo_init failure will call the destroy */
if (ret != 0)
-- 
2.18.1



[PATCH v5 06/12] drm/virtio: remove ttm calls from in virtio_gpu_object_{reserve, unreserve}

2019-06-28 Thread Gerd Hoffmann
Call reservation_object_* directly instead
of using ttm_bo_{reserve,unreserve}.

v4: check for EINTR only.
v3: check for EINTR too.

Signed-off-by: Gerd Hoffmann 
Reviewed-by: Daniel Vetter 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 06cc0e961df6..07f6001ea91e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -402,9 +402,9 @@ static inline int virtio_gpu_object_reserve(struct 
virtio_gpu_object *bo)
 {
int r;
 
-   r = ttm_bo_reserve(&bo->tbo, true, false, NULL);
+   r = reservation_object_lock_interruptible(bo->gem_base.resv, NULL);
if (unlikely(r != 0)) {
-   if (r != -ERESTARTSYS) {
+   if (r != -EINTR) {
struct virtio_gpu_device *qdev =
bo->gem_base.dev->dev_private;
dev_err(qdev->dev, "%p reserve failed\n", bo);
@@ -416,7 +416,7 @@ static inline int virtio_gpu_object_reserve(struct 
virtio_gpu_object *bo)
 
 static inline void virtio_gpu_object_unreserve(struct virtio_gpu_object *bo)
 {
-   ttm_bo_unreserve(>tbo);
+   reservation_object_unlock(bo->gem_base.resv);
 }
 
 /* virgl debugfs */
-- 
2.18.1



[PATCH v5 04/12] drm/virtio: remove virtio_gpu_object_wait

2019-06-28 Thread Gerd Hoffmann
No users left.

Signed-off-by: Gerd Hoffmann 
Reviewed-by: Daniel Vetter 
---
 drivers/gpu/drm/virtio/virtgpu_drv.h|  1 -
 drivers/gpu/drm/virtio/virtgpu_object.c | 13 -------------
 2 files changed, 14 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h 
b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 9e2d3062b01d..2cd96256ba37 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -364,7 +364,6 @@ int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
 int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
   struct virtio_gpu_object *bo);
 void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
-int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
 
 /* virtgpu_prime.c */
 struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c 
b/drivers/gpu/drm/virtio/virtgpu_object.c
index 242766d644a7..82bfbf983fd2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -233,16 +233,3 @@ void virtio_gpu_object_free_sg_table(struct 
virtio_gpu_object *bo)
kfree(bo->pages);
bo->pages = NULL;
 }
-
-int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
-{
-   int r;
-
-   r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
-   if (unlikely(r != 0))
-   return r;
-   r = ttm_bo_wait(&bo->tbo, true, no_wait);
-   ttm_bo_unreserve(&bo->tbo);
-   return r;
-}
-
-- 
2.18.1



[PATCH v2 1/3] vsock/virtio: use RCU to avoid use-after-free on the_virtio_vsock

2019-06-28 Thread Stefano Garzarella
Some callbacks used by the upper layers can run while we are in the
.remove(). A potential use-after-free can happen, because we free
the_virtio_vsock without knowing whether the callbacks have finished.

To solve this issue we move the assignment of the_virtio_vsock to the
end of .probe(), when we have finished all the initialization, and to
the beginning of .remove(), before releasing resources.
For the same reason, we do the same for vdev->priv.

We use RCU to be sure that all callbacks that use the_virtio_vsock
have ended before freeing it. This is not required for callbacks that
use vdev->priv, because after vdev->config->del_vqs() we are sure
that they have ended and will no longer be invoked.

We also take the mutex during .remove() to prevent .probe() from
running while we are resetting the device.
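A minimal, generic sketch of the RCU pattern applied here (hypothetical
'struct foo'; not the driver code):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo { int x; };

static struct foo __rcu *the_foo;

/* callback path: only touch the object inside the read-side section */
static void reader(void)
{
	struct foo *f;

	rcu_read_lock();
	f = rcu_dereference(the_foo);
	if (f)
		pr_debug("x=%d\n", f->x);   /* valid until rcu_read_unlock() */
	rcu_read_unlock();
}

/* .remove() path: unpublish, wait for all readers, then free */
static void remover(void)
{
	struct foo *f = rcu_dereference_protected(the_foo, 1);

	rcu_assign_pointer(the_foo, NULL);
	synchronize_rcu();                  /* all readers have finished */
	kfree(f);
}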

Signed-off-by: Stefano Garzarella 
---
 net/vmw_vsock/virtio_transport.c | 67 +---
 1 file changed, 44 insertions(+), 23 deletions(-)

diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 9c287e3e393c..7ad510ec12e0 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -65,19 +65,22 @@ struct virtio_vsock {
u32 guest_cid;
 };
 
-static struct virtio_vsock *virtio_vsock_get(void)
-{
-   return the_virtio_vsock;
-}
-
 static u32 virtio_transport_get_local_cid(void)
 {
-   struct virtio_vsock *vsock = virtio_vsock_get();
+   struct virtio_vsock *vsock;
+   u32 ret;
 
-   if (!vsock)
-   return VMADDR_CID_ANY;
+   rcu_read_lock();
+   vsock = rcu_dereference(the_virtio_vsock);
+   if (!vsock) {
+   ret = VMADDR_CID_ANY;
+   goto out_rcu;
+   }
 
-   return vsock->guest_cid;
+   ret = vsock->guest_cid;
+out_rcu:
+   rcu_read_unlock();
+   return ret;
 }
 
 static void virtio_transport_loopback_work(struct work_struct *work)
@@ -197,14 +200,18 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
struct virtio_vsock *vsock;
int len = pkt->len;
 
-   vsock = virtio_vsock_get();
+   rcu_read_lock();
+   vsock = rcu_dereference(the_virtio_vsock);
if (!vsock) {
virtio_transport_free_pkt(pkt);
-   return -ENODEV;
+   len = -ENODEV;
+   goto out_rcu;
}
 
-   if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
-   return virtio_transport_send_pkt_loopback(vsock, pkt);
+   if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
+   len = virtio_transport_send_pkt_loopback(vsock, pkt);
+   goto out_rcu;
+   }
 
if (pkt->reply)
atomic_inc(&vsock->queued_replies);
@@ -214,6 +221,9 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
spin_unlock_bh(&vsock->send_pkt_list_lock);

queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+
+out_rcu:
+   rcu_read_unlock();
return len;
 }
 
@@ -222,12 +232,14 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
 {
struct virtio_vsock *vsock;
struct virtio_vsock_pkt *pkt, *n;
-   int cnt = 0;
+   int cnt = 0, ret;
LIST_HEAD(freeme);
 
-   vsock = virtio_vsock_get();
+   rcu_read_lock();
+   vsock = rcu_dereference(the_virtio_vsock);
if (!vsock) {
-   return -ENODEV;
+   ret = -ENODEV;
+   goto out_rcu;
}
 
spin_lock_bh(&vsock->send_pkt_list_lock);
@@ -255,7 +267,11 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
 
-   return 0;
+   ret = 0;
+
+out_rcu:
+   rcu_read_unlock();
+   return ret;
 }
 
 static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
@@ -590,8 +606,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
vsock->rx_buf_max_nr = 0;
atomic_set(&vsock->queued_replies, 0);
 
-   vdev->priv = vsock;
-   the_virtio_vsock = vsock;
mutex_init(&vsock->tx_lock);
mutex_init(&vsock->rx_lock);
mutex_init(&vsock->event_lock);
@@ -613,6 +627,9 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
virtio_vsock_event_fill(vsock);
mutex_unlock(&vsock->event_lock);
 
+   vdev->priv = vsock;
+   rcu_assign_pointer(the_virtio_vsock, vsock);
+
mutex_unlock(&the_virtio_vsock_mutex);
return 0;
 
@@ -627,6 +644,12 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
struct virtio_vsock *vsock = vdev->priv;
struct virtio_vsock_pkt *pkt;
 
+   mutex_lock(&the_virtio_vsock_mutex);
+
+   vdev->priv = NULL;
+   rcu_assign_pointer(the_virtio_vsock, NULL);
+   synchronize_rcu();
+
flush_work(&vsock->loopback_work);
flush_work(&vsock->rx_work);
flush_work(&vsock->tx_work);
@@ -666,12 +689,10 @@ static void virtio_vsock_remove(struct virtio_device 
*vdev)
}
spin_unlock_bh(&vsock->loopback_list_lock);
 

[PATCH v2 2/3] vsock/virtio: stop workers during the .remove()

2019-06-28 Thread Stefano Garzarella
Before calling vdev->config->reset(vdev) we need to be sure that
no one is accessing the device; for this reason, we add new flags
to struct virtio_vsock to stop the workers during the .remove().

This patch also adds a few comments before vdev->config->reset(vdev)
and vdev->config->del_vqs(vdev).
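Condensed sketch of the start/stop protocol, using the names from this
patch (fragments, not the full functions): the flag is only read and
written under the corresponding mutex, so once .remove() has cleared
it, no worker can reach the device.

/* worker side */
static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);

	mutex_lock(&vsock->tx_lock);
	if (!vsock->tx_run)
		goto out;
	/* ... safe to touch vsock->vqs[VSOCK_VQ_TX] here ... */
out:
	mutex_unlock(&vsock->tx_lock);
}

/* .remove() side, before vdev->config->reset(vdev) */
mutex_lock(&vsock->tx_lock);
vsock->tx_run = false;
mutex_unlock(&vsock->tx_lock);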

Suggested-by: Stefan Hajnoczi 
Suggested-by: Michael S. Tsirkin 
Signed-off-by: Stefano Garzarella 
---
 net/vmw_vsock/virtio_transport.c | 51 +++-
 1 file changed, 50 insertions(+), 1 deletion(-)

diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 7ad510ec12e0..1b44ec6f3f6c 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -38,6 +38,7 @@ struct virtio_vsock {
 * must be accessed with tx_lock held.
 */
struct mutex tx_lock;
+   bool tx_run;
 
struct work_struct send_pkt_work;
spinlock_t send_pkt_list_lock;
@@ -53,6 +54,7 @@ struct virtio_vsock {
 * must be accessed with rx_lock held.
 */
struct mutex rx_lock;
+   bool rx_run;
int rx_buf_nr;
int rx_buf_max_nr;
 
@@ -60,6 +62,7 @@ struct virtio_vsock {
 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
 */
struct mutex event_lock;
+   bool event_run;
struct virtio_vsock_event event_list[8];
 
u32 guest_cid;
@@ -94,6 +97,10 @@ static void virtio_transport_loopback_work(struct 
work_struct *work)
spin_unlock_bh(&vsock->loopback_list_lock);
 
mutex_lock(&vsock->rx_lock);
+
+   if (!vsock->rx_run)
+   goto out;
+
while (!list_empty(&pkts)) {
struct virtio_vsock_pkt *pkt;
 
@@ -102,6 +109,7 @@ static void virtio_transport_loopback_work(struct 
work_struct *work)
 
virtio_transport_recv_pkt(pkt);
}
+out:
mutex_unlock(&vsock->rx_lock);
 }
 
@@ -130,6 +138,9 @@ virtio_transport_send_pkt_work(struct work_struct *work)
 
mutex_lock(&vsock->tx_lock);
 
+   if (!vsock->tx_run)
+   goto out;
+
vq = vsock->vqs[VSOCK_VQ_TX];
 
for (;;) {
@@ -188,6 +199,7 @@ virtio_transport_send_pkt_work(struct work_struct *work)
if (added)
virtqueue_kick(vq);
 
+out:
mutex_unlock(&vsock->tx_lock);
 
if (restart_rx)
@@ -323,6 +335,10 @@ static void virtio_transport_tx_work(struct work_struct 
*work)
 
vq = vsock->vqs[VSOCK_VQ_TX];
mutex_lock(&vsock->tx_lock);
+
+   if (!vsock->tx_run)
+   goto out;
+
do {
struct virtio_vsock_pkt *pkt;
unsigned int len;
@@ -333,6 +349,8 @@ static void virtio_transport_tx_work(struct work_struct 
*work)
added = true;
}
} while (!virtqueue_enable_cb(vq));
+
+out:
mutex_unlock(&vsock->tx_lock);
 
if (added)
@@ -361,6 +379,9 @@ static void virtio_transport_rx_work(struct work_struct 
*work)
 
mutex_lock(&vsock->rx_lock);
 
+   if (!vsock->rx_run)
+   goto out;
+
do {
virtqueue_disable_cb(vq);
for (;;) {
@@ -470,6 +491,9 @@ static void virtio_transport_event_work(struct work_struct 
*work)
 
mutex_lock(&vsock->event_lock);
 
+   if (!vsock->event_run)
+   goto out;
+
do {
struct virtio_vsock_event *event;
unsigned int len;
@@ -484,7 +508,7 @@ static void virtio_transport_event_work(struct work_struct 
*work)
} while (!virtqueue_enable_cb(vq));
 
virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
-
+out:
mutex_unlock(&vsock->event_lock);
 }
 
@@ -619,12 +643,18 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
INIT_WORK(&vsock->loopback_work, virtio_transport_loopback_work);
 
+   mutex_lock(&vsock->tx_lock);
+   vsock->tx_run = true;
+   mutex_unlock(&vsock->tx_lock);
+
mutex_lock(&vsock->rx_lock);
virtio_vsock_rx_fill(vsock);
+   vsock->rx_run = true;
mutex_unlock(&vsock->rx_lock);
 
mutex_lock(&vsock->event_lock);
virtio_vsock_event_fill(vsock);
+   vsock->event_run = true;
mutex_unlock(&vsock->event_lock);
 
vdev->priv = vsock;
@@ -659,6 +689,24 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
/* Reset all connected sockets when the device disappears */
vsock_for_each_connected_socket(virtio_vsock_reset_sock);
 
+   /* Stop all work handlers to make sure no one is accessing the device,
+* so we can safely call vdev->config->reset().
+*/
+   mutex_lock(&vsock->rx_lock);
+   vsock->rx_run = false;
+   mutex_unlock(&vsock->rx_lock);
+
+   mutex_lock(&vsock->tx_lock);
+   vsock->tx_run = false;
+   mutex_unlock(&vsock->tx_lock);
+
+   mutex_lock(&vsock->event_lock);
+   vsock->event_run = false;
+   mutex_unlock(&vsock->event_lock);
+
+   /* Flush all device writes and interrupts, device will not use any
+* more buffers.
+*/
+   vdev->config->reset(vdev);

[PATCH v2 3/3] vsock/virtio: fix flush of works during the .remove()

2019-06-28 Thread Stefano Garzarella
This patch moves the flush of works to after vdev->config->del_vqs(vdev),
because we need to be sure that no worker runs before we free the
'vsock' object.

Since we stopped the workers using the [tx|rx|event]_run flags,
we are sure no one is accessing the device while we are calling
vdev->config->reset(vdev), so we can safely move the workers' flush.

Until vdev->config->del_vqs(vdev) is called, workers can still be
scheduled by VQ callbacks, so we must flush them after del_vqs() to
avoid a use-after-free of the 'vsock' object.
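
The resulting teardown order in virtio_vsock_remove() then looks
roughly like this (an editor's summary of the series, not a verbatim
quote of the final function):

	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();		/* no reader can see the device anymore */

	/* clear tx_run/rx_run/event_run under their mutexes (patch 2) */

	vdev->config->reset(vdev);	/* safe: no worker touches the device */

	/* ... detach unused buffers, free queued packets ... */

	vdev->config->del_vqs(vdev);	/* no more VQ callbacks after this */

	flush_work(&vsock->loopback_work);
	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	kfree(vsock);			/* safe: no work can be queued again */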

Suggested-by: Michael S. Tsirkin 
Signed-off-by: Stefano Garzarella 
---
 net/vmw_vsock/virtio_transport.c | 15 +--
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 1b44ec6f3f6c..96dafa978268 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -680,12 +680,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
rcu_assign_pointer(the_virtio_vsock, NULL);
synchronize_rcu();
 
-   flush_work(&vsock->loopback_work);
-   flush_work(&vsock->rx_work);
-   flush_work(&vsock->tx_work);
-   flush_work(&vsock->event_work);
-   flush_work(&vsock->send_pkt_work);
-
/* Reset all connected sockets when the device disappears */
vsock_for_each_connected_socket(virtio_vsock_reset_sock);
 
@@ -740,6 +734,15 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
/* Delete virtqueues and flush outstanding callbacks if any */
vdev->config->del_vqs(vdev);
 
+   /* Other works can be queued before 'config->del_vqs()', so we flush
+* all works before freeing the vsock object to avoid use after free.
+*/
+   flush_work(&vsock->loopback_work);
+   flush_work(&vsock->rx_work);
+   flush_work(&vsock->tx_work);
+   flush_work(&vsock->event_work);
+   flush_work(&vsock->send_pkt_work);
+
mutex_unlock(&the_virtio_vsock_mutex);
 
kfree(vsock);
-- 
2.20.1



[PATCH v2 0/3] vsock/virtio: several fixes in the .probe() and .remove()

2019-06-28 Thread Stefano Garzarella
During the review of "[PATCH] vsock/virtio: Initialize core virtio vsock
before registering the driver", Stefan pointed out some possible issues
in the .probe() and .remove() callbacks of the virtio-vsock driver.

This series tries to solve these issues:
- Patch 1 adds RCU critical sections to avoid a use-after-free of the
  'the_virtio_vsock' pointer (see the reader-side sketch after this list).
- Patch 2 stops the workers before calling vdev->config->reset(vdev), to
  be sure that no one is accessing the device.
- Patch 3 moves the flush of works to the end of the .remove() to avoid
  a use-after-free of the 'vsock' object.
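
For reference, the reader side of patch 1 follows the usual RCU
pattern; a minimal sketch, assuming the names used by the series:

	struct virtio_vsock *vsock;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		rcu_read_unlock();
		return -ENODEV;		/* device already removed */
	}
	/* ... use vsock under the proper locks ... */
	rcu_read_unlock();

On the writer side, .remove() publishes NULL with rcu_assign_pointer()
and waits for in-flight readers with synchronize_rcu() before tearing
the device down.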

v2:
- Patch 1: use RCU to protect 'the_virtio_vsock' pointer
- Patch 2: no changes
- Patch 3: flush works only at the end of .remove()
- Removed patch 4 because virtqueue_detach_unused_buf() returns all the
  allocated buffers.
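
On that last point: after the device has been reset,
virtqueue_detach_unused_buf() hands back one still-queued buffer per
call until it returns NULL, so the driver can reclaim everything in a
loop. A minimal sketch, assuming the RX queue and the driver's
virtio_transport_free_pkt() helper:

	struct virtio_vsock_pkt *pkt;

	mutex_lock(&vsock->rx_lock);
	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		virtio_transport_free_pkt(pkt);
	mutex_unlock(&vsock->rx_lock);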

v1: https://patchwork.kernel.org/cover/10964733/

Stefano Garzarella (3):
  vsock/virtio: use RCU to avoid use-after-free on the_virtio_vsock
  vsock/virtio: stop workers during the .remove()
  vsock/virtio: fix flush of works during the .remove()

 net/vmw_vsock/virtio_transport.c | 131 ---
 1 file changed, 102 insertions(+), 29 deletions(-)

-- 
2.20.1



Call for Papers - ICOTTS'2019, Buenos Aires, Argentina | Deadline: July 21

2019-06-28 Thread ML
ICOTTS'19 - The 2019 International Conference on Tourism, Technology & Systems

5 - 7 December 2019, Buenos Aires, Argentina

Proceedings by Springer. Indexed by Scopus, ISI, etc.

https://www.icotts.org/ 

ICOTTS'19 - The 2019 International Conference on Tourism, Technology & Systems
will be held at the Universidad Abierta Interamericana in Buenos Aires,
Argentina, between the 5th and the 7th of December 2019. ICOTTS is a
multidisciplinary conference with a special focus on new technologies and
systems in the tourism sector.

We are pleased to invite you to submit your papers to ICOTTS'19. They can be 
written in English, Spanish or Portuguese. All submissions will be reviewed on 
the basis of relevance, originality, importance and clarity.

Scope & Topics

A multidisciplinary conference, transversal to all activity sectors that
involve information technologies and systems in the tourism area, namely:
competitiveness of destinations based on digital technology; Hospitality;
Destinations Management; Business & Finance; Public Administration;
Economics; Management Science; Education; Health & Rehabilitation;
Agriculture & Food Technology.

Topics of interest include but are not limited to:

· Technology in Tourism and Tourist experience

· Generations and Technology in Tourism

· Digital Marketing applied to Tourism and Travel

· Mobile Technologies applied to sustainable Tourism

· Tourism research in providing innovative solutions to social problems

· Tourism, Wellness and Hospitality

· Information Technologies in Tourism

· Digital transformation of Tourism Business

· Traveling for health/medical and wellness

· Information Technologies in Ecotourism and Agritourism

· Information Technologies in Food Tourism

· Information Technologies in Education and Educational Tourism

· eTourism and Tourism 2.0

· Big data and Management for Travel and Tourism

· Geo-tagging and Tourist mobility

· Health Tourism

· Information Systems in Tourism and Hospitality

· Smart Destinations

· Resilience and Tourism

· Dark Tourism

· Military Tourism

· Robotics in Tourism

· Destination Marketing Systems

· Computer Reservations Systems

· Global Distribution Systems

· Electronic Information Distribution in Tourism and Hospitality

· Organizational Models and Information Systems

· Information Systems and Technologies

Submission and Decision

Submitted papers must comply with the format of Smart Innovation, Systems and
Technologies (see Instructions for Authors at the Springer website), be written
in English (10-page limit), must not have been published before, must not be
under review for any other conference or publication, and must not include any
information leading to the authors’ identification. Therefore, the authors’
names, affiliations and bibliographic references should not be included in the
version for evaluation by the Scientific Committee. This information should
only be included in the camera-ready version, saved in Word or LaTeX format and
also in PDF format. These files must be accompanied by the filled-out Consent
to Publish form, in a ZIP file, and uploaded at the conference management
system.


Submitted papers written in Spanish or Portuguese (15-page limit) must comply
with the format of RISTI - Revista Ibérica de Sistemas e Tecnologias de
Informação, must not have been published before, must not be under review for
any other conference or publication, and must not include any information
leading to the authors’ identification. Therefore, the authors’ names,
affiliations and e-mails should not be included in the version for evaluation
by the Scientific Committee. This information should only be included in the
camera-ready version, saved in Word format. These files must be uploaded at the
conference management system in a ZIP file.


All papers will be subjected to a “double-blind review” by at least two members
of the Scientific Committee. Based on the Scientific Committee’s evaluation, a
paper can be rejected or accepted by the Conference Chairs. In the latter case,
it can be accepted as the type originally submitted or as another type.


The authors of accepted poster papers must also build and print a poster to be
exhibited during the Conference. This poster must follow an A1 or A2 vertical
format. The Conference includes Work Sessions where these posters are presented
and orally discussed, with a 7-minute limit.