From: Christian König <ckoenig.leichtzumer...@gmail.com>

This feature is only used by vmwgfx and is superfluous for everybody else, so move the cpu_writers handling into the driver.

v2: use vmw_buffer_object instead of vmw_user_bo.

Signed-off-by: Christian König <christian.koe...@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c               | 27 ----------------------
 drivers/gpu/drm/ttm/ttm_bo_util.c          |  1 -
 drivers/gpu/drm/ttm/ttm_execbuf_util.c     |  7 +-----
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c         | 36 +++++++++++++++++++++++++-----
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h        |  2 ++
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c    |  8 +++++++
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c   |  4 ++++
 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c |  4 ++++
 include/drm/ttm/ttm_bo_api.h               | 31 -------------------------
 9 files changed, 49 insertions(+), 71 deletions(-)
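
For reviewers, a minimal sketch of the driver-side pattern the patch ends up with, condensed from the vmwgfx hunks below. The helper names vmw_bo_synccpu_grab()/vmw_bo_synccpu_release() are illustrative only (the patch open-codes the grab inside vmw_user_bo_synccpu_grab()); the TTM calls are the existing ttm_bo_reserve()/ttm_bo_wait()/ttm_bo_unreserve():

/* The driver keeps its own counter in its buffer object subclass. */
struct vmw_buffer_object {
	struct ttm_buffer_object base;
	atomic_t cpu_writers;
	/* ... */
};

/* Illustrative helper: reserve, wait for idle, then take the CPU grab. */
static int vmw_bo_synccpu_grab(struct vmw_buffer_object *vbo, bool nonblock)
{
	int ret;

	ret = ttm_bo_reserve(&vbo->base, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;
	ret = ttm_bo_wait(&vbo->base, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&vbo->cpu_writers);
	ttm_bo_unreserve(&vbo->base);
	return ret;
}

/* Illustrative helper: drop the CPU grab again. */
static void vmw_bo_synccpu_release(struct vmw_buffer_object *vbo)
{
	atomic_dec(&vbo->cpu_writers);
}

Command submission then rejects busy buffers itself by walking the validation list with vmw_bo_verify_synccpu() and returning -EBUSY, instead of relying on the check that used to sit in ttm_eu_reserve_buffers().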

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 881cf26d698e..a9aaecdd7481 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -153,7 +153,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
 
        BUG_ON(kref_read(&bo->list_kref));
        BUG_ON(kref_read(&bo->kref));
-       BUG_ON(atomic_read(&bo->cpu_writers));
        BUG_ON(bo->mem.mm_node != NULL);
        BUG_ON(!list_empty(&bo->lru));
        BUG_ON(!list_empty(&bo->ddestroy));
@@ -1311,7 +1310,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 
        kref_init(&bo->kref);
        kref_init(&bo->list_kref);
-       atomic_set(&bo->cpu_writers, 0);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
@@ -1823,31 +1821,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_wait);
 
-int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
-{
-       int ret = 0;
-
-       /*
-        * Using ttm_bo_reserve makes sure the lru lists are updated.
-        */
-
-       ret = ttm_bo_reserve(bo, true, no_wait, NULL);
-       if (unlikely(ret != 0))
-               return ret;
-       ret = ttm_bo_wait(bo, true, no_wait);
-       if (likely(ret == 0))
-               atomic_inc(&bo->cpu_writers);
-       ttm_bo_unreserve(bo);
-       return ret;
-}
-EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
-
-void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
-{
-       atomic_dec(&bo->cpu_writers);
-}
-EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
-
 /**
  * A buffer object shrink method that tries to swap out the first
  * buffer object on the bo_global::swap_lru list.
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index fe81c565e7ef..b00039dcb487 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -511,7 +511,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        mutex_init(&fbo->base.wu_mutex);
        fbo->base.moving = NULL;
        drm_vma_node_reset(&fbo->base.base.vma_node);
-       atomic_set(&fbo->base.cpu_writers, 0);
 
        kref_init(&fbo->base.list_kref);
        kref_init(&fbo->base.kref);
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 131dae8f4170..0519e1b5a49c 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -113,12 +113,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                struct ttm_buffer_object *bo = entry->bo;
 
                ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
-               if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-                       dma_resv_unlock(bo->base.resv);
-
-                       ret = -EBUSY;
-
-               } else if (ret == -EALREADY && dups) {
+               if (ret == -EALREADY && dups) {
                        struct ttm_validate_buffer *safe = entry;
                        entry = list_prev_entry(entry, head);
                        list_del(&safe->head);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index a05ef9d0a2e7..5ddd2573b64c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -76,6 +76,26 @@ vmw_user_buffer_object(struct ttm_buffer_object *bo)
        return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
 }
 
+/**
+ * vmw_bo_verify_synccpu - Verify that no buffer in the list has a CPU grab
+ *
+ * @list: list of ttm_validate_buffer objects
+ *
+ * Return:
+ * -EBUSY if a CPU grab is found, 0 otherwise.
+ */
+int vmw_bo_verify_synccpu(struct list_head *list)
+{
+       struct ttm_validate_buffer *entry;
+
+       list_for_each_entry(entry, list, head) {
+               struct vmw_buffer_object *bo = vmw_buffer_object(entry->bo);
+
+               if (unlikely(atomic_read(&bo->cpu_writers) > 0))
+                       return -EBUSY;
+       }
+       return 0;
+}
 
 /**
  * vmw_bo_pin_in_placement - Validate a buffer to placement.
@@ -565,7 +585,7 @@ static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
 
        switch (ref_type) {
        case TTM_REF_SYNCCPU_WRITE:
-               ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+               atomic_dec(&user_bo->vbo.cpu_writers);
                break;
        default:
                WARN_ONCE(true, "Undefined buffer object reference release.\n");
@@ -681,12 +701,12 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
                                    struct ttm_object_file *tfile,
                                    uint32_t flags)
 {
+       bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
        struct ttm_buffer_object *bo = &user_bo->vbo.base;
        bool existed;
        int ret;
 
        if (flags & drm_vmw_synccpu_allow_cs) {
-               bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
                long lret;
 
                lret = dma_resv_wait_timeout_rcu
@@ -699,15 +719,20 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
                return 0;
        }
 
-       ret = ttm_bo_synccpu_write_grab
-               (bo, !!(flags & drm_vmw_synccpu_dontblock));
+       ret = ttm_bo_reserve(bo, true, nonblock, NULL);
+       if (unlikely(ret != 0))
+               return ret;
+       ret = ttm_bo_wait(bo, true, nonblock);
+       if (likely(ret == 0))
+               atomic_inc(&user_bo->vbo.cpu_writers);
+       ttm_bo_unreserve(bo);
        if (unlikely(ret != 0))
                return ret;
 
        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
                                 TTM_REF_SYNCCPU_WRITE, &existed, false);
        if (ret != 0 || existed)
-               ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+               atomic_dec(&user_bo->vbo.cpu_writers);
 
        return ret;
 }
@@ -731,7 +756,6 @@ static int vmw_user_bo_synccpu_release(uint32_t handle,
        return 0;
 }
 
-
 /**
  * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
  * functionality.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index adb0436528c7..894ed804186d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -90,6 +90,7 @@ struct vmw_buffer_object {
        struct ttm_buffer_object base;
        struct list_head res_list;
        s32 pin_count;
+       atomic_t cpu_writers;
        /* Not ref-counted.  Protected by binding_mutex */
        struct vmw_resource *dx_query_ctx;
        /* Protected by reservation */
@@ -773,6 +774,7 @@ extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
 extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
 extern struct vmw_buffer_object *
 vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);
+extern int vmw_bo_verify_synccpu(struct list_head *list);
 
 /**
  * vmw_user_bo_noref_release - release a buffer object pointer looked up
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index ff86d49dc5e8..0738436b90ee 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3737,6 +3737,10 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        if (unlikely(ret != 0))
                goto out_err_nores;
 
+       ret = vmw_bo_verify_synccpu(&val_ctx.bo_list);
+       if (unlikely(ret != 0))
+               goto out_err;
+
        ret = vmw_validation_bo_validate(&val_ctx, true);
        if (unlikely(ret != 0))
                goto out_err;
@@ -3936,6 +3940,10 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
        if (ret)
                goto out_no_reserve;
 
+       ret = vmw_bo_verify_synccpu(&val_ctx.bo_list);
+       if (unlikely(ret != 0))
+               goto out_no_emit;
+
        if (dev_priv->query_cid_valid) {
                BUG_ON(fence != NULL);
                ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 0b5472450633..b496ce5c1735 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -469,6 +469,10 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
        if (unlikely(ret != 0))
                goto out_no_reserve;
 
+       ret = vmw_bo_verify_synccpu(&val_list);
+       if (unlikely(ret != 0))
+               goto out_no_validate;
+
        if (res->func->needs_backup && list_empty(&res->mob_head))
                return 0;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index f611b2290a1b..a7beaec91f56 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -725,6 +725,10 @@ int vmw_validation_prepare(struct vmw_validation_context *ctx,
        if (ret)
                goto out_no_bo_reserve;
 
+       ret = vmw_bo_verify_synccpu(&ctx->bo_list);
+       if (unlikely(ret != 0))
+               goto out_no_validate;
+
        ret = vmw_validation_bo_validate(ctx, intr);
        if (ret)
                goto out_no_validate;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 43c4929a2171..1d623c140f07 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -147,7 +147,6 @@ struct ttm_tt;
  * holds a pointer to a persistent shmem object.
  * @ttm: TTM structure holding system pages.
  * @evicted: Whether the object was evicted without user-space knowing.
- * @cpu_writes: For synchronization. Number of cpu writers.
  * @lru: List head for the lru list.
  * @ddestroy: List head for the delayed destroy list.
  * @swap: List head for swap LRU list.
@@ -198,11 +197,6 @@ struct ttm_buffer_object {
        struct ttm_tt *ttm;
        bool evicted;
 
-       /**
-        * Members protected by the bo::reserved lock only when written to.
-        */
-
-       atomic_t cpu_writers;
 
        /**
         * Members protected by the bdev::lru_lock.
@@ -441,31 +435,6 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
                              const struct ttm_place *place);
 
-/**
- * ttm_bo_synccpu_write_grab
- *
- * @bo: The buffer object:
- * @no_wait: Return immediately if buffer is busy.
- *
- * Synchronizes a buffer object for CPU RW access. This means
- * command submission that affects the buffer will return -EBUSY
- * until ttm_bo_synccpu_write_release is called.
- *
- * Returns
- * -EBUSY if the buffer is busy and no_wait is true.
- * -ERESTARTSYS if interrupted by a signal.
- */
-int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
-
-/**
- * ttm_bo_synccpu_write_release:
- *
- * @bo : The buffer object.
- *
- * Releases a synccpu lock.
- */
-void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
-
 /**
  * ttm_bo_acc_size
  *
-- 
2.14.1
