From: Alex Dai <[email protected]>

The cached work queue head pointer is initialized to the last byte of
the work queue buffer. This ensures the whole work queue buffer is
seen as available after coming back from a reset or init.

Do not hold a kmap_atomic mapping while sleeping when the work queue
is full; such mappings must not be held across a sleep.

Signed-off-by: Alex Dai <[email protected]>
---
 drivers/gpu/drm/i915/i915_guc_submission.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c 
b/drivers/gpu/drm/i915/i915_guc_submission.c
index d7543ef..41f4a96 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -486,11 +486,11 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc)
        if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
                return 0;
 
-       base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
-       desc = base + gc->proc_desc_offset;
-
        while (timeout_counter-- > 0) {
+               base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
+               desc = base + gc->proc_desc_offset;
                gc->wq_head = desc->head;
+               kunmap_atomic(base);
 
                if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
                        ret = 0;
@@ -501,8 +501,6 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc)
                        usleep_range(1000, 2000);
        };
 
-       kunmap_atomic(base);
-
        return ret;
 }
 
@@ -730,6 +728,8 @@ static struct i915_guc_client *guc_client_alloc(struct 
drm_device *dev,
        client->client_obj = obj;
        client->wq_offset = GUC_DB_SIZE;
        client->wq_size = GUC_WQ_SIZE;
+       client->wq_head = GUC_WQ_SIZE - 1;
+       client->wq_tail = 0;
 
        client->doorbell_offset = select_doorbell_cacheline(guc);
 
-- 
2.5.0

_______________________________________________
Intel-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to