From: Marek Olšák <marek.ol...@amd.com>

Rename util_queue_job_wait to util_queue_fence_wait and move the
util_queue_fence helpers into their own section; it's cleaner this way.
---
 src/gallium/auxiliary/util/u_queue.c            | 42 ++++++++++++++-----------
 src/gallium/auxiliary/util/u_queue.h            |  2 +-
 src/gallium/drivers/freedreno/freedreno_batch.c |  2 +-
 src/gallium/drivers/radeonsi/si_state_shaders.c |  6 ++--
 src/gallium/winsys/amdgpu/drm/amdgpu_cs.c       |  2 +-
 src/gallium/winsys/radeon/drm/radeon_drm_cs.c   |  2 +-
 6 files changed, 30 insertions(+), 26 deletions(-)
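
For reference, a minimal usage sketch of the renamed fence API (a patch
note only; nothing below the "---" marker is committed). The
submit_and_wait/my_execute names and the job payload are made up for
illustration; the util_queue_* calls and their signatures are the ones
this patch touches:

   /* Execute callback; util_queue passes the job pointer and the index
    * of the worker thread that runs it. */
   static void my_execute(void *job, int thread_index)
   {
      /* hypothetical: do the actual work for this job */
   }

   static void submit_and_wait(struct util_queue *queue, void *my_job)
   {
      struct util_queue_fence fence;

      util_queue_fence_init(&fence);  /* fence starts out signalled */

      /* add_job clears the signalled flag; the queue signals the fence
       * after "execute" runs, then calls the optional cleanup callback
       * (NULL here). */
      util_queue_add_job(queue, my_job, &fence, my_execute, NULL);

      util_queue_fence_wait(&fence);     /* was util_queue_job_wait */
      util_queue_fence_destroy(&fence);  /* asserts a signalled fence */
   }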

diff --git a/src/gallium/auxiliary/util/u_queue.c b/src/gallium/auxiliary/util/u_queue.c
index 89fb235..a223b7c 100644
--- a/src/gallium/auxiliary/util/u_queue.c
+++ b/src/gallium/auxiliary/util/u_queue.c
@@ -93,41 +93,62 @@ remove_from_atexit_list(struct util_queue *queue)
       iter = iter->next;
    }
 
    /* It must be the first one. */
    assert(first_queue == queue);
    first_queue = first_queue->next;
    pipe_mutex_unlock(exit_mutex);
 }
 
 /****************************************************************************
- * util_queue implementation
+ * util_queue_fence
  */
 
 static void
 util_queue_fence_signal(struct util_queue_fence *fence)
 {
    pipe_mutex_lock(fence->mutex);
    fence->signalled = true;
    pipe_condvar_broadcast(fence->cond);
    pipe_mutex_unlock(fence->mutex);
 }
 
 void
-util_queue_job_wait(struct util_queue_fence *fence)
+util_queue_fence_wait(struct util_queue_fence *fence)
 {
    pipe_mutex_lock(fence->mutex);
    while (!fence->signalled)
       pipe_condvar_wait(fence->cond, fence->mutex);
    pipe_mutex_unlock(fence->mutex);
 }
 
+void
+util_queue_fence_init(struct util_queue_fence *fence)
+{
+   memset(fence, 0, sizeof(*fence));
+   pipe_mutex_init(fence->mutex);
+   pipe_condvar_init(fence->cond);
+   fence->signalled = true;
+}
+
+void
+util_queue_fence_destroy(struct util_queue_fence *fence)
+{
+   assert(fence->signalled);
+   pipe_condvar_destroy(fence->cond);
+   pipe_mutex_destroy(fence->mutex);
+}
+
+/****************************************************************************
+ * util_queue implementation
+ */
+
 struct thread_input {
    struct util_queue *queue;
    int thread_index;
 };
 
 static PIPE_THREAD_ROUTINE(util_queue_thread_func, input)
 {
    struct util_queue *queue = ((struct thread_input*)input)->queue;
    int thread_index = ((struct thread_input*)input)->thread_index;
 
@@ -272,37 +293,20 @@ util_queue_destroy(struct util_queue *queue)
    util_queue_killall_and_wait(queue);
 
    pipe_condvar_destroy(queue->has_space_cond);
    pipe_condvar_destroy(queue->has_queued_cond);
    pipe_mutex_destroy(queue->lock);
    FREE(queue->jobs);
    FREE(queue->threads);
 }
 
 void
-util_queue_fence_init(struct util_queue_fence *fence)
-{
-   memset(fence, 0, sizeof(*fence));
-   pipe_mutex_init(fence->mutex);
-   pipe_condvar_init(fence->cond);
-   fence->signalled = true;
-}
-
-void
-util_queue_fence_destroy(struct util_queue_fence *fence)
-{
-   assert(fence->signalled);
-   pipe_condvar_destroy(fence->cond);
-   pipe_mutex_destroy(fence->mutex);
-}
-
-void
 util_queue_add_job(struct util_queue *queue,
                    void *job,
                    struct util_queue_fence *fence,
                    util_queue_execute_func execute,
                    util_queue_execute_func cleanup)
 {
    struct util_queue_job *ptr;
 
    assert(fence->signalled);
    fence->signalled = false;
diff --git a/src/gallium/auxiliary/util/u_queue.h b/src/gallium/auxiliary/util/u_queue.h
index 4ddba33..21ceace 100644
--- a/src/gallium/auxiliary/util/u_queue.h
+++ b/src/gallium/auxiliary/util/u_queue.h
@@ -79,21 +79,21 @@ void util_queue_destroy(struct util_queue *queue);
 void util_queue_fence_init(struct util_queue_fence *fence);
 void util_queue_fence_destroy(struct util_queue_fence *fence);
 
 /* optional cleanup callback is called after fence is signaled: */
 void util_queue_add_job(struct util_queue *queue,
                         void *job,
                         struct util_queue_fence *fence,
                         util_queue_execute_func execute,
                         util_queue_execute_func cleanup);
 
-void util_queue_job_wait(struct util_queue_fence *fence);
+void util_queue_fence_wait(struct util_queue_fence *fence);
 int64_t util_queue_get_thread_time_nano(struct util_queue *queue,
                                         unsigned thread_index);
 
 /* util_queue needs to be cleared to zeroes for this to work */
 static inline bool
 util_queue_is_initialized(struct util_queue *queue)
 {
    return queue->threads != NULL;
 }
 
diff --git a/src/gallium/drivers/freedreno/freedreno_batch.c b/src/gallium/drivers/freedreno/freedreno_batch.c
index f5a5c6a..c6dcf11 100644
--- a/src/gallium/drivers/freedreno/freedreno_batch.c
+++ b/src/gallium/drivers/freedreno/freedreno_batch.c
@@ -223,21 +223,21 @@ void
 __fd_batch_describe(char* buf, const struct fd_batch *batch)
 {
        util_sprintf(buf, "fd_batch<%u>", batch->seqno);
 }
 
 void
 fd_batch_sync(struct fd_batch *batch)
 {
        if (!batch->ctx->screen->reorder)
                return;
-       util_queue_job_wait(&batch->flush_fence);
+       util_queue_fence_wait(&batch->flush_fence);
 }
 
 static void
 batch_flush_func(void *job, int id)
 {
        struct fd_batch *batch = job;
 
        fd_gmem_render_tiles(batch);
        batch_reset_resources(batch);
 }
diff --git a/src/gallium/drivers/radeonsi/si_state_shaders.c b/src/gallium/drivers/radeonsi/si_state_shaders.c
index 12ea20b..09a4932 100644
--- a/src/gallium/drivers/radeonsi/si_state_shaders.c
+++ b/src/gallium/drivers/radeonsi/si_state_shaders.c
@@ -1190,21 +1190,21 @@ again:
                return 0;
 
        /* This must be done before the mutex is locked, because async GS
         * compilation calls this function too, and therefore must enter
         * the mutex first.
         *
         * Only wait if we are in a draw call. Don't wait if we are
         * in a compiler thread.
         */
        if (thread_index < 0)
-               util_queue_job_wait(&sel->ready);
+               util_queue_fence_wait(&sel->ready);
 
        pipe_mutex_lock(sel->mutex);
 
        /* Find the shader variant. */
        for (iter = sel->first_variant; iter; iter = iter->next_variant) {
                /* Don't check the "current" shader. We checked it above. */
                if (current != iter &&
                    memcmp(&iter->key, key, sizeof(*key)) == 0) {
                        /* If it's an optimized shader and its compilation has
                         * been started but isn't done, use the unoptimized
@@ -1822,21 +1822,21 @@ static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
 
        sctx->ps_shader.cso = sel;
        sctx->ps_shader.current = sel ? sel->first_variant : NULL;
        sctx->do_update_shaders = true;
        si_mark_atom_dirty(sctx, &sctx->cb_render_state);
 }
 
 static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
 {
        if (shader->is_optimized) {
-               util_queue_job_wait(&shader->optimized_ready);
+               util_queue_fence_wait(&shader->optimized_ready);
                util_queue_fence_destroy(&shader->optimized_ready);
        }
 
        if (shader->pm4) {
                switch (shader->selector->type) {
                case PIPE_SHADER_VERTEX:
                        if (shader->key.as_ls)
                                si_pm4_delete_state(sctx, ls, shader->pm4);
                        else if (shader->key.as_es)
                                si_pm4_delete_state(sctx, es, shader->pm4);
@@ -1874,21 +1874,21 @@ static void si_delete_shader_selector(struct pipe_context *ctx, void *state)
        struct si_shader_selector *sel = (struct si_shader_selector *)state;
        struct si_shader *p = sel->first_variant, *c;
        struct si_shader_ctx_state *current_shader[SI_NUM_SHADERS] = {
                [PIPE_SHADER_VERTEX] = &sctx->vs_shader,
                [PIPE_SHADER_TESS_CTRL] = &sctx->tcs_shader,
                [PIPE_SHADER_TESS_EVAL] = &sctx->tes_shader,
                [PIPE_SHADER_GEOMETRY] = &sctx->gs_shader,
                [PIPE_SHADER_FRAGMENT] = &sctx->ps_shader,
        };
 
-       util_queue_job_wait(&sel->ready);
+       util_queue_fence_wait(&sel->ready);
 
        if (current_shader[sel->type]->cso == sel) {
                current_shader[sel->type]->cso = NULL;
                current_shader[sel->type]->current = NULL;
        }
 
        while (p) {
                c = p->next_variant;
                si_delete_shader(sctx, p);
                p = c;
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
index 9d5b0bd..01f38d5 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
@@ -1111,21 +1111,21 @@ cleanup:
 
    amdgpu_cs_context_cleanup(cs);
 }
 
 /* Make sure the previous submission is completed. */
 void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
 {
    struct amdgpu_cs *cs = amdgpu_cs(rcs);
 
    /* Wait for any pending ioctl of this CS to complete. */
-   util_queue_job_wait(&cs->flush_completed);
+   util_queue_fence_wait(&cs->flush_completed);
 }
 
 static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
                            unsigned flags,
                            struct pipe_fence_handle **fence)
 {
    struct amdgpu_cs *cs = amdgpu_cs(rcs);
    struct amdgpu_winsys *ws = cs->ctx->ws;
    int error_code = 0;
 
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
index 8f9e8a6..fb6a6bb 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
@@ -479,21 +479,21 @@ void radeon_drm_cs_emit_ioctl_oneshot(void *job, int thread_index)
 
 /*
  * Make sure previous submissions of this CS are completed
  */
 void radeon_drm_cs_sync_flush(struct radeon_winsys_cs *rcs)
 {
     struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
 
     /* Wait for any pending ioctl of this CS to complete. */
     if (util_queue_is_initialized(&cs->ws->cs_queue))
-        util_queue_job_wait(&cs->flush_completed);
+        util_queue_fence_wait(&cs->flush_completed);
 }
 
 /* Add the given fence to a slab buffer fence list.
  *
  * There is a potential race condition when bo participates in submissions on
  * two or more threads simultaneously. Since we do not know which of the
  * submissions will be sent to the GPU first, we have to keep the fences
  * of all submissions.
  *
  * However, fences that belong to submissions that have already returned from
-- 
2.7.4
