From: Marek Olšák <[email protected]>

and handle PIPE_FLUSH_HINT_FINISH in r300.
---
 src/gallium/drivers/r300/r300_blit.c            |  2 +-
 src/gallium/drivers/r300/r300_flush.c           |  7 ++++---
 src/gallium/drivers/r300/r300_query.c           |  2 +-
 src/gallium/drivers/r300/r300_render.c          |  2 +-
 src/gallium/drivers/r600/evergreen_compute.c    |  2 +-
 src/gallium/drivers/r600/r600_buffer_common.c   |  4 ++--
 src/gallium/drivers/r600/r600_cs.h              |  2 +-
 src/gallium/drivers/r600/r600_hw_context.c      |  8 ++++----
 src/gallium/drivers/r600/r600_pipe_common.c     | 14 +++++++-------
 src/gallium/drivers/r600/r600_state_common.c    |  2 +-
 src/gallium/drivers/r600/r600_texture.c         |  2 +-
 src/gallium/drivers/r600/radeon_uvd.c           |  2 +-
 src/gallium/drivers/r600/radeon_vce.c           |  2 +-
 src/gallium/drivers/radeon/r600_buffer_common.c |  4 ++--
 src/gallium/drivers/radeon/r600_cs.h            |  2 +-
 src/gallium/drivers/radeon/r600_pipe_common.c   |  8 ++++----
 src/gallium/drivers/radeon/r600_texture.c       |  2 +-
 src/gallium/drivers/radeon/radeon_uvd.c         |  2 +-
 src/gallium/drivers/radeon/radeon_vce.c         |  2 +-
 src/gallium/drivers/radeon/radeon_vcn_dec.c     |  2 +-
 src/gallium/drivers/radeon/radeon_vcn_enc.c     |  2 +-
 src/gallium/drivers/radeon/radeon_winsys.h      |  5 +----
 src/gallium/drivers/radeonsi/si_fence.c         |  6 +++---
 src/gallium/drivers/radeonsi/si_hw_context.c    |  6 +++---
 src/gallium/drivers/radeonsi/si_state_shaders.c |  4 ++--
 src/gallium/winsys/amdgpu/drm/amdgpu_bo.c       |  4 ++--
 src/gallium/winsys/amdgpu/drm/amdgpu_cs.c       |  2 +-
 src/gallium/winsys/radeon/drm/radeon_drm_bo.c   |  4 ++--
 src/gallium/winsys/radeon/drm/radeon_drm_cs.c   |  6 +++---
 29 files changed, 55 insertions(+), 57 deletions(-)

diff --git a/src/gallium/drivers/r300/r300_blit.c b/src/gallium/drivers/r300/r300_blit.c
index 8fda727..bc49775 100644
--- a/src/gallium/drivers/r300/r300_blit.c
+++ b/src/gallium/drivers/r300/r300_blit.c
@@ -376,21 +376,21 @@ static void r300_clear(struct pipe_context* pipe,
         /* Calculate zmask_clear and hiz_clear atom sizes. */
         unsigned dwords =
             r300->gpu_flush.size +
             (r300->zmask_clear.dirty ? r300->zmask_clear.size : 0) +
             (r300->hiz_clear.dirty ? r300->hiz_clear.size : 0) +
             (r300->cmask_clear.dirty ? r300->cmask_clear.size : 0) +
             r300_get_num_cs_end_dwords(r300);
 
         /* Reserve CS space. */
         if (!r300->rws->cs_check_space(r300->cs, dwords)) {
-            r300_flush(&r300->context, RADEON_FLUSH_ASYNC, NULL);
+            r300_flush(&r300->context, PIPE_FLUSH_ASYNC, NULL);
         }
 
         /* Emit clear packets. */
         r300_emit_gpu_flush(r300, r300->gpu_flush.size, r300->gpu_flush.state);
         r300->gpu_flush.dirty = FALSE;
 
         if (r300->zmask_clear.dirty) {
             r300_emit_zmask_clear(r300, r300->zmask_clear.size,
                                   r300->zmask_clear.state);
             r300->zmask_clear.dirty = FALSE;
diff --git a/src/gallium/drivers/r300/r300_flush.c b/src/gallium/drivers/r300/r300_flush.c
index 7fabd13..f6c1bf3 100644
--- a/src/gallium/drivers/r300/r300_flush.c
+++ b/src/gallium/drivers/r300/r300_flush.c
@@ -122,19 +122,20 @@ void r300_flush(struct pipe_context *pipe,
                                           FALSE);
             r300->hyperz_enabled = FALSE;
         }
     }
 }
 
 static void r300_flush_wrapped(struct pipe_context *pipe,
                                struct pipe_fence_handle **fence,
                                unsigned flags)
 {
-    r300_flush(pipe,
-               flags & PIPE_FLUSH_END_OF_FRAME ? RADEON_FLUSH_END_OF_FRAME : 0,
-               fence);
+    if (flags & PIPE_FLUSH_HINT_FINISH)
+        flags &= ~PIPE_FLUSH_ASYNC;
+
+    r300_flush(pipe, flags, fence);
 }
 
 void r300_init_flush_functions(struct r300_context* r300)
 {
     r300->context.flush = r300_flush_wrapped;
 }
diff --git a/src/gallium/drivers/r300/r300_query.c b/src/gallium/drivers/r300/r300_query.c
index a84c941..014055b 100644
--- a/src/gallium/drivers/r300/r300_query.c
+++ b/src/gallium/drivers/r300/r300_query.c
@@ -114,21 +114,21 @@ void r300_stop_query(struct r300_context *r300)
 }
 
 static bool r300_end_query(struct pipe_context* pipe,
                           struct pipe_query* query)
 {
     struct r300_context* r300 = r300_context(pipe);
     struct r300_query *q = r300_query(query);
 
     if (q->type == PIPE_QUERY_GPU_FINISHED) {
         pb_reference(&q->buf, NULL);
-        r300_flush(pipe, RADEON_FLUSH_ASYNC,
+        r300_flush(pipe, PIPE_FLUSH_ASYNC,
                    (struct pipe_fence_handle**)&q->buf);
         return true;
     }
 
     if (q != r300->query_current) {
         fprintf(stderr, "r300: end_query: Got invalid query.\n");
         assert(0);
         return false;
     }
 
diff --git a/src/gallium/drivers/r300/r300_render.c b/src/gallium/drivers/r300/r300_render.c
index 9397aae..211d35d 100644
--- a/src/gallium/drivers/r300/r300_render.c
+++ b/src/gallium/drivers/r300/r300_render.c
@@ -209,21 +209,21 @@ static boolean r300_reserve_cs_dwords(struct r300_context *r300,
     if (emit_vertex_arrays)
         cs_dwords += 55; /* emit_vertex_arrays */
 
     if (emit_vertex_arrays_swtcl)
         cs_dwords += 7; /* emit_vertex_arrays_swtcl */
 
     cs_dwords += r300_get_num_cs_end_dwords(r300);
 
     /* Reserve requested CS space. */
     if (!r300->rws->cs_check_space(r300->cs, cs_dwords)) {
-        r300_flush(&r300->context, RADEON_FLUSH_ASYNC, NULL);
+        r300_flush(&r300->context, PIPE_FLUSH_ASYNC, NULL);
         flushed = TRUE;
     }
 
     return flushed;
 }
 
 /**
  * Validate buffers and emit dirty state.
  * \param r300          The context.
  * \param flags         See r300_prepare_flags.
diff --git a/src/gallium/drivers/r600/evergreen_compute.c b/src/gallium/drivers/r600/evergreen_compute.c
index 48c4a9c..3985e70 100644
--- a/src/gallium/drivers/r600/evergreen_compute.c
+++ b/src/gallium/drivers/r600/evergreen_compute.c
@@ -616,21 +616,21 @@ static void evergreen_emit_dispatch(struct r600_context *rctx,
 }
 
 static void compute_emit_cs(struct r600_context *rctx,
                            const struct pipe_grid_info *info)
 {
        struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        unsigned i;
 
        /* make sure that the gfx ring is only one active */
        if (radeon_emitted(rctx->b.dma.cs, 0)) {
-               rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+               rctx->b.dma.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
        }
 
        /* Initialize all the compute-related registers.
         *
         * See evergreen_init_atom_start_compute_cs() in this file for the list
         * of registers initialized by the start_compute_cs_cmd atom.
         */
        r600_emit_command_buffer(cs, &rctx->start_compute_cs_cmd);
 
        /* emit config state */
diff --git a/src/gallium/drivers/r600/r600_buffer_common.c b/src/gallium/drivers/r600/r600_buffer_common.c
index 35a7023..5ff25ae 100644
--- a/src/gallium/drivers/r600/r600_buffer_common.c
+++ b/src/gallium/drivers/r600/r600_buffer_common.c
@@ -59,32 +59,32 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
 
        if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* have to wait for the last write */
                rusage = RADEON_USAGE_WRITE;
        }
 
        if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
            ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
                                             resource->buf, rusage)) {
                if (usage & PIPE_TRANSFER_DONTBLOCK) {
-                       ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+                       ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
                        return NULL;
                } else {
                        ctx->gfx.flush(ctx, 0, NULL);
                        busy = true;
                }
        }
        if (radeon_emitted(ctx->dma.cs, 0) &&
            ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
                                             resource->buf, rusage)) {
                if (usage & PIPE_TRANSFER_DONTBLOCK) {
-                       ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+                       ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
                        return NULL;
                } else {
                        ctx->dma.flush(ctx, 0, NULL);
                        busy = true;
                }
        }
 
        if (busy || !ctx->ws->buffer_wait(resource->buf, 0, rusage)) {
                if (usage & PIPE_TRANSFER_DONTBLOCK) {
                        return NULL;
diff --git a/src/gallium/drivers/r600/r600_cs.h b/src/gallium/drivers/r600/r600_cs.h
index 0efae09..9c8298a 100644
--- a/src/gallium/drivers/r600/r600_cs.h
+++ b/src/gallium/drivers/r600/r600_cs.h
@@ -101,21 +101,21 @@ radeon_add_to_buffer_list_check_mem(struct r600_common_context *rctx,
                                    struct r600_ring *ring,
                                    struct r600_resource *rbo,
                                    enum radeon_bo_usage usage,
                                    enum radeon_bo_priority priority,
                                    bool check_mem)
 {
        if (check_mem &&
            !radeon_cs_memory_below_limit(rctx->screen, ring->cs,
                                          rctx->vram + rbo->vram_usage,
                                          rctx->gtt + rbo->gart_usage))
-               ring->flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+               ring->flush(rctx, PIPE_FLUSH_ASYNC, NULL);
 
        return radeon_add_to_buffer_list(rctx, ring, rbo, usage, priority);
 }
 
 static inline void r600_emit_reloc(struct r600_common_context *rctx,
                                   struct r600_ring *ring, struct r600_resource *rbo,
                                   enum radeon_bo_usage usage,
                                   enum radeon_bo_priority priority)
 {
        struct radeon_winsys_cs *cs = ring->cs;
diff --git a/src/gallium/drivers/r600/r600_hw_context.c b/src/gallium/drivers/r600/r600_hw_context.c
index 727ad9c..8ffd02b 100644
--- a/src/gallium/drivers/r600/r600_hw_context.c
+++ b/src/gallium/drivers/r600/r600_hw_context.c
@@ -28,27 +28,27 @@
 #include "util/u_memory.h"
 #include <errno.h>
 #include <unistd.h>
 
 
 void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
                        boolean count_draw_in)
 {
        /* Flush the DMA IB if it's not empty. */
        if (radeon_emitted(ctx->b.dma.cs, 0))
-               ctx->b.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->b.dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
 
        if (!radeon_cs_memory_below_limit(ctx->b.screen, ctx->b.gfx.cs,
                                          ctx->b.vram, ctx->b.gtt)) {
                ctx->b.gtt = 0;
                ctx->b.vram = 0;
-               ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
                return;
        }
        /* all will be accounted once relocation are emited */
        ctx->b.gtt = 0;
        ctx->b.vram = 0;
 
        /* Check available space in CS. */
        if (count_draw_in) {
                uint64_t mask;
 
@@ -75,21 +75,21 @@ void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
        }
 
        /* Count in framebuffer cache flushes at the end of CS. */
        num_dw += R600_MAX_FLUSH_CS_DWORDS;
 
        /* The fence at the end of CS. */
        num_dw += 10;
 
        /* Flush if there's not enough space. */
        if (!ctx->b.ws->cs_check_space(ctx->b.gfx.cs, num_dw)) {
-               ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
        }
 }
 
 void r600_flush_emit(struct r600_context *rctx)
 {
        struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        unsigned cp_coher_cntl = 0;
        unsigned wait_until = 0;
 
        if (!rctx->b.flags) {
@@ -432,21 +432,21 @@ void r600_emit_pfp_sync_me(struct r600_context *rctx)
                 */
                struct r600_resource *buf = NULL;
                unsigned offset, reloc;
                uint64_t va;
 
                /* 16-byte address alignment is required by WAIT_REG_MEM. */
                u_suballocator_alloc(rctx->b.allocator_zeroed_memory, 4, 16,
                                     &offset, (struct pipe_resource**)&buf);
                if (!buf) {
                        /* This is too heavyweight, but will work. */
-                       rctx->b.gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+                       rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
                        return;
                }
 
                reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, buf,
                                                  RADEON_USAGE_READWRITE,
                                                  RADEON_PRIO_FENCE);
 
                va = buf->gpu_address + offset;
                assert(va % 16 == 0);
 
diff --git a/src/gallium/drivers/r600/r600_pipe_common.c b/src/gallium/drivers/r600/r600_pipe_common.c
index 23f7d74..d44860a 100644
--- a/src/gallium/drivers/r600/r600_pipe_common.c
+++ b/src/gallium/drivers/r600/r600_pipe_common.c
@@ -263,39 +263,39 @@ void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
        }
 
        /* Flush the GFX IB if DMA depends on it. */
        if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
            ((dst &&
              ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, dst->buf,
                                               RADEON_USAGE_READWRITE)) ||
             (src &&
              ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
                                               RADEON_USAGE_WRITE))))
-               ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
 
        /* Flush if there's not enough space, or if the memory usage per IB
         * is too large.
         *
         * IBs using too little memory are limited by the IB submission overhead.
         * IBs using too much memory are limited by the kernel/TTM overhead.
         * Too long IBs create CPU-GPU pipeline bubbles and add latency.
         *
         * This heuristic makes sure that DMA requests are executed
         * very soon after the call is made and lowers memory usage.
         * It improves texture upload performance by keeping the DMA
         * engine busy while uploads are being submitted.
         */
        num_dw++; /* for emit_wait_idle below */
        if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
            ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
            !radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
-               ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
                assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
        }
 
        /* Wait for idle if either buffer has been used in the IB before to
         * prevent read-after-write hazards.
         */
        if ((dst &&
             ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, dst->buf,
                                              RADEON_USAGE_READWRITE)) ||
            (src &&
@@ -393,24 +393,24 @@ static void r600_fence_server_sync(struct pipe_context *ctx,
 static void r600_flush_from_st(struct pipe_context *ctx,
                               struct pipe_fence_handle **fence,
                               unsigned flags)
 {
        struct pipe_screen *screen = ctx->screen;
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct radeon_winsys *ws = rctx->ws;
        struct pipe_fence_handle *gfx_fence = NULL;
        struct pipe_fence_handle *sdma_fence = NULL;
        bool deferred_fence = false;
-       unsigned rflags = RADEON_FLUSH_ASYNC;
+       unsigned rflags = PIPE_FLUSH_ASYNC;
 
        if (flags & PIPE_FLUSH_END_OF_FRAME)
-               rflags |= RADEON_FLUSH_END_OF_FRAME;
+               rflags |= PIPE_FLUSH_END_OF_FRAME;
 
        /* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
        if (rctx->dma.cs)
                rctx->dma.flush(rctx, rflags, fence ? &sdma_fence : NULL);
 
        if (!radeon_emitted(rctx->gfx.cs, rctx->initial_gfx_cs_size)) {
                if (fence)
                        ws->fence_reference(&gfx_fence, rctx->last_gfx_fence);
                if (!(flags & PIPE_FLUSH_DEFERRED))
                        ws->cs_sync_flush(rctx->gfx.cs);
@@ -619,26 +619,26 @@ static bool r600_resource_commit(struct pipe_context *pctx,
        /*
         * Since buffer commitment changes cannot be pipelined, we need to
         * (a) flush any pending commands that refer to the buffer we're about
         *     to change, and
         * (b) wait for threaded submit to finish, including those that were
         *     triggered by some other, earlier operation.
         */
        if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
            ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
                                             res->buf, RADEON_USAGE_READWRITE)) {
-               ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
        }
        if (radeon_emitted(ctx->dma.cs, 0) &&
            ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
                                             res->buf, RADEON_USAGE_READWRITE)) {
-               ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
        }
 
        ctx->ws->cs_sync_flush(ctx->dma.cs);
        ctx->ws->cs_sync_flush(ctx->gfx.cs);
 
        assert(resource->target == PIPE_BUFFER);
 
        return ctx->ws->buffer_commit(res->buf, box->x, box->width, commit);
 }
 
@@ -1187,21 +1187,21 @@ static boolean r600_fence_finish(struct pipe_screen *screen,
                }
        }
 
        if (!rfence->gfx)
                return true;
 
        /* Flush the gfx IB if it hasn't been flushed yet. */
        if (rctx &&
            rfence->gfx_unflushed.ctx == rctx &&
            rfence->gfx_unflushed.ib_index == rctx->num_gfx_cs_flushes) {
-               rctx->gfx.flush(rctx, timeout ? 0 : RADEON_FLUSH_ASYNC, NULL);
+               rctx->gfx.flush(rctx, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
                rfence->gfx_unflushed.ctx = NULL;
 
                if (!timeout)
                        return false;
 
                /* Recompute the timeout after all that. */
                if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
                        int64_t time = os_time_get_nano();
                        timeout = abs_timeout > time ? abs_timeout - time : 0;
                }
diff --git a/src/gallium/drivers/r600/r600_state_common.c b/src/gallium/drivers/r600/r600_state_common.c
index a977cdc..d9b1592 100644
--- a/src/gallium/drivers/r600/r600_state_common.c
+++ b/src/gallium/drivers/r600/r600_state_common.c
@@ -1828,21 +1828,21 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
                return;
        }
        if (unlikely(!rctx->ps_shader &&
                     (!rctx->rasterizer || !rctx->rasterizer->rasterizer_discard))) {
                assert(0);
                return;
        }
 
        /* make sure that the gfx ring is only one active */
        if (radeon_emitted(rctx->b.dma.cs, 0)) {
-               rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+               rctx->b.dma.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
        }
 
        /* Re-emit the framebuffer state if needed. */
        dirty_tex_counter = p_atomic_read(&rctx->b.screen->dirty_tex_counter);
        if (unlikely(dirty_tex_counter != rctx->b.last_dirty_tex_counter)) {
                rctx->b.last_dirty_tex_counter = dirty_tex_counter;
                r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
                rctx->framebuffer.do_update_surf_dirtiness = true;
        }
 
diff --git a/src/gallium/drivers/r600/r600_texture.c b/src/gallium/drivers/r600/r600_texture.c
index 07782ff..4042b70 100644
--- a/src/gallium/drivers/r600/r600_texture.c
+++ b/src/gallium/drivers/r600/r600_texture.c
@@ -1493,21 +1493,21 @@ static void r600_texture_transfer_unmap(struct pipe_context *ctx,
         * The idea is that we don't want to build IBs that use too much
         * memory and put pressure on the kernel memory manager and we also
         * want to make temporary and invalidated buffers go idle ASAP to
         * decrease the total memory usage or make them reusable. The memory
         * usage will be slightly higher than given here because of the buffer
         * cache in the winsys.
         *
         * The result is that the kernel memory manager is never a bottleneck.
         */
        if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
-               rctx->gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+               rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
                rctx->num_alloc_tex_transfer_bytes = 0;
        }
 
        pipe_resource_reference(&transfer->resource, NULL);
        FREE(transfer);
 }
 
 static const struct u_resource_vtbl r600_texture_vtbl =
 {
        NULL,                           /* get_handle */
diff --git a/src/gallium/drivers/r600/radeon_uvd.c b/src/gallium/drivers/r600/radeon_uvd.c
index 69bba8c..17ff3d5 100644
--- a/src/gallium/drivers/r600/radeon_uvd.c
+++ b/src/gallium/drivers/r600/radeon_uvd.c
@@ -1252,21 +1252,21 @@ static void ruvd_end_frame(struct pipe_video_codec *decoder,
                 0, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
        send_cmd(dec, RUVD_CMD_DECODING_TARGET_BUFFER, dt, 0,
                 RADEON_USAGE_WRITE, RADEON_DOMAIN_VRAM);
        send_cmd(dec, RUVD_CMD_FEEDBACK_BUFFER, msg_fb_it_buf->res->buf,
                 FB_BUFFER_OFFSET, RADEON_USAGE_WRITE, RADEON_DOMAIN_GTT);
        if (have_it(dec))
                send_cmd(dec, RUVD_CMD_ITSCALING_TABLE_BUFFER, msg_fb_it_buf->res->buf,
                         FB_BUFFER_OFFSET + dec->fb_size, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
        set_reg(dec, dec->reg.cntl, 1);
 
-       flush(dec, RADEON_FLUSH_ASYNC);
+       flush(dec, PIPE_FLUSH_ASYNC);
        next_buffer(dec);
 }
 
 /**
  * flush any outstanding command buffers to the hardware
  */
 static void ruvd_flush(struct pipe_video_codec *decoder)
 {
 }
 
diff --git a/src/gallium/drivers/r600/radeon_vce.c b/src/gallium/drivers/r600/radeon_vce.c
index 16a0127..533bc18 100644
--- a/src/gallium/drivers/r600/radeon_vce.c
+++ b/src/gallium/drivers/r600/radeon_vce.c
@@ -52,21 +52,21 @@
 #define FW_52_0_3 ((52 << 24) | (0 << 16) | (3 << 8))
 #define FW_52_4_3 ((52 << 24) | (4 << 16) | (3 << 8))
 #define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))
 #define FW_53 (53 << 24)
 
 /**
  * flush commands to the hardware
  */
 static void flush(struct rvce_encoder *enc)
 {
-       enc->ws->cs_flush(enc->cs, RADEON_FLUSH_ASYNC, NULL);
+       enc->ws->cs_flush(enc->cs, PIPE_FLUSH_ASYNC, NULL);
        enc->task_info_idx = 0;
        enc->bs_idx = 0;
 }
 
 #if 0
 static void dump_feedback(struct rvce_encoder *enc, struct rvid_buffer *fb)
 {
        uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, enc->cs, PIPE_TRANSFER_READ_WRITE);
        unsigned i = 0;
        fprintf(stderr, "\n");
diff --git a/src/gallium/drivers/radeon/r600_buffer_common.c b/src/gallium/drivers/radeon/r600_buffer_common.c
index 5efde0c..e6a0eff 100644
--- a/src/gallium/drivers/radeon/r600_buffer_common.c
+++ b/src/gallium/drivers/radeon/r600_buffer_common.c
@@ -57,32 +57,32 @@ void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
 
        if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* have to wait for the last write */
                rusage = RADEON_USAGE_WRITE;
        }
 
        if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
            ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
                                             resource->buf, rusage)) {
                if (usage & PIPE_TRANSFER_DONTBLOCK) {
-                       ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+                       ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
                        return NULL;
                } else {
                        ctx->gfx.flush(ctx, 0, NULL);
                        busy = true;
                }
        }
        if (radeon_emitted(ctx->dma.cs, 0) &&
            ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
                                             resource->buf, rusage)) {
                if (usage & PIPE_TRANSFER_DONTBLOCK) {
-                       ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+                       ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
                        return NULL;
                } else {
                        ctx->dma.flush(ctx, 0, NULL);
                        busy = true;
                }
        }
 
        if (busy || !ctx->ws->buffer_wait(resource->buf, 0, rusage)) {
                if (usage & PIPE_TRANSFER_DONTBLOCK) {
                        return NULL;
diff --git a/src/gallium/drivers/radeon/r600_cs.h b/src/gallium/drivers/radeon/r600_cs.h
index 2f555dc..89d6c7c 100644
--- a/src/gallium/drivers/radeon/r600_cs.h
+++ b/src/gallium/drivers/radeon/r600_cs.h
@@ -99,21 +99,21 @@ radeon_add_to_buffer_list_check_mem(struct r600_common_context *rctx,
                                    struct r600_ring *ring,
                                    struct r600_resource *rbo,
                                    enum radeon_bo_usage usage,
                                    enum radeon_bo_priority priority,
                                    bool check_mem)
 {
        if (check_mem &&
            !radeon_cs_memory_below_limit(rctx->screen, ring->cs,
                                          rctx->vram + rbo->vram_usage,
                                          rctx->gtt + rbo->gart_usage))
-               ring->flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+               ring->flush(rctx, PIPE_FLUSH_ASYNC, NULL);
 
        return radeon_add_to_buffer_list(rctx, ring, rbo, usage, priority);
 }
 
 static inline void radeon_set_config_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
 {
        assert(reg < SI_CONTEXT_REG_OFFSET);
        assert(cs->current.cdw + 2 + num <= cs->current.max_dw);
        radeon_emit(cs, PKT3(PKT3_SET_CONFIG_REG, num, 0));
        radeon_emit(cs, (reg - SI_CONFIG_REG_OFFSET) >> 2);
diff --git a/src/gallium/drivers/radeon/r600_pipe_common.c b/src/gallium/drivers/radeon/r600_pipe_common.c
index 08eb406..7e7e42f 100644
--- a/src/gallium/drivers/radeon/r600_pipe_common.c
+++ b/src/gallium/drivers/radeon/r600_pipe_common.c
@@ -182,39 +182,39 @@ void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
        }
 
        /* Flush the GFX IB if DMA depends on it. */
        if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
            ((dst &&
              ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, dst->buf,
                                               RADEON_USAGE_READWRITE)) ||
             (src &&
              ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
                                               RADEON_USAGE_WRITE))))
-               ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
 
        /* Flush if there's not enough space, or if the memory usage per IB
         * is too large.
         *
         * IBs using too little memory are limited by the IB submission overhead.
         * IBs using too much memory are limited by the kernel/TTM overhead.
         * Too long IBs create CPU-GPU pipeline bubbles and add latency.
         *
         * This heuristic makes sure that DMA requests are executed
         * very soon after the call is made and lowers memory usage.
         * It improves texture upload performance by keeping the DMA
         * engine busy while uploads are being submitted.
         */
        num_dw++; /* for emit_wait_idle below */
        if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
            ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
            !radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
-               ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
                assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
        }
 
        /* Wait for idle if either buffer has been used in the IB before to
         * prevent read-after-write hazards.
         */
        if ((dst &&
             ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, dst->buf,
                                              RADEON_USAGE_READWRITE)) ||
            (src &&
@@ -379,26 +379,26 @@ static bool r600_resource_commit(struct pipe_context *pctx,
        /*
         * Since buffer commitment changes cannot be pipelined, we need to
         * (a) flush any pending commands that refer to the buffer we're about
         *     to change, and
         * (b) wait for threaded submit to finish, including those that were
         *     triggered by some other, earlier operation.
         */
        if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
            ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
                                             res->buf, RADEON_USAGE_READWRITE)) {
-               ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
        }
        if (radeon_emitted(ctx->dma.cs, 0) &&
            ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
                                             res->buf, RADEON_USAGE_READWRITE)) {
-               ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
        }
 
        ctx->ws->cs_sync_flush(ctx->dma.cs);
        ctx->ws->cs_sync_flush(ctx->gfx.cs);
 
        assert(resource->target == PIPE_BUFFER);
 
        return ctx->ws->buffer_commit(res->buf, box->x, box->width, commit);
 }
 
diff --git a/src/gallium/drivers/radeon/r600_texture.c b/src/gallium/drivers/radeon/r600_texture.c
index bc72e73..1a0503b 100644
--- a/src/gallium/drivers/radeon/r600_texture.c
+++ b/src/gallium/drivers/radeon/r600_texture.c
@@ -1827,21 +1827,21 @@ static void r600_texture_transfer_unmap(struct pipe_context *ctx,
         * The idea is that we don't want to build IBs that use too much
         * memory and put pressure on the kernel memory manager and we also
         * want to make temporary and invalidated buffers go idle ASAP to
         * decrease the total memory usage or make them reusable. The memory
         * usage will be slightly higher than given here because of the buffer
         * cache in the winsys.
         *
         * The result is that the kernel memory manager is never a bottleneck.
         */
        if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
-               rctx->gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+               rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
                rctx->num_alloc_tex_transfer_bytes = 0;
        }
 
        pipe_resource_reference(&transfer->resource, NULL);
        FREE(transfer);
 }
 
 static const struct u_resource_vtbl r600_texture_vtbl =
 {
        NULL,                           /* get_handle */
diff --git a/src/gallium/drivers/radeon/radeon_uvd.c b/src/gallium/drivers/radeon/radeon_uvd.c
index afa8836..ee76e74 100644
--- a/src/gallium/drivers/radeon/radeon_uvd.c
+++ b/src/gallium/drivers/radeon/radeon_uvd.c
@@ -1314,21 +1314,21 @@ static void ruvd_end_frame(struct pipe_video_codec *decoder,
                 0, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
        send_cmd(dec, RUVD_CMD_DECODING_TARGET_BUFFER, dt, 0,
                 RADEON_USAGE_WRITE, RADEON_DOMAIN_VRAM);
        send_cmd(dec, RUVD_CMD_FEEDBACK_BUFFER, msg_fb_it_buf->res->buf,
                 FB_BUFFER_OFFSET, RADEON_USAGE_WRITE, RADEON_DOMAIN_GTT);
        if (have_it(dec))
                send_cmd(dec, RUVD_CMD_ITSCALING_TABLE_BUFFER, msg_fb_it_buf->res->buf,
                         FB_BUFFER_OFFSET + dec->fb_size, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
        set_reg(dec, dec->reg.cntl, 1);
 
-       flush(dec, RADEON_FLUSH_ASYNC);
+       flush(dec, PIPE_FLUSH_ASYNC);
        next_buffer(dec);
 }
 
 /**
  * flush any outstanding command buffers to the hardware
  */
 static void ruvd_flush(struct pipe_video_codec *decoder)
 {
 }
 
diff --git a/src/gallium/drivers/radeon/radeon_vce.c b/src/gallium/drivers/radeon/radeon_vce.c
index 7594421..69e6022 100644
--- a/src/gallium/drivers/radeon/radeon_vce.c
+++ b/src/gallium/drivers/radeon/radeon_vce.c
@@ -46,21 +46,21 @@
 #define FW_52_0_3 ((52 << 24) | (0 << 16) | (3 << 8))
 #define FW_52_4_3 ((52 << 24) | (4 << 16) | (3 << 8))
 #define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))
 #define FW_53 (53 << 24)
 
 /**
  * flush commands to the hardware
  */
 static void flush(struct rvce_encoder *enc)
 {
-       enc->ws->cs_flush(enc->cs, RADEON_FLUSH_ASYNC, NULL);
+       enc->ws->cs_flush(enc->cs, PIPE_FLUSH_ASYNC, NULL);
        enc->task_info_idx = 0;
        enc->bs_idx = 0;
 }
 
 #if 0
 static void dump_feedback(struct rvce_encoder *enc, struct rvid_buffer *fb)
 {
        uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, enc->cs, PIPE_TRANSFER_READ_WRITE);
        unsigned i = 0;
        fprintf(stderr, "\n");
diff --git a/src/gallium/drivers/radeon/radeon_vcn_dec.c b/src/gallium/drivers/radeon/radeon_vcn_dec.c
index 30cd607..fa62155 100644
--- a/src/gallium/drivers/radeon/radeon_vcn_dec.c
+++ b/src/gallium/drivers/radeon/radeon_vcn_dec.c
@@ -1151,21 +1151,21 @@ static void radeon_dec_end_frame(struct pipe_video_codec *decoder,
                 0, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
        send_cmd(dec, RDECODE_CMD_DECODING_TARGET_BUFFER, dt, 0,
                 RADEON_USAGE_WRITE, RADEON_DOMAIN_VRAM);
        send_cmd(dec, RDECODE_CMD_FEEDBACK_BUFFER, msg_fb_it_buf->res->buf,
                 FB_BUFFER_OFFSET, RADEON_USAGE_WRITE, RADEON_DOMAIN_GTT);
        if (have_it(dec))
                send_cmd(dec, RDECODE_CMD_IT_SCALING_TABLE_BUFFER, msg_fb_it_buf->res->buf,
                         FB_BUFFER_OFFSET + FB_BUFFER_SIZE, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
        set_reg(dec, RDECODE_ENGINE_CNTL, 1);
 
-       flush(dec, RADEON_FLUSH_ASYNC);
+       flush(dec, PIPE_FLUSH_ASYNC);
        next_buffer(dec);
 }
 
 /**
  * flush any outstanding command buffers to the hardware
  */
 static void radeon_dec_flush(struct pipe_video_codec *decoder)
 {
 }
 
diff --git a/src/gallium/drivers/radeon/radeon_vcn_enc.c b/src/gallium/drivers/radeon/radeon_vcn_enc.c
index 4972d11..abc89a7 100644
--- a/src/gallium/drivers/radeon/radeon_vcn_enc.c
+++ b/src/gallium/drivers/radeon/radeon_vcn_enc.c
@@ -49,21 +49,21 @@ static void radeon_vcn_enc_get_param(struct radeon_encoder *enc, struct pipe_h26
        enc->enc_pic.not_referenced = pic->not_referenced;
        enc->enc_pic.is_idr = pic->is_idr;
        enc->enc_pic.crop_left = 0;
        enc->enc_pic.crop_right = (align(enc->base.width, 16) - enc->base.width) / 2;
        enc->enc_pic.crop_top = 0;
        enc->enc_pic.crop_bottom = (align(enc->base.height, 16) - enc->base.height) / 2;
 }
 
 static void flush(struct radeon_encoder *enc)
 {
-       enc->ws->cs_flush(enc->cs, RADEON_FLUSH_ASYNC, NULL);
+       enc->ws->cs_flush(enc->cs, PIPE_FLUSH_ASYNC, NULL);
 }
 
 static void radeon_enc_flush(struct pipe_video_codec *encoder)
 {
        struct radeon_encoder *enc = (struct radeon_encoder*)encoder;
        flush(enc);
 }
 
 static void radeon_enc_cs_flush(void *ctx, unsigned flags,
                                                                struct pipe_fence_handle **fence)
diff --git a/src/gallium/drivers/radeon/radeon_winsys.h b/src/gallium/drivers/radeon/radeon_winsys.h
index c03090b..7ab110a 100644
--- a/src/gallium/drivers/radeon/radeon_winsys.h
+++ b/src/gallium/drivers/radeon/radeon_winsys.h
@@ -24,23 +24,20 @@
 #ifndef RADEON_WINSYS_H
 #define RADEON_WINSYS_H
 
 /* The public winsys interface header for the radeon driver. */
 
 #include "pipebuffer/pb_buffer.h"
 
 #include "amd/common/ac_gpu_info.h"
 #include "amd/common/ac_surface.h"
 
-#define RADEON_FLUSH_ASYNC             (1 << 0)
-#define RADEON_FLUSH_END_OF_FRAME       (1 << 1)
-
 /* Tiling flags. */
 enum radeon_bo_layout {
     RADEON_LAYOUT_LINEAR = 0,
     RADEON_LAYOUT_TILED,
     RADEON_LAYOUT_SQUARETILED,
 
     RADEON_LAYOUT_UNKNOWN
 };
 
 enum radeon_bo_domain { /* bitfield */
@@ -524,21 +521,21 @@ struct radeon_winsys {
      * \param list  Returned buffer list. Set to NULL to query the count only.
      * \return      The buffer count.
      */
     unsigned (*cs_get_buffer_list)(struct radeon_winsys_cs *cs,
                                    struct radeon_bo_list_item *list);
 
     /**
      * Flush a command stream.
      *
      * \param cs          A command stream to flush.
-     * \param flags,      RADEON_FLUSH_ASYNC or 0.
+     * \param flags,      PIPE_FLUSH_* flags.
      * \param fence       Pointer to a fence. If non-NULL, a fence is inserted
      *                    after the CS and is returned through this parameter.
      * \return Negative POSIX error code or 0 for success.
      *         Asynchronous submissions never return an error.
      */
     int (*cs_flush)(struct radeon_winsys_cs *cs,
                     unsigned flags,
                     struct pipe_fence_handle **fence);
 
     /**
diff --git a/src/gallium/drivers/radeonsi/si_fence.c b/src/gallium/drivers/radeonsi/si_fence.c
index 5f478af..0d165a1 100644
--- a/src/gallium/drivers/radeonsi/si_fence.c
+++ b/src/gallium/drivers/radeonsi/si_fence.c
@@ -264,21 +264,21 @@ static boolean si_fence_finish(struct pipe_screen *screen,
                        *     * and the calls to ClientWaitSync and FenceSync were
                        *       issued from the same context,
                        *
                        *     then the GL will behave as if the equivalent of Flush
                        *     were inserted immediately after the creation of sync."
                        *
                        * This means we need to flush for such fences even when we're
                        * not going to wait.
                         */
                        threaded_context_unwrap_sync(ctx);
-                       sctx->b.gfx.flush(&sctx->b, timeout ? 0 : RADEON_FLUSH_ASYNC, NULL);
+                       sctx->b.gfx.flush(&sctx->b, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
                        rfence->gfx_unflushed.ctx = NULL;
 
                        if (!timeout)
                                return false;
 
                        /* Recompute the timeout after all that. */
                        if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
                                int64_t time = os_time_get_nano();
                                timeout = abs_timeout > time ? abs_timeout - time : 0;
                        }
@@ -371,24 +371,24 @@ static void si_flush_from_st(struct pipe_context *ctx,
                             struct pipe_fence_handle **fence,
                             unsigned flags)
 {
        struct pipe_screen *screen = ctx->screen;
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct radeon_winsys *ws = rctx->ws;
        struct pipe_fence_handle *gfx_fence = NULL;
        struct pipe_fence_handle *sdma_fence = NULL;
        bool deferred_fence = false;
        struct si_fine_fence fine = {};
-       unsigned rflags = RADEON_FLUSH_ASYNC;
+       unsigned rflags = PIPE_FLUSH_ASYNC;
 
        if (flags & PIPE_FLUSH_END_OF_FRAME)
-               rflags |= RADEON_FLUSH_END_OF_FRAME;
+               rflags |= PIPE_FLUSH_END_OF_FRAME;
 
        if (flags & (PIPE_FLUSH_TOP_OF_PIPE | PIPE_FLUSH_BOTTOM_OF_PIPE)) {
                assert(flags & PIPE_FLUSH_DEFERRED);
                assert(fence);
 
                si_fine_fence_set((struct si_context *)rctx, &fine, flags);
        }
 
        /* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
        if (rctx->dma.cs)
diff --git a/src/gallium/drivers/radeonsi/si_hw_context.c b/src/gallium/drivers/radeonsi/si_hw_context.c
index d46c109..3823be0 100644
--- a/src/gallium/drivers/radeonsi/si_hw_context.c
+++ b/src/gallium/drivers/radeonsi/si_hw_context.c
@@ -46,51 +46,51 @@ void si_need_cs_space(struct si_context *ctx)
         */
 
        /* There are two memory usage counters in the winsys for all buffers
         * that have been added (cs_add_buffer) and two counters in the pipe
         * driver for those that haven't been added yet.
         */
        if (unlikely(!radeon_cs_memory_below_limit(ctx->b.screen, ctx->b.gfx.cs,
                                                   ctx->b.vram, ctx->b.gtt))) {
                ctx->b.gtt = 0;
                ctx->b.vram = 0;
-               ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
                return;
        }
        ctx->b.gtt = 0;
        ctx->b.vram = 0;
 
        /* If the CS is sufficiently large, don't count the space needed
         * and just flush if there is not enough space left.
         */
        if (!ctx->b.ws->cs_check_space(cs, 2048))
-               ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
 }
 
 void si_context_gfx_flush(void *context, unsigned flags,
                          struct pipe_fence_handle **fence)
 {
        struct si_context *ctx = context;
        struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
        struct radeon_winsys *ws = ctx->b.ws;
 
        if (ctx->gfx_flush_in_progress)
                return;
 
        if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size))
                return;
 
        if (si_check_device_reset(&ctx->b))
                return;
 
        if (ctx->screen->debug_flags & DBG(CHECK_VM))
-               flags &= ~RADEON_FLUSH_ASYNC;
+               flags &= ~PIPE_FLUSH_ASYNC;
 
        /* If the state tracker is flushing the GFX IB, r600_flush_from_st is
         * responsible for flushing the DMA IB and merging the fences from both.
         * This code is only needed when the driver flushes the GFX IB
         * internally, and it never asks for a fence handle.
         */
        if (radeon_emitted(ctx->b.dma.cs, 0)) {
                assert(fence == NULL); /* internal flushes only */
                ctx->b.dma.flush(ctx, flags, NULL);
        }
diff --git a/src/gallium/drivers/radeonsi/si_state_shaders.c b/src/gallium/drivers/radeonsi/si_state_shaders.c
index 461760f..4f683b8 100644
--- a/src/gallium/drivers/radeonsi/si_state_shaders.c
+++ b/src/gallium/drivers/radeonsi/si_state_shaders.c
@@ -2815,21 +2815,21 @@ static bool si_update_gs_ring_buffers(struct si_context *sctx)
                si_pm4_free_state(sctx, sctx->init_config_gs_rings, ~0);
        sctx->init_config_gs_rings = pm4;
 
        if (!sctx->init_config_has_vgt_flush) {
                si_init_config_add_vgt_flush(sctx);
                si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
        }
 
        /* Flush the context to re-emit both init_config states. */
        sctx->b.initial_gfx_cs_size = 0; /* force flush */
-       si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
+       si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
 
        /* Set ring bindings. */
        if (sctx->esgs_ring) {
                assert(sctx->b.chip_class <= VI);
                si_set_ring_buffer(&sctx->b.b, SI_ES_RING_ESGS,
                                   sctx->esgs_ring, 0, sctx->esgs_ring->width0,
                                   true, true, 4, 64, 0);
                si_set_ring_buffer(&sctx->b.b, SI_GS_RING_ESGS,
                                   sctx->esgs_ring, 0, sctx->esgs_ring->width0,
                                   false, false, 0, 0, 0);
@@ -3154,21 +3154,21 @@ static void si_init_tess_factor_ring(struct si_context *sctx)
                               R_00B430_SPI_SHADER_USER_DATA_HS_0 +
                               GFX6_SGPR_TCS_FACTOR_ADDR_BASE64K * 4,
                               factor_va >> 16);
        }
 
        /* Flush the context to re-emit the init_config state.
         * This is done only once in a lifetime of a context.
         */
        si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
        sctx->b.initial_gfx_cs_size = 0; /* force flush */
-       si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
+       si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
 }
 
 /**
  * This is used when TCS is NULL in the VS->TCS->TES chain. In this case,
  * VS passes its outputs to TES directly, so the fixed-function shader only
  * has to write TESSOUTER and TESSINNER.
  */
 static void si_generate_fixed_func_tcs(struct si_context *sctx)
 {
        struct ureg_src outer, inner;
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index c3e97c2..6ec7cb7 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -228,31 +228,31 @@ static void *amdgpu_bo_map(struct pb_buffer *buf,
          if (!(usage & PIPE_TRANSFER_WRITE)) {
             /* Mapping for read.
              *
              * Since we are mapping for read, we don't need to wait
              * if the GPU is using the buffer for read too
              * (neither one is changing it).
              *
              * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
-               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+               cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
                return NULL;
             }
 
             if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                 RADEON_USAGE_WRITE)) {
                return NULL;
             }
          } else {
             if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
-               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+               cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
                return NULL;
             }
 
             if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                 RADEON_USAGE_READWRITE)) {
                return NULL;
             }
          }
       } else {
          uint64_t time = os_time_get_nano();
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
index 2c6856b..089a358 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
@@ -1486,21 +1486,21 @@ static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
       /* Swap command streams. "cst" is going to be submitted. */
       cs->csc = cs->cst;
       cs->cst = cur;
 
       /* Submit. */
       util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
                          amdgpu_cs_submit_ib, NULL);
       /* The submission has been queued, unlock the fence now. */
       simple_mtx_unlock(&ws->bo_fence_lock);
 
-      if (!(flags & RADEON_FLUSH_ASYNC)) {
+      if (!(flags & PIPE_FLUSH_ASYNC)) {
          amdgpu_cs_sync_flush(rcs);
          error_code = cur->error_code;
       }
    } else {
       amdgpu_cs_context_cleanup(cs->csc);
    }
 
    amdgpu_get_new_ib(&ws->base, cs, IB_MAIN);
 
    cs->main.base.used_gart = 0;
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
index b4e501c..87c3f1e 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
@@ -483,31 +483,31 @@ static void *radeon_bo_map(struct pb_buffer *buf,
         if (usage & PIPE_TRANSFER_DONTBLOCK) {
             if (!(usage & PIPE_TRANSFER_WRITE)) {
                 /* Mapping for read.
                  *
                  * Since we are mapping for read, we don't need to wait
                  * if the GPU is using the buffer for read too
                  * (neither one is changing it).
                  *
                  * Only check whether the buffer is being used for write. */
                 if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
-                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+                    cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
                     return NULL;
                 }
 
                 if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
                                     RADEON_USAGE_WRITE)) {
                     return NULL;
                 }
             } else {
                 if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
-                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+                    cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
                     return NULL;
                 }
 
                 if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
                                     RADEON_USAGE_READWRITE)) {
                     return NULL;
                 }
             }
         } else {
             uint64_t time = os_time_get_nano();
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
index add88f8..c3398d0 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
@@ -392,21 +392,21 @@ static bool radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
         unsigned i;
 
         for (i = cs->csc->num_validated_relocs; i < cs->csc->num_relocs; i++) {
             p_atomic_dec(&cs->csc->relocs_bo[i].bo->num_cs_references);
             radeon_bo_reference(&cs->csc->relocs_bo[i].bo, NULL);
         }
         cs->csc->num_relocs = cs->csc->num_validated_relocs;
 
         /* Flush if there are any relocs. Clean up otherwise. */
         if (cs->csc->num_relocs) {
-            cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+            cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
         } else {
             radeon_cs_context_cleanup(cs->csc);
             cs->base.used_vram = 0;
             cs->base.used_gart = 0;
 
             assert(cs->base.current.cdw == 0);
             if (cs->base.current.cdw != 0) {
                 fprintf(stderr, "radeon: Unexpected error in %s.\n", __func__);
             }
         }
@@ -648,35 +648,35 @@ static int radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
         case RING_GFX:
         case RING_COMPUTE:
             cs->cst->flags[0] = RADEON_CS_KEEP_TILING_FLAGS;
             cs->cst->flags[1] = RADEON_CS_RING_GFX;
             cs->cst->cs.num_chunks = 3;
 
             if (cs->ws->info.has_virtual_memory) {
                 cs->cst->flags[0] |= RADEON_CS_USE_VM;
                 cs->cst->cs.num_chunks = 3;
             }
-            if (flags & RADEON_FLUSH_END_OF_FRAME) {
+            if (flags & PIPE_FLUSH_END_OF_FRAME) {
                 cs->cst->flags[0] |= RADEON_CS_END_OF_FRAME;
                 cs->cst->cs.num_chunks = 3;
             }
             if (cs->ring_type == RING_COMPUTE) {
                 cs->cst->flags[1] = RADEON_CS_RING_COMPUTE;
                 cs->cst->cs.num_chunks = 3;
             }
             break;
         }
 
         if (util_queue_is_initialized(&cs->ws->cs_queue)) {
             util_queue_add_job(&cs->ws->cs_queue, cs, &cs->flush_completed,
                                radeon_drm_cs_emit_ioctl_oneshot, NULL);
-            if (!(flags & RADEON_FLUSH_ASYNC))
+            if (!(flags & PIPE_FLUSH_ASYNC))
                 radeon_drm_cs_sync_flush(rcs);
         } else {
             radeon_drm_cs_emit_ioctl_oneshot(cs, 0);
         }
     } else {
         radeon_cs_context_cleanup(cs->cst);
     }
 
     /* Prepare a new CS. */
     cs->base.current.buf = cs->csc->buf;
-- 
2.7.4

_______________________________________________
mesa-dev mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/mesa-dev

Reply via email to