Module: Mesa
Branch: main
Commit: 1b6fe35ec800d366b9a57f8d3a8ffbb6605e25c2
URL:    http://cgit.freedesktop.org/mesa/mesa/commit/?id=1b6fe35ec800d366b9a57f8d3a8ffbb6605e25c2

Author: Lionel Landwerlin <lionel.g.landwer...@intel.com>
Date:   Wed Nov 8 11:42:22 2023 +0200

anv: get rid of the duplicate pipeline fields in command buffer state

This can be error prone if you forget to update one.

Signed-off-by: Lionel Landwerlin <lionel.g.landwer...@intel.com>
Reviewed-by: Ian Romanick <ian.d.roman...@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26247>

---
 src/intel/vulkan/anv_cmd_buffer.c              | 31 ++++-----
 src/intel/vulkan/anv_genX.h                    |  2 +-
 src/intel/vulkan/anv_measure.c                 | 15 ++--
 src/intel/vulkan/anv_private.h                 |  9 +--
 src/intel/vulkan/genX_cmd_buffer.c             | 81 ++++++++++++++--------
 .../vulkan/genX_cmd_draw_generated_indirect.h  | 15 ++--
 src/intel/vulkan/genX_cmd_draw_helpers.h       |  3 +-
 src/intel/vulkan/genX_gfx_state.c              | 15 ++--
 src/intel/vulkan/genX_pipeline.c               |  2 +-
 9 files changed, 102 insertions(+), 71 deletions(-)

diff --git a/src/intel/vulkan/anv_cmd_buffer.c b/src/intel/vulkan/anv_cmd_buffer.c
index 8f9fb72c5b5..8f8cb887700 100644
--- a/src/intel/vulkan/anv_cmd_buffer.c
+++ b/src/intel/vulkan/anv_cmd_buffer.c
@@ -613,14 +613,14 @@ void anv_CmdBindPipeline(
 
    switch (pipelineBindPoint) {
    case VK_PIPELINE_BIND_POINT_COMPUTE: {
-      struct anv_compute_pipeline *compute_pipeline =
-         anv_pipeline_to_compute(pipeline);
-      if (cmd_buffer->state.compute.pipeline == compute_pipeline)
+      if (cmd_buffer->state.compute.base.pipeline == pipeline)
         return;
 
       cmd_buffer->state.compute.base.pipeline = pipeline;
-      cmd_buffer->state.compute.pipeline = compute_pipeline;
       cmd_buffer->state.compute.pipeline_dirty = true;
+
+      struct anv_compute_pipeline *compute_pipeline =
+         anv_pipeline_to_compute(pipeline);
       set_dirty_for_bind_map(cmd_buffer, MESA_SHADER_COMPUTE,
                              &compute_pipeline->cs->bind_map);
@@ -630,8 +630,6 @@
       break;
    }
 
    case VK_PIPELINE_BIND_POINT_GRAPHICS: {
-      struct anv_graphics_pipeline *old_pipeline =
-         cmd_buffer->state.gfx.pipeline;
       struct anv_graphics_pipeline *new_pipeline =
          anv_pipeline_to_graphics(pipeline);
@@ -639,11 +637,14 @@
       vk_cmd_set_dynamic_graphics_state(&cmd_buffer->vk,
                                         &new_pipeline->dynamic_state);
 
-      if (old_pipeline == new_pipeline)
+      if (cmd_buffer->state.gfx.base.pipeline == pipeline)
         return;
 
+      struct anv_graphics_pipeline *old_pipeline =
+         cmd_buffer->state.gfx.base.pipeline == NULL ? NULL :
+         anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
+
       cmd_buffer->state.gfx.base.pipeline = pipeline;
-      cmd_buffer->state.gfx.pipeline = new_pipeline;
       cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
 
       anv_foreach_stage(stage, new_pipeline->base.base.active_stages) {
@@ -696,15 +697,14 @@
    }
 
    case VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR: {
-      struct anv_ray_tracing_pipeline *rt_pipeline =
-         anv_pipeline_to_ray_tracing(pipeline);
-      if (cmd_buffer->state.rt.pipeline == rt_pipeline)
+      if (cmd_buffer->state.rt.base.pipeline == pipeline)
         return;
 
       cmd_buffer->state.rt.base.pipeline = pipeline;
-      cmd_buffer->state.rt.pipeline = rt_pipeline;
       cmd_buffer->state.rt.pipeline_dirty = true;
 
+      struct anv_ray_tracing_pipeline *rt_pipeline =
+         anv_pipeline_to_ray_tracing(pipeline);
       if (rt_pipeline->stack_size > 0) {
          anv_CmdSetRayTracingPipelineStackSizeKHR(commandBuffer,
                                                   rt_pipeline->stack_size);
@@ -1039,7 +1039,8 @@ anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
    const struct intel_device_info *devinfo = cmd_buffer->device->info;
    struct anv_cmd_pipeline_state *pipe_state = &cmd_buffer->state.compute.base;
    struct anv_push_constants *data = &pipe_state->push_constants;
-   struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
+   struct anv_compute_pipeline *pipeline =
+      anv_pipeline_to_compute(cmd_buffer->state.compute.base.pipeline);
    const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
    const struct anv_push_range *range = &pipeline->cs->bind_map.push_ranges[0];
@@ -1293,15 +1294,13 @@ anv_cmd_buffer_restore_state(struct anv_cmd_buffer *cmd_buffer,
    assert(state->flags & ANV_CMD_SAVED_STATE_COMPUTE_PIPELINE);
    const VkPipelineBindPoint bind_point = VK_PIPELINE_BIND_POINT_COMPUTE;
    const VkShaderStageFlags stage_flags = VK_SHADER_STAGE_COMPUTE_BIT;
-   struct anv_cmd_compute_state *comp_state = &cmd_buffer->state.compute;
-   struct anv_cmd_pipeline_state *pipe_state = &comp_state->base;
+   struct anv_cmd_pipeline_state *pipe_state = &cmd_buffer->state.compute.base;
 
    if (state->flags & ANV_CMD_SAVED_STATE_COMPUTE_PIPELINE) {
       if (state->pipeline) {
          anv_CmdBindPipeline(cmd_buffer_, bind_point,
                              anv_pipeline_to_handle(state->pipeline));
       } else {
-         comp_state->pipeline = NULL;
          pipe_state->pipeline = NULL;
       }
    }
diff --git a/src/intel/vulkan/anv_genX.h b/src/intel/vulkan/anv_genX.h
index a92b1f207ef..a9bd1134982 100644
--- a/src/intel/vulkan/anv_genX.h
+++ b/src/intel/vulkan/anv_genX.h
@@ -186,7 +186,7 @@ genX(batch_emit_post_3dprimitive_was)(struct anv_batch *batch,
                                       uint32_t vertex_count);
 
 VkPolygonMode
-genX(raster_polygon_mode)(struct anv_graphics_pipeline *pipeline,
+genX(raster_polygon_mode)(const struct anv_graphics_pipeline *pipeline,
                           VkPolygonMode polygon_mode,
                           VkPrimitiveTopology primitive_topology);
 
diff --git a/src/intel/vulkan/anv_measure.c b/src/intel/vulkan/anv_measure.c
index 482d973f9dd..2cd1e12066c 100644
--- a/src/intel/vulkan/anv_measure.c
+++ b/src/intel/vulkan/anv_measure.c
@@ -144,11 +144,13 @@ anv_measure_start_snapshot(struct anv_cmd_buffer *cmd_buffer,
    snapshot->renderpass = (type == INTEL_SNAPSHOT_COMPUTE) ?
                           0 : measure->base.renderpass;
 
-   if (type == INTEL_SNAPSHOT_COMPUTE && cmd_buffer->state.compute.pipeline) {
-      snapshot->cs = cmd_buffer->state.compute.pipeline->source_hash;
-   } else if (type == INTEL_SNAPSHOT_DRAW && cmd_buffer->state.gfx.pipeline) {
+   if (type == INTEL_SNAPSHOT_COMPUTE && cmd_buffer->state.compute.base.pipeline) {
+      const struct anv_compute_pipeline *pipeline =
+         anv_pipeline_to_compute(cmd_buffer->state.compute.base.pipeline);
+      snapshot->cs = pipeline->source_hash;
+   } else if (type == INTEL_SNAPSHOT_DRAW && cmd_buffer->state.gfx.base.pipeline) {
       const struct anv_graphics_pipeline *pipeline =
-         cmd_buffer->state.gfx.pipeline;
+         anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
       snapshot->vs = pipeline->base.source_hashes[MESA_SHADER_VERTEX];
       snapshot->tcs = pipeline->base.source_hashes[MESA_SHADER_TESS_CTRL];
       snapshot->tes = pipeline->base.source_hashes[MESA_SHADER_TESS_EVAL];
@@ -198,11 +200,12 @@ state_changed(struct anv_cmd_buffer *cmd_buffer,
 
    if (type == INTEL_SNAPSHOT_COMPUTE) {
       const struct anv_compute_pipeline *cs_pipe =
-         cmd_buffer->state.compute.pipeline;
+         anv_pipeline_to_compute(cmd_buffer->state.compute.base.pipeline);
       assert(cs_pipe);
       cs = cs_pipe->source_hash;
    } else if (type == INTEL_SNAPSHOT_DRAW) {
-      const struct anv_graphics_pipeline *gfx = cmd_buffer->state.gfx.pipeline;
+      const struct anv_graphics_pipeline *gfx =
+         anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
       assert(gfx);
       vs = gfx->base.source_hashes[MESA_SHADER_VERTEX];
       tcs = gfx->base.source_hashes[MESA_SHADER_TESS_CTRL];
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index a5db47f8c52..08f3ee3652d 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -3351,6 +3351,9 @@ struct anv_cmd_pipeline_state {
       uint32_t offsets[MAX_DYNAMIC_BUFFERS];
    } dynamic_offsets[MAX_SETS];
 
+   /**
+    * The current bound pipeline.
+    */
    struct anv_pipeline *pipeline;
 };
 
@@ -3364,8 +3367,6 @@ struct anv_cmd_pipeline_state {
 struct anv_cmd_graphics_state {
    struct anv_cmd_pipeline_state base;
 
-   struct anv_graphics_pipeline *pipeline;
-
    VkRenderingFlags rendering_flags;
    VkRect2D render_area;
    uint32_t layer_count;
@@ -3439,8 +3440,6 @@ enum anv_depth_reg_mode {
 struct anv_cmd_compute_state {
    struct anv_cmd_pipeline_state base;
 
-   struct anv_compute_pipeline *pipeline;
-
    bool pipeline_dirty;
 
    struct anv_state push_data;
@@ -3453,8 +3452,6 @@ struct anv_cmd_compute_state {
 struct anv_cmd_ray_tracing_state {
    struct anv_cmd_pipeline_state base;
 
-   struct anv_ray_tracing_pipeline *pipeline;
-
    bool pipeline_dirty;
 
    struct {
diff --git a/src/intel/vulkan/genX_cmd_buffer.c b/src/intel/vulkan/genX_cmd_buffer.c
index 3b7794c5710..2e5387d8227 100644
--- a/src/intel/vulkan/genX_cmd_buffer.c
+++ b/src/intel/vulkan/genX_cmd_buffer.c
@@ -1762,8 +1762,9 @@ genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer)
 static void
 cmd_buffer_alloc_gfx_push_constants(struct anv_cmd_buffer *cmd_buffer)
 {
-   VkShaderStageFlags stages =
-      cmd_buffer->state.gfx.pipeline->base.base.active_stages;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
+   VkShaderStageFlags stages = pipeline->base.base.active_stages;
 
    /* In order to avoid thrash, we assume that vertex and fragment stages
     * always exist.  In the rare case where one is missing *and* the other
@@ -1771,7 +1772,7 @@ cmd_buffer_alloc_gfx_push_constants(struct anv_cmd_buffer *cmd_buffer)
     * seems more important.
     */
    stages |= VK_SHADER_STAGE_FRAGMENT_BIT;
-   if (anv_pipeline_is_primitive(cmd_buffer->state.gfx.pipeline))
+   if (anv_pipeline_is_primitive(pipeline))
       stages |= VK_SHADER_STAGE_VERTEX_BIT;
 
    if (stages == cmd_buffer->state.gfx.push_constant_stages)
@@ -1780,7 +1781,7 @@ cmd_buffer_alloc_gfx_push_constants(struct anv_cmd_buffer *cmd_buffer)
 
    unsigned push_constant_kb;
 
    const struct intel_device_info *devinfo = cmd_buffer->device->info;
-   if (anv_pipeline_is_mesh(cmd_buffer->state.gfx.pipeline))
+   if (anv_pipeline_is_mesh(pipeline))
       push_constant_kb = devinfo->mesh_max_constant_urb_size_kb;
    else
       push_constant_kb = devinfo->max_constant_urb_size_kb;
@@ -2535,7 +2536,8 @@ cmd_buffer_emit_push_constant(struct anv_cmd_buffer *cmd_buffer,
                               unsigned buffer_count)
 {
    const struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
-   const struct anv_graphics_pipeline *pipeline = gfx_state->pipeline;
+   const struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(gfx_state->base.pipeline);
 
    static const uint32_t push_constant_opcodes[] = {
       [MESA_SHADER_VERTEX] = 21,
@@ -2611,7 +2613,8 @@ cmd_buffer_emit_push_constant_all(struct anv_cmd_buffer *cmd_buffer,
    }
 
    const struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
-   const struct anv_graphics_pipeline *pipeline = gfx_state->pipeline;
+   const struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(gfx_state->base.pipeline);
 
    gl_shader_stage stage = vk_to_mesa_shader_stage(shader_mask);
@@ -2647,7 +2650,8 @@ cmd_buffer_flush_gfx_push_constants(struct anv_cmd_buffer *cmd_buffer,
 {
    VkShaderStageFlags flushed = 0;
    struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
-   const struct anv_graphics_pipeline *pipeline = gfx_state->pipeline;
+   const struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(gfx_state->base.pipeline);
 
 #if GFX_VER >= 12
    uint32_t nobuffer_stages = 0;
@@ -2774,7 +2778,8 @@ cmd_buffer_flush_mesh_inline_data(struct anv_cmd_buffer *cmd_buffer,
                                   VkShaderStageFlags dirty_stages)
 {
    struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
-   const struct anv_graphics_pipeline *pipeline = gfx_state->pipeline;
+   const struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(gfx_state->base.pipeline);
 
    if (dirty_stages & VK_SHADER_STAGE_TASK_BIT_EXT &&
        anv_pipeline_has_stage(pipeline, MESA_SHADER_TASK)) {
@@ -2971,7 +2976,8 @@ genX(cmd_buffer_set_preemption)(struct anv_cmd_buffer *cmd_buffer, bool value)
 
 ALWAYS_INLINE static void
 genX(emit_hs)(struct anv_cmd_buffer *cmd_buffer)
 {
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
       return;
@@ -2996,7 +3002,8 @@ genX(emit_ds)(struct anv_cmd_buffer *cmd_buffer)
     *
     * FIXME: Use INTEL_NEEDS_WA_14019750404 once the tool picks it up.
     */
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
       return;
@@ -3007,7 +3014,8 @@ ALWAYS_INLINE static void
 genX(cmd_buffer_flush_gfx_state)(struct anv_cmd_buffer *cmd_buffer)
 {
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
    const struct vk_dynamic_graphics_state *dyn =
       &cmd_buffer->vk.dynamic_graphics_state;
    uint32_t *p;
@@ -3027,8 +3035,7 @@ genX(cmd_buffer_flush_gfx_state)(struct anv_cmd_buffer *cmd_buffer)
    if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE) {
       if (anv_pipeline_is_primitive(pipeline)) {
          genX(apply_task_urb_workaround)(cmd_buffer);
-      } else if (anv_pipeline_has_stage(cmd_buffer->state.gfx.pipeline,
-                                        MESA_SHADER_TASK)) {
+      } else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TASK)) {
          cmd_buffer->state.gfx.used_task_shader = true;
       }
    }
@@ -3273,15 +3280,15 @@ ALWAYS_INLINE static bool
 anv_use_generated_draws(const struct anv_cmd_buffer *cmd_buffer, uint32_t count)
 {
    const struct anv_device *device = cmd_buffer->device;
+   const struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
 
    /* Limit generated draws to pipelines without HS stage. This makes things
    * simpler for implementing Wa_1306463417, Wa_16011107343.
    */
    if ((INTEL_NEEDS_WA_1306463417 || INTEL_NEEDS_WA_16011107343) &&
-       anv_pipeline_has_stage(cmd_buffer->state.gfx.pipeline,
-                              MESA_SHADER_TESS_CTRL)) {
+       anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_CTRL))
       return false;
-   }
 
    return device->physical->generated_indirect_draws &&
           count >= device->physical->instance->generated_indirect_threshold;
@@ -4228,7 +4235,8 @@ void genX(CmdDraw)(
     uint32_t firstInstance)
 {
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
 
    if (anv_batch_has_error(&cmd_buffer->batch))
       return;
@@ -4301,7 +4309,8 @@ void genX(CmdDrawMultiEXT)(
     uint32_t stride)
 {
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
-   UNUSED struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   UNUSED struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
 
    if (anv_batch_has_error(&cmd_buffer->batch))
       return;
@@ -4406,7 +4415,8 @@ void genX(CmdDrawIndexed)(
     uint32_t firstInstance)
 {
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
 
    if (anv_batch_has_error(&cmd_buffer->batch))
      return;
@@ -4479,7 +4489,8 @@ void genX(CmdDrawMultiIndexedEXT)(
     const int32_t *pVertexOffset)
 {
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
 
    if (anv_batch_has_error(&cmd_buffer->batch))
      return;
@@ -4710,7 +4721,8 @@ void genX(CmdDrawIndirectByteCountEXT)(
 {
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
    ANV_FROM_HANDLE(anv_buffer, counter_buffer, counterBuffer);
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
 
    /* firstVertex is always zero for this draw function */
    const uint32_t firstVertex = 0;
@@ -4800,7 +4812,8 @@ load_indirect_parameters(struct anv_cmd_buffer *cmd_buffer,
                          bool indexed,
                          uint32_t draw_id)
 {
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
 
    struct mi_builder b;
    mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
@@ -4855,7 +4868,8 @@ emit_indirect_draws(struct anv_cmd_buffer *cmd_buffer,
                     bool indexed)
 {
 #if GFX_VER < 11
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
 #endif
@@ -5088,7 +5102,8 @@ emit_indirect_count_draws(struct anv_cmd_buffer *cmd_buffer,
                           bool indexed)
 {
 #if GFX_VER < 11
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
 #endif
@@ -5449,7 +5464,8 @@ genX(CmdDrawMeshTasksIndirectEXT)(
 {
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
    const struct brw_task_prog_data *task_prog_data = get_task_prog_data(pipeline);
    const struct brw_mesh_prog_data *mesh_prog_data = get_mesh_prog_data(pipeline);
    struct anv_cmd_state *cmd_state = &cmd_buffer->state;
@@ -5500,7 +5516,8 @@ genX(CmdDrawMeshTasksIndirectCountEXT)(
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
    ANV_FROM_HANDLE(anv_buffer, count_buffer, _countBuffer);
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
    const struct brw_task_prog_data *task_prog_data = get_task_prog_data(pipeline);
    const struct brw_mesh_prog_data *mesh_prog_data = get_mesh_prog_data(pipeline);
@@ -5591,7 +5608,8 @@ static void
 genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
 {
    struct anv_cmd_compute_state *comp_state = &cmd_buffer->state.compute;
-   struct anv_compute_pipeline *pipeline = comp_state->pipeline;
+   struct anv_compute_pipeline *pipeline =
+      anv_pipeline_to_compute(comp_state->base.pipeline);
    const UNUSED struct intel_device_info *devinfo = cmd_buffer->device->info;
 
    assert(pipeline->cs);
@@ -5837,7 +5855,8 @@ void genX(CmdDispatchBase)(
     uint32_t groupCountZ)
 {
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
-   struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
+   struct anv_compute_pipeline *pipeline =
+      anv_pipeline_to_compute(cmd_buffer->state.compute.base.pipeline);
    const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
 
    anv_cmd_buffer_push_base_group_id(cmd_buffer, baseGroupX,
@@ -5893,7 +5912,8 @@ void genX(CmdDispatchIndirect)(
 {
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
-   struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
+   struct anv_compute_pipeline *pipeline =
+      anv_pipeline_to_compute(cmd_buffer->state.compute.base.pipeline);
    const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
    struct anv_address addr = anv_address_add(buffer->address, offset);
    UNUSED struct anv_batch *batch = &cmd_buffer->batch;
@@ -6274,7 +6294,8 @@ cmd_buffer_trace_rays(struct anv_cmd_buffer *cmd_buffer,
 {
    struct anv_device *device = cmd_buffer->device;
    struct anv_cmd_ray_tracing_state *rt = &cmd_buffer->state.rt;
-   struct anv_ray_tracing_pipeline *pipeline = rt->pipeline;
+   struct anv_ray_tracing_pipeline *pipeline =
+      anv_pipeline_to_ray_tracing(rt->base.pipeline);
 
    if (anv_batch_has_error(&cmd_buffer->batch))
       return;
diff --git a/src/intel/vulkan/genX_cmd_draw_generated_indirect.h b/src/intel/vulkan/genX_cmd_draw_generated_indirect.h
index 5c9c5a48d46..217124400d8 100644
--- a/src/intel/vulkan/genX_cmd_draw_generated_indirect.h
+++ b/src/intel/vulkan/genX_cmd_draw_generated_indirect.h
@@ -64,7 +64,8 @@ genX(cmd_buffer_emit_generate_draws)(struct anv_cmd_buffer *cmd_buffer,
    if (push_data_state.map == NULL)
       return ANV_STATE_NULL;
 
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
 
    const bool use_tbimr = cmd_buffer->state.gfx.dyn_state.use_tbimr;
@@ -162,7 +163,8 @@ genX(cmd_buffer_get_draw_id_addr)(struct anv_cmd_buffer *cmd_buffer,
 #if GFX_VER >= 11
    return ANV_NULL_ADDRESS;
 #else
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
    if (!vs_prog_data->uses_drawid)
       return ANV_NULL_ADDRESS;
@@ -184,7 +186,8 @@ genX(cmd_buffer_get_generated_draw_stride)(struct anv_cmd_buffer *cmd_buffer)
 #if GFX_VER >= 11
    return 4 * GENX(3DPRIMITIVE_EXTENDED_length);
 #else
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
 
    uint32_t len = 0;
@@ -252,7 +255,8 @@ genX(cmd_buffer_emit_indirect_generated_draws_inplace)(struct anv_cmd_buffer *cm
                              device->physical->va.dynamic_state_pool.size);
    }
 
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
 
    if (vs_prog_data->uses_baseinstance ||
@@ -409,7 +413,8 @@ genX(cmd_buffer_emit_indirect_generated_draws_inring)(struct anv_cmd_buffer *cmd
                            },
                            cmd_buffer->generation.ring_bo->size);
 
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
 
    if (vs_prog_data->uses_baseinstance ||
diff --git a/src/intel/vulkan/genX_cmd_draw_helpers.h b/src/intel/vulkan/genX_cmd_draw_helpers.h
index 8db6b5e7506..75956f1b9a7 100644
--- a/src/intel/vulkan/genX_cmd_draw_helpers.h
+++ b/src/intel/vulkan/genX_cmd_draw_helpers.h
@@ -108,7 +108,8 @@ update_dirty_vbs_for_gfx8_vb_flush(struct anv_cmd_buffer *cmd_buffer,
 #if GFX_VER == 9
    const struct vk_dynamic_graphics_state *dyn =
       &cmd_buffer->vk.dynamic_graphics_state;
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
    uint64_t vb_used = dyn->vi->bindings_valid;
diff --git a/src/intel/vulkan/genX_gfx_state.c b/src/intel/vulkan/genX_gfx_state.c
index 20ddb45a351..b31888d1b38 100644
--- a/src/intel/vulkan/genX_gfx_state.c
+++ b/src/intel/vulkan/genX_gfx_state.c
@@ -79,7 +79,9 @@ genX(streamout_prologue)(struct anv_cmd_buffer *cmd_buffer)
    if (!intel_needs_workaround(cmd_buffer->device->info, 16013994831))
       return;
 
-   if (cmd_buffer->state.gfx.pipeline->uses_xfb) {
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
+   if (pipeline->uses_xfb) {
       genX(cmd_buffer_set_preemption)(cmd_buffer, false);
       return;
    }
@@ -194,7 +196,8 @@ want_stencil_pma_fix(struct anv_cmd_buffer *cmd_buffer,
    assert(d_iview && d_iview->image->planes[0].aux_usage == ISL_AUX_USAGE_HIZ);
 
    /* 3DSTATE_PS_EXTRA::PixelShaderValid */
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(cmd_buffer->state.gfx.base.pipeline);
    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT))
       return false;
@@ -438,7 +441,8 @@ genX(cmd_buffer_flush_gfx_runtime_state)(struct anv_cmd_buffer *cmd_buffer)
 {
    UNUSED struct anv_device *device = cmd_buffer->device;
    struct anv_cmd_graphics_state *gfx = &cmd_buffer->state.gfx;
-   const struct anv_graphics_pipeline *pipeline = gfx->pipeline;
+   const struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(gfx->base.pipeline);
    const struct vk_dynamic_graphics_state *dyn =
       &cmd_buffer->vk.dynamic_graphics_state;
    struct anv_gfx_dynamic_state *hw_state = &gfx->dyn_state;
@@ -664,7 +668,7 @@ genX(cmd_buffer_flush_gfx_runtime_state)(struct anv_cmd_buffer *cmd_buffer)
                                  pipeline->rasterization_samples);
 
       const VkPolygonMode dynamic_raster_mode =
-         genX(raster_polygon_mode)(gfx->pipeline,
+         genX(raster_polygon_mode)(pipeline,
                                    dyn->rs.polygon_mode,
                                    dyn->ia.primitive_topology);
@@ -1303,7 +1307,8 @@ genX(cmd_buffer_flush_gfx_hw_state)(struct anv_cmd_buffer *cmd_buffer)
 {
    struct anv_device *device = cmd_buffer->device;
    struct anv_cmd_graphics_state *gfx = &cmd_buffer->state.gfx;
-   struct anv_graphics_pipeline *pipeline = gfx->pipeline;
+   struct anv_graphics_pipeline *pipeline =
+      anv_pipeline_to_graphics(gfx->base.pipeline);
    const struct vk_dynamic_graphics_state *dyn =
      &cmd_buffer->vk.dynamic_graphics_state;
    struct anv_gfx_dynamic_state *hw_state = &gfx->dyn_state;
diff --git a/src/intel/vulkan/genX_pipeline.c b/src/intel/vulkan/genX_pipeline.c
index 224990abb8e..7e596a4b6cb 100644
--- a/src/intel/vulkan/genX_pipeline.c
+++ b/src/intel/vulkan/genX_pipeline.c
@@ -676,7 +676,7 @@ emit_3dstate_sbe(struct anv_graphics_pipeline *pipeline)
  * different shader stages which might generate their own type of primitives.
  */
 VkPolygonMode
-genX(raster_polygon_mode)(struct anv_graphics_pipeline *pipeline,
+genX(raster_polygon_mode)(const struct anv_graphics_pipeline *pipeline,
                           VkPolygonMode polygon_mode,
                           VkPrimitiveTopology primitive_topology)
 {
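
The shape the change converges on: anv_cmd_pipeline_state keeps a single base
struct anv_pipeline pointer per bind point, and every use site derives the typed
pipeline on demand with anv_pipeline_to_graphics() / anv_pipeline_to_compute() /
anv_pipeline_to_ray_tracing(). Below is a minimal, self-contained C sketch of
that single-source-of-truth pattern; the stand-in types and the to_graphics()
helper are illustrative assumptions, not the real anv definitions.

/* Illustrative sketch only -- stand-in types that mirror the shape of the
 * refactor, not the real anv definitions.  One base pointer is stored per
 * bind point; the typed pointer is derived where it is needed. */
#include <assert.h>

enum pipeline_type { PIPELINE_GRAPHICS, PIPELINE_COMPUTE };

struct base_pipeline {
   enum pipeline_type type;
};

struct graphics_pipeline {
   struct base_pipeline base;   /* first member, so the cast below is valid */
   unsigned active_stages;
};

/* Analogous to anv_cmd_pipeline_state: only the base pointer is kept. */
struct cmd_pipeline_state {
   struct base_pipeline *pipeline;
};

/* Checked downcast, similar in spirit to anv_pipeline_to_graphics(). */
static inline struct graphics_pipeline *
to_graphics(struct base_pipeline *p)
{
   assert(p->type == PIPELINE_GRAPHICS);
   return (struct graphics_pipeline *)p;
}

/* A use site derives the typed pipeline instead of reading a duplicated,
 * easy-to-forget typed field. */
static unsigned
active_stages(struct cmd_pipeline_state *state)
{
   return to_graphics(state->pipeline)->active_stages;
}

With only the base pointer stored, each bind point in anv_CmdBindPipeline() has
exactly one field to update, which removes the stale-duplicate failure mode the
commit message calls out.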