Module: Mesa
Branch: main
Commit: 90a2137cd56179a2c65339b2b60991e0281c8995
URL:    http://cgit.freedesktop.org/mesa/mesa/commit/?id=90a2137cd56179a2c65339b2b60991e0281c8995
Author: Kenneth Graunke <[email protected]>
Date:   Mon Jan 9 14:17:09 2023 -0800

intel/compiler: Use LSC opcode enum rather than legacy BRW_AOPs

This gets our logical atomic messages using the lsc_opcode enum rather
than the legacy BRW_AOP_* defines.  We have to translate one way or
another, and using the modern set makes sense going forward.

One advantage is that the lsc_opcode encoding has opcodes for both
integer and floating point atomics in the same enum, whereas the legacy
encoding used overlapping values (BRW_AOP_AND == 1 == BRW_AOP_FMAX),
which made it impossible to handle both sensibly in common code.

Reviewed-by: Lionel Landwerlin <[email protected]>
Reviewed-by: Rohan Garg <[email protected]>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/20604>

---
 src/intel/compiler/brw_eu.h                    | 55 +++++++++++++++++
 src/intel/compiler/brw_fs.cpp                  | 15 +++--
 src/intel/compiler/brw_fs_nir.cpp              | 36 ++++++------
 src/intel/compiler/brw_lower_logical_sends.cpp | 81 ++++----------------------
 src/intel/compiler/brw_nir.c                   | 36 ++++++------
 src/intel/compiler/brw_nir.h                   |  2 +-
 src/intel/compiler/brw_vec4_nir.cpp            |  2 +-
 7 files changed, 110 insertions(+), 117 deletions(-)

diff --git a/src/intel/compiler/brw_eu.h b/src/intel/compiler/brw_eu.h
index 370f1567614..999c0e8396f 100644
--- a/src/intel/compiler/brw_eu.h
+++ b/src/intel/compiler/brw_eu.h
@@ -1240,6 +1240,61 @@ lsc_opcode_is_atomic(enum lsc_opcode opcode)
    }
 }
 
+static inline unsigned
+lsc_op_to_legacy_atomic(unsigned _op)
+{
+   enum lsc_opcode op = (enum lsc_opcode) _op;
+
+   switch (op) {
+   case LSC_OP_ATOMIC_INC:
+      return BRW_AOP_INC;
+   case LSC_OP_ATOMIC_DEC:
+      return BRW_AOP_DEC;
+   case LSC_OP_ATOMIC_STORE:
+      return BRW_AOP_MOV;
+   case LSC_OP_ATOMIC_ADD:
+      return BRW_AOP_ADD;
+   case LSC_OP_ATOMIC_SUB:
+      return BRW_AOP_SUB;
+   case LSC_OP_ATOMIC_MIN:
+      return BRW_AOP_IMIN;
+   case LSC_OP_ATOMIC_MAX:
+      return BRW_AOP_IMAX;
+   case LSC_OP_ATOMIC_UMIN:
+      return BRW_AOP_UMIN;
+   case LSC_OP_ATOMIC_UMAX:
+      return BRW_AOP_UMAX;
+   case LSC_OP_ATOMIC_CMPXCHG:
+      return BRW_AOP_CMPWR;
+   case LSC_OP_ATOMIC_FADD:
+      return BRW_AOP_FADD;
+   case LSC_OP_ATOMIC_FMIN:
+      return BRW_AOP_FMIN;
+   case LSC_OP_ATOMIC_FMAX:
+      return BRW_AOP_FMAX;
+   case LSC_OP_ATOMIC_FCMPXCHG:
+      return BRW_AOP_FCMPWR;
+   case LSC_OP_ATOMIC_AND:
+      return BRW_AOP_AND;
+   case LSC_OP_ATOMIC_OR:
+      return BRW_AOP_OR;
+   case LSC_OP_ATOMIC_XOR:
+      return BRW_AOP_XOR;
+   /* No LSC op maps to BRW_AOP_PREDEC */
+   case LSC_OP_ATOMIC_LOAD:
+   case LSC_OP_ATOMIC_FSUB:
+      unreachable("no corresponding legacy atomic operation");
+   case LSC_OP_LOAD:
+   case LSC_OP_LOAD_CMASK:
+   case LSC_OP_STORE:
+   case LSC_OP_STORE_CMASK:
+   case LSC_OP_FENCE:
+      unreachable("not an atomic op");
+   }
+
+   unreachable("invalid LSC op");
+}
+
 static inline uint32_t
 lsc_data_size_bytes(enum lsc_data_size data_size)
 {
diff --git a/src/intel/compiler/brw_fs.cpp b/src/intel/compiler/brw_fs.cpp
index dbd84968509..fc4fb43dc77 100644
--- a/src/intel/compiler/brw_fs.cpp
+++ b/src/intel/compiler/brw_fs.cpp
@@ -786,11 +786,10 @@ fs_inst::components_read(unsigned i) const
          /* Data source */
          const unsigned op = src[2].ud;
          switch (op) {
-         case BRW_AOP_INC:
-         case BRW_AOP_DEC:
-         case BRW_AOP_PREDEC:
+         case LSC_OP_ATOMIC_INC:
+         case LSC_OP_ATOMIC_DEC:
             return 0;
-         case BRW_AOP_CMPWR:
+         case LSC_OP_ATOMIC_CMPXCHG:
             return 2;
          default:
             return 1;
@@ -806,7 +805,7 @@ fs_inst::components_read(unsigned i) const
       if (i == 1) {
          /* Data source */
          const unsigned op = src[2].ud;
-         return op == BRW_AOP_FCMPWR ? 2 : 1;
+         return op == LSC_OP_ATOMIC_FCMPXCHG ? 2 : 1;
       } else {
          return 1;
       }
@@ -839,10 +838,10 @@ fs_inst::components_read(unsigned i) const
       if (i == SURFACE_LOGICAL_SRC_ADDRESS)
         return src[SURFACE_LOGICAL_SRC_IMM_DIMS].ud;
       /* Surface operation source. */
-      else if (i == SURFACE_LOGICAL_SRC_DATA && op == BRW_AOP_CMPWR)
+      else if (i == SURFACE_LOGICAL_SRC_DATA && op == LSC_OP_ATOMIC_CMPXCHG)
         return 2;
       else if (i == SURFACE_LOGICAL_SRC_DATA &&
-               (op == BRW_AOP_INC || op == BRW_AOP_DEC || op == BRW_AOP_PREDEC))
+               (op == LSC_OP_ATOMIC_INC || op == LSC_OP_ATOMIC_DEC))
         return 0;
       else
         return 1;
@@ -858,7 +857,7 @@ fs_inst::components_read(unsigned i) const
       if (i == SURFACE_LOGICAL_SRC_ADDRESS)
         return src[SURFACE_LOGICAL_SRC_IMM_DIMS].ud;
       /* Surface operation source. */
-      else if (i == SURFACE_LOGICAL_SRC_DATA && op == BRW_AOP_FCMPWR)
+      else if (i == SURFACE_LOGICAL_SRC_DATA && op == LSC_OP_ATOMIC_FCMPXCHG)
         return 2;
       else
         return 1;
diff --git a/src/intel/compiler/brw_fs_nir.cpp b/src/intel/compiler/brw_fs_nir.cpp
index c796b7d1598..1ed5fa977ba 100644
--- a/src/intel/compiler/brw_fs_nir.cpp
+++ b/src/intel/compiler/brw_fs_nir.cpp
@@ -4272,8 +4272,8 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
                      fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
       } else {
          unsigned num_srcs = info->num_srcs;
-         int op = brw_aop_for_nir_intrinsic(instr);
-         if (op == BRW_AOP_INC || op == BRW_AOP_DEC) {
+         int op = lsc_aop_for_nir_intrinsic(instr);
+         if (op == LSC_OP_ATOMIC_INC || op == LSC_OP_ATOMIC_DEC) {
            assert(num_srcs == 4);
            num_srcs = 3;
         }
@@ -5958,7 +5958,7 @@ void
 fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
                                  nir_intrinsic_instr *instr)
 {
-   int op = brw_aop_for_nir_intrinsic(instr);
+   int op = lsc_aop_for_nir_intrinsic(instr);
 
    /* The BTI untyped atomic messages only support 32-bit atomics.  If you
     * just look at the big table of messages in the Vol 7 of the SKL PRM, they
@@ -5981,10 +5981,10 @@ fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
    srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(1);
 
    fs_reg data;
-   if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
+   if (op != LSC_OP_ATOMIC_INC && op != LSC_OP_ATOMIC_DEC)
       data = expand_to_32bit(bld, get_nir_src(instr->src[2]));
 
-   if (op == BRW_AOP_CMPWR) {
+   if (op == LSC_OP_ATOMIC_CMPXCHG) {
       fs_reg tmp = bld.vgrf(data.type, 2);
       fs_reg sources[2] = {
          data,
@@ -6022,7 +6022,7 @@ void
 fs_visitor::nir_emit_ssbo_atomic_float(const fs_builder &bld,
                                        nir_intrinsic_instr *instr)
 {
-   int op = brw_aop_for_nir_intrinsic(instr);
+   int op = lsc_aop_for_nir_intrinsic(instr);
 
    fs_reg dest;
    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
@@ -6036,7 +6036,7 @@ fs_visitor::nir_emit_ssbo_atomic_float(const fs_builder &bld,
    srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(1);
 
    fs_reg data = expand_to_32bit(bld, get_nir_src(instr->src[2]));
-   if (op == BRW_AOP_FCMPWR) {
+   if (op == LSC_OP_ATOMIC_FCMPXCHG) {
       fs_reg tmp = bld.vgrf(data.type, 2);
       fs_reg sources[2] = {
          data,
@@ -6073,7 +6073,7 @@ void
 fs_visitor::nir_emit_shared_atomic(const fs_builder &bld,
                                    nir_intrinsic_instr *instr)
 {
-   int op = brw_aop_for_nir_intrinsic(instr);
+   int op = lsc_aop_for_nir_intrinsic(instr);
 
    fs_reg dest;
    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
@@ -6086,9 +6086,9 @@ fs_visitor::nir_emit_shared_atomic(const fs_builder &bld,
    srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(1);
 
    fs_reg data;
-   if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
+   if (op != LSC_OP_ATOMIC_INC && op != LSC_OP_ATOMIC_DEC)
       data = expand_to_32bit(bld, get_nir_src(instr->src[1]));
-   if (op == BRW_AOP_CMPWR) {
+   if (op == LSC_OP_ATOMIC_CMPXCHG) {
       fs_reg tmp = bld.vgrf(data.type, 2);
       fs_reg sources[2] = {
          expand_to_32bit(bld, data),
@@ -6138,7 +6138,7 @@ void
 fs_visitor::nir_emit_shared_atomic_float(const fs_builder &bld,
                                          nir_intrinsic_instr *instr)
 {
-   int op = brw_aop_for_nir_intrinsic(instr);
+   int op = lsc_aop_for_nir_intrinsic(instr);
 
    fs_reg dest;
    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
@@ -6151,7 +6151,7 @@ fs_visitor::nir_emit_shared_atomic_float(const fs_builder &bld,
    srcs[SURFACE_LOGICAL_SRC_ALLOW_SAMPLE_MASK] = brw_imm_ud(1);
 
    fs_reg data = expand_to_32bit(bld, get_nir_src(instr->src[1]));
-   if (op == BRW_AOP_FCMPWR) {
+   if (op == LSC_OP_ATOMIC_FCMPXCHG) {
       fs_reg tmp = bld.vgrf(data.type, 2);
       fs_reg sources[2] = {
         data,
@@ -6202,7 +6202,7 @@ void
 fs_visitor::nir_emit_global_atomic(const fs_builder &bld,
                                    nir_intrinsic_instr *instr)
 {
-   int op = brw_aop_for_nir_intrinsic(instr);
+   int op = lsc_aop_for_nir_intrinsic(instr);
 
    fs_reg dest;
    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
@@ -6211,10 +6211,10 @@ fs_visitor::nir_emit_global_atomic(const fs_builder &bld,
    fs_reg addr = get_nir_src(instr->src[0]);
 
    fs_reg data;
-   if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
+   if (op != LSC_OP_ATOMIC_INC && op != LSC_OP_ATOMIC_DEC)
       data = expand_to_32bit(bld, get_nir_src(instr->src[1]));
 
-   if (op == BRW_AOP_CMPWR) {
+   if (op == LSC_OP_ATOMIC_CMPXCHG) {
       fs_reg tmp = bld.vgrf(data.type, 2);
       fs_reg sources[2] = {
         data,
@@ -6256,17 +6256,17 @@ void
 fs_visitor::nir_emit_global_atomic_float(const fs_builder &bld,
                                          nir_intrinsic_instr *instr)
 {
-   int op = brw_aop_for_nir_intrinsic(instr);
+   int op = lsc_aop_for_nir_intrinsic(instr);
 
    assert(nir_intrinsic_infos[instr->intrinsic].has_dest);
    fs_reg dest = get_nir_dest(instr->dest);
 
    fs_reg addr = get_nir_src(instr->src[0]);
 
-   assert(op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC);
+   assert(op != LSC_OP_ATOMIC_INC && op != LSC_OP_ATOMIC_DEC);
    fs_reg data = expand_to_32bit(bld, get_nir_src(instr->src[1]));
 
-   if (op == BRW_AOP_FCMPWR) {
+   if (op == LSC_OP_ATOMIC_FCMPXCHG) {
       fs_reg tmp = bld.vgrf(data.type, 2);
       fs_reg sources[2] = {
         data,
diff --git a/src/intel/compiler/brw_lower_logical_sends.cpp b/src/intel/compiler/brw_lower_logical_sends.cpp
index 1ff064d342a..1f5eb069e26 100644
--- a/src/intel/compiler/brw_lower_logical_sends.cpp
+++ b/src/intel/compiler/brw_lower_logical_sends.cpp
@@ -1556,13 +1556,13 @@ lower_surface_logical_send(const fs_builder &bld, fs_inst *inst)
    case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
       desc = brw_dp_untyped_atomic_desc(devinfo, inst->exec_size,
-                                        arg.ud, /* atomic_op */
+                                        lsc_op_to_legacy_atomic(arg.ud),
                                         !inst->dst.is_null());
       break;
 
    case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL:
       desc = brw_dp_untyped_atomic_float_desc(devinfo, inst->exec_size,
-                                              arg.ud, /* atomic_op */
+                                              lsc_op_to_legacy_atomic(arg.ud),
                                               !inst->dst.is_null());
       break;
 
@@ -1580,7 +1580,7 @@ lower_surface_logical_send(const fs_builder &bld, fs_inst *inst)
    case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
       desc = brw_dp_typed_atomic_desc(devinfo, inst->exec_size, inst->group,
-                                      arg.ud, /* atomic_op */
+                                      lsc_op_to_legacy_atomic(arg.ud),
                                       !inst->dst.is_null());
       break;
 
@@ -1607,59 +1607,6 @@ lower_surface_logical_send(const fs_builder &bld, fs_inst *inst)
    inst->src[3] = payload2;
 }
 
-static enum lsc_opcode
-brw_atomic_op_to_lsc_atomic_op(unsigned op)
-{
-   switch(op) {
-   case BRW_AOP_AND:
-      return LSC_OP_ATOMIC_AND;
-   case BRW_AOP_OR:
-      return LSC_OP_ATOMIC_OR;
-   case BRW_AOP_XOR:
-      return LSC_OP_ATOMIC_XOR;
-   case BRW_AOP_MOV:
-      return LSC_OP_ATOMIC_STORE;
-   case BRW_AOP_INC:
-      return LSC_OP_ATOMIC_INC;
-   case BRW_AOP_DEC:
-      return LSC_OP_ATOMIC_DEC;
-   case BRW_AOP_ADD:
-      return LSC_OP_ATOMIC_ADD;
-   case BRW_AOP_SUB:
-      return LSC_OP_ATOMIC_SUB;
-   case BRW_AOP_IMAX:
-      return LSC_OP_ATOMIC_MAX;
-   case BRW_AOP_IMIN:
-      return LSC_OP_ATOMIC_MIN;
-   case BRW_AOP_UMAX:
-      return LSC_OP_ATOMIC_UMAX;
-   case BRW_AOP_UMIN:
-      return LSC_OP_ATOMIC_UMIN;
-   case BRW_AOP_CMPWR:
-      return LSC_OP_ATOMIC_CMPXCHG;
-   default:
-      assert(false);
-      unreachable("invalid atomic opcode");
-   }
-}
-
-static enum lsc_opcode
-brw_atomic_op_to_lsc_fatomic_op(uint32_t aop)
-{
-   switch(aop) {
-   case BRW_AOP_FMAX:
-      return LSC_OP_ATOMIC_FMAX;
-   case BRW_AOP_FMIN:
-      return LSC_OP_ATOMIC_FMIN;
-   case BRW_AOP_FCMPWR:
-      return LSC_OP_ATOMIC_FCMPXCHG;
-   case BRW_AOP_FADD:
-      return LSC_OP_ATOMIC_FADD;
-   default:
-      unreachable("Unsupported float atomic opcode");
-   }
-}
-
 static enum lsc_data_size
 lsc_bits_to_data_size(unsigned bit_size)
 {
@@ -1762,10 +1709,7 @@ lower_lsc_surface_logical_send(const fs_builder &bld, fs_inst *inst)
        * Atomic messages are always forced to "un-cacheable" in the L1
        * cache.
        */
-      enum lsc_opcode opcode =
-         inst->opcode == SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL ?
-         brw_atomic_op_to_lsc_fatomic_op(arg.ud) :
-         brw_atomic_op_to_lsc_atomic_op(arg.ud);
+      enum lsc_opcode opcode = (enum lsc_opcode) arg.ud;
       inst->desc = lsc_msg_desc(devinfo, opcode, inst->exec_size,
                                 surf_type, LSC_ADDR_SIZE_A32,
@@ -2104,12 +2048,7 @@ lower_lsc_a64_logical_send(const fs_builder &bld, fs_inst *inst)
        * Atomic messages are always forced to "un-cacheable" in the L1
        * cache.
        */
-      enum lsc_opcode opcode =
-         (inst->opcode == SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL ||
-          inst->opcode == SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT16_LOGICAL ||
-          inst->opcode == SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT64_LOGICAL) ?
-         brw_atomic_op_to_lsc_atomic_op(arg) :
-         brw_atomic_op_to_lsc_fatomic_op(arg);
+      enum lsc_opcode opcode = (enum lsc_opcode) arg;
       inst->desc = lsc_msg_desc(devinfo, opcode, inst->exec_size,
                                 LSC_ADDR_SURFTYPE_FLAT, LSC_ADDR_SIZE_A64,
                                 1 /* num_coordinates */,
@@ -2275,33 +2214,33 @@ lower_a64_logical_send(const fs_builder &bld, fs_inst *inst)
    case SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL:
       desc = brw_dp_a64_untyped_atomic_desc(devinfo, inst->exec_size, 32,
-                                            arg, /* atomic_op */
+                                            lsc_op_to_legacy_atomic(arg),
                                             !inst->dst.is_null());
       break;
 
    case SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT16_LOGICAL:
       desc = brw_dp_a64_untyped_atomic_desc(devinfo, inst->exec_size, 16,
-                                            arg, /* atomic_op */
+                                            lsc_op_to_legacy_atomic(arg),
                                             !inst->dst.is_null());
       break;
 
    case SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT64_LOGICAL:
       desc = brw_dp_a64_untyped_atomic_desc(devinfo, inst->exec_size, 64,
-                                            arg, /* atomic_op */
+                                            lsc_op_to_legacy_atomic(arg),
                                             !inst->dst.is_null());
       break;
 
    case SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT16_LOGICAL:
       desc = brw_dp_a64_untyped_atomic_float_desc(devinfo, inst->exec_size,
                                                   16, /* bit_size */
-                                                  arg, /* atomic_op */
+                                                  lsc_op_to_legacy_atomic(arg),
                                                   !inst->dst.is_null());
       break;
 
    case SHADER_OPCODE_A64_UNTYPED_ATOMIC_FLOAT32_LOGICAL:
       desc = brw_dp_a64_untyped_atomic_float_desc(devinfo, inst->exec_size,
                                                   32, /* bit_size */
-                                                  arg, /* atomic_op */
+                                                  lsc_op_to_legacy_atomic(arg),
                                                   !inst->dst.is_null());
       break;
 
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index 1fbd150a6c6..08bb841757f 100644
--- a/src/intel/compiler/brw_nir.c
+++ b/src/intel/compiler/brw_nir.c
@@ -1587,8 +1587,8 @@ brw_cmod_for_nir_comparison(nir_op op)
    }
 }
 
-uint32_t
-brw_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic)
+enum lsc_opcode
+lsc_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic)
 {
    switch (atomic->intrinsic) {
 #define AOP_CASE(atom) \
@@ -1619,22 +1619,22 @@ brw_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic)
       if (nir_src_is_const(atomic->src[src_idx])) {
          int64_t add_val = nir_src_as_int(atomic->src[src_idx]);
          if (add_val == 1)
-            return BRW_AOP_INC;
+            return LSC_OP_ATOMIC_INC;
          else if (add_val == -1)
-            return BRW_AOP_DEC;
+            return LSC_OP_ATOMIC_DEC;
       }
-      return BRW_AOP_ADD;
+      return LSC_OP_ATOMIC_ADD;
    }
 
-   AOP_CASE(imin):      return BRW_AOP_IMIN;
-   AOP_CASE(umin):      return BRW_AOP_UMIN;
-   AOP_CASE(imax):      return BRW_AOP_IMAX;
-   AOP_CASE(umax):      return BRW_AOP_UMAX;
-   AOP_CASE(and):       return BRW_AOP_AND;
-   AOP_CASE(or):        return BRW_AOP_OR;
-   AOP_CASE(xor):       return BRW_AOP_XOR;
-   AOP_CASE(exchange):  return BRW_AOP_MOV;
-   AOP_CASE(comp_swap): return BRW_AOP_CMPWR;
+   AOP_CASE(imin):      return LSC_OP_ATOMIC_MIN;
+   AOP_CASE(umin):      return LSC_OP_ATOMIC_UMIN;
+   AOP_CASE(imax):      return LSC_OP_ATOMIC_MAX;
+   AOP_CASE(umax):      return LSC_OP_ATOMIC_UMAX;
+   AOP_CASE(and):       return LSC_OP_ATOMIC_AND;
+   AOP_CASE(or):        return LSC_OP_ATOMIC_OR;
+   AOP_CASE(xor):       return LSC_OP_ATOMIC_XOR;
+   AOP_CASE(exchange):  return LSC_OP_ATOMIC_STORE;
+   AOP_CASE(comp_swap): return LSC_OP_ATOMIC_CMPXCHG;
 #undef AOP_CASE
 
 #define AOP_CASE(atom) \
@@ -1642,10 +1642,10 @@ brw_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic)
    case nir_intrinsic_shared_atomic_##atom: \
    case nir_intrinsic_global_atomic_##atom
 
-   AOP_CASE(fmin):       return BRW_AOP_FMIN;
-   AOP_CASE(fmax):       return BRW_AOP_FMAX;
-   AOP_CASE(fcomp_swap): return BRW_AOP_FCMPWR;
-   AOP_CASE(fadd):       return BRW_AOP_FADD;
+   AOP_CASE(fmin):       return LSC_OP_ATOMIC_FMIN;
+   AOP_CASE(fmax):       return LSC_OP_ATOMIC_FMAX;
+   AOP_CASE(fcomp_swap): return LSC_OP_ATOMIC_FCMPXCHG;
+   AOP_CASE(fadd):       return LSC_OP_ATOMIC_FADD;
 
 #undef AOP_CASE
 
diff --git a/src/intel/compiler/brw_nir.h b/src/intel/compiler/brw_nir.h
index c07126ae4a0..b11a36a34e2 100644
--- a/src/intel/compiler/brw_nir.h
+++ b/src/intel/compiler/brw_nir.h
@@ -161,7 +161,7 @@ void brw_nir_apply_key(nir_shader *nir,
                        bool is_scalar);
 
 enum brw_conditional_mod brw_cmod_for_nir_comparison(nir_op op);
-uint32_t brw_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic);
+enum lsc_opcode lsc_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic);
 
 enum brw_reg_type brw_type_for_nir_type(const struct intel_device_info *devinfo,
                                         nir_alu_type type);
diff --git a/src/intel/compiler/brw_vec4_nir.cpp b/src/intel/compiler/brw_vec4_nir.cpp
index 756eb52dad4..2a3a3b906d9 100644
--- a/src/intel/compiler/brw_vec4_nir.cpp
+++ b/src/intel/compiler/brw_vec4_nir.cpp
@@ -545,7 +545,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
    case nir_intrinsic_ssbo_atomic_xor:
    case nir_intrinsic_ssbo_atomic_exchange:
    case nir_intrinsic_ssbo_atomic_comp_swap:
-      nir_emit_ssbo_atomic(brw_aop_for_nir_intrinsic(instr), instr);
+      nir_emit_ssbo_atomic(lsc_op_to_legacy_atomic(lsc_aop_for_nir_intrinsic(instr)), instr);
      break;
 
    case nir_intrinsic_load_vertex_id:
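
The overlap the commit message calls out (BRW_AOP_AND == 1 == BRW_AOP_FMAX) is the core motivation for switching to the lsc_opcode enum. Below is a minimal standalone C sketch of that problem; the names and numeric values are illustrative stand-ins (only the "both equal 1" overlap is taken from the commit message), not Mesa's actual definitions.

/* Illustrative stand-ins only -- not Mesa's headers.  The legacy BRW_AOP_*
 * defines lived in two overlapping value spaces, so the same number meant
 * different operations for integer vs. float atomics. */
#include <stdio.h>

#define FAKE_AOP_AND   1   /* legacy-style integer atomic value */
#define FAKE_AOP_FMAX  1   /* legacy-style float atomic, same value */

/* LSC-style: one enum covers integer and float atomics with distinct
 * values (the numbers here are arbitrary; only distinctness matters). */
enum fake_lsc_opcode {
   FAKE_LSC_OP_ATOMIC_AND  = 100,
   FAKE_LSC_OP_ATOMIC_FMAX = 101,
};

static const char *describe_legacy(unsigned aop)
{
   /* Common code cannot tell which operation "1" is without knowing,
    * out of band, whether the message was integer or float. */
   return aop == 1 ? "AND or FMAX -- ambiguous" : "other";
}

static const char *describe_lsc(enum fake_lsc_opcode op)
{
   switch (op) {
   case FAKE_LSC_OP_ATOMIC_AND:  return "integer AND";
   case FAKE_LSC_OP_ATOMIC_FMAX: return "float MAX";
   }
   return "other";
}

int main(void)
{
   printf("legacy value 1 -> %s\n", describe_legacy(FAKE_AOP_AND));
   printf("lsc AND        -> %s\n", describe_lsc(FAKE_LSC_OP_ATOMIC_AND));
   printf("lsc FMAX       -> %s\n", describe_lsc(FAKE_LSC_OP_ATOMIC_FMAX));
   return 0;
}

This is why the patch can collapse the separate brw_atomic_op_to_lsc_atomic_op()/brw_atomic_op_to_lsc_fatomic_op() translators into a single lsc_op_to_legacy_atomic() helper: with distinct enum values, one switch handles both families.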
