On Thursday, April 21, 2016 10:18:48 PM PDT Jordan Justen wrote:
> Signed-off-by: Jordan Justen <[email protected]>
> ---
> src/mesa/drivers/dri/i965/Makefile.sources | 1 +
> src/mesa/drivers/dri/i965/brw_context.c | 4 +-
> src/mesa/drivers/dri/i965/brw_context.h | 5 +
> src/mesa/drivers/dri/i965/brw_queryobj.c | 35 ++-
> src/mesa/drivers/dri/i965/gen6_queryobj.c | 33 ++
> src/mesa/drivers/dri/i965/hsw_queryobj.c | 432 +++++++++++++++++++++++++++
> src/mesa/drivers/dri/i965/intel_extensions.c | 4 +
> src/mesa/drivers/dri/i965/intel_reg.h | 1 +
> 8 files changed, 512 insertions(+), 3 deletions(-)
> create mode 100644 src/mesa/drivers/dri/i965/hsw_queryobj.c
>
> diff --git a/src/mesa/drivers/dri/i965/Makefile.sources b/src/mesa/drivers/dri/i965/Makefile.sources
> index 632f2e9..66ecd37 100644
> --- a/src/mesa/drivers/dri/i965/Makefile.sources
> +++ b/src/mesa/drivers/dri/i965/Makefile.sources
> @@ -227,6 +227,7 @@ i965_FILES = \
> gen8_viewport_state.c \
> gen8_vs_state.c \
> gen8_wm_depth_stencil.c \
> + hsw_queryobj.c \
> intel_batchbuffer.c \
> intel_batchbuffer.h \
> intel_blit.c \
> diff --git a/src/mesa/drivers/dri/i965/brw_context.c b/src/mesa/drivers/dri/i965/brw_context.c
> index 63ac3bc..1380d41 100644
> --- a/src/mesa/drivers/dri/i965/brw_context.c
> +++ b/src/mesa/drivers/dri/i965/brw_context.c
> @@ -358,7 +358,9 @@ brw_init_driver_functions(struct brw_context *brw,
>
> brwInitFragProgFuncs( functions );
> brw_init_common_queryobj_functions(functions);
> - if (brw->gen >= 6)
> + if (brw->gen >= 8 || brw->is_haswell)
> + hsw_init_queryobj_functions(functions);
> + else if (brw->gen >= 6)
> gen6_init_queryobj_functions(functions);
> else
> gen4_init_queryobj_functions(functions);
> diff --git a/src/mesa/drivers/dri/i965/brw_context.h b/src/mesa/drivers/dri/i965/brw_context.h
> index 5a4e42b..b75eabd 100644
> --- a/src/mesa/drivers/dri/i965/brw_context.h
> +++ b/src/mesa/drivers/dri/i965/brw_context.h
> @@ -1425,12 +1425,17 @@ void brw_init_common_queryobj_functions(struct dd_function_table *functions);
> void gen4_init_queryobj_functions(struct dd_function_table *functions);
> void brw_emit_query_begin(struct brw_context *brw);
> void brw_emit_query_end(struct brw_context *brw);
> +void brw_query_counter(struct gl_context *ctx, struct gl_query_object *q);
> +bool brw_is_query_pipelined(struct brw_query_object *query);
>
> /** gen6_queryobj.c */
> void gen6_init_queryobj_functions(struct dd_function_table *functions);
> void brw_write_timestamp(struct brw_context *brw, drm_intel_bo *bo, int idx);
> void brw_write_depth_count(struct brw_context *brw, drm_intel_bo *bo, int idx);
>
> +/** hsw_queryobj.c */
> +void hsw_init_queryobj_functions(struct dd_function_table *functions);
> +
> /** brw_conditional_render.c */
> void brw_init_conditional_render_functions(struct dd_function_table *functions);
> bool brw_check_conditional_render(struct brw_context *brw);
> diff --git a/src/mesa/drivers/dri/i965/brw_queryobj.c b/src/mesa/drivers/dri/i965/brw_queryobj.c
> index a8e5aba..81ee5ea 100644
> --- a/src/mesa/drivers/dri/i965/brw_queryobj.c
> +++ b/src/mesa/drivers/dri/i965/brw_queryobj.c
> @@ -462,7 +462,7 @@ brw_emit_query_end(struct brw_context *brw)
> * current GPU time. This is unlike GL_TIME_ELAPSED, which measures the
> * time while the query is active.
> */
> -static void
> +void
> brw_query_counter(struct gl_context *ctx, struct gl_query_object *q)
> {
> struct brw_context *brw = brw_context(ctx);
> @@ -507,12 +507,42 @@ brw_get_timestamp(struct gl_context *ctx)
> return result;
> }
>
> +bool
> +brw_is_query_pipelined(struct brw_query_object *query)
> +{
> + switch (query->Base.Target) {
> + case GL_TIMESTAMP:
> + case GL_TIME_ELAPSED:
> + case GL_ANY_SAMPLES_PASSED:
> + case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
> + case GL_SAMPLES_PASSED_ARB:
> + return true;
> +
> + case GL_PRIMITIVES_GENERATED:
> + case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
> + case GL_VERTICES_SUBMITTED_ARB:
> + case GL_PRIMITIVES_SUBMITTED_ARB:
> + case GL_VERTEX_SHADER_INVOCATIONS_ARB:
> + case GL_GEOMETRY_SHADER_INVOCATIONS:
> + case GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED_ARB:
> + case GL_FRAGMENT_SHADER_INVOCATIONS_ARB:
> + case GL_CLIPPING_INPUT_PRIMITIVES_ARB:
> + case GL_CLIPPING_OUTPUT_PRIMITIVES_ARB:
> + case GL_COMPUTE_SHADER_INVOCATIONS_ARB:
> + case GL_TESS_CONTROL_SHADER_PATCHES_ARB:
> + case GL_TESS_EVALUATION_SHADER_INVOCATIONS_ARB:
> + return false;
> +
> + default:
> + unreachable("Unrecognized query target in is_query_pipelined()");
> + }
> +}
> +
> /* Initialize query object functions used on all generations. */
> void brw_init_common_queryobj_functions(struct dd_function_table *functions)
> {
> functions->NewQueryObject = brw_new_query_object;
> functions->DeleteQuery = brw_delete_query;
> - functions->QueryCounter = brw_query_counter;
> functions->GetTimestamp = brw_get_timestamp;
> }
>
> @@ -523,4 +553,5 @@ void gen4_init_queryobj_functions(struct dd_function_table *functions)
> functions->EndQuery = brw_end_query;
> functions->CheckQuery = brw_check_query;
> functions->WaitQuery = brw_wait_query;
> + functions->QueryCounter = brw_query_counter;
> }
> diff --git a/src/mesa/drivers/dri/i965/gen6_queryobj.c b/src/mesa/drivers/dri/i965/gen6_queryobj.c
> index 960ccfd..f95c9fc 100644
> --- a/src/mesa/drivers/dri/i965/gen6_queryobj.c
> +++ b/src/mesa/drivers/dri/i965/gen6_queryobj.c
> @@ -37,8 +37,25 @@
> #include "brw_defines.h"
> #include "brw_state.h"
> #include "intel_batchbuffer.h"
> +#include "intel_buffer_objects.h"
> #include "intel_reg.h"
>
> +static inline void
> +set_query_availability(struct brw_context *brw, struct brw_query_object *query,
> + bool available)
> +{
> + /* For ARB_query_buffer_object we write the query availability for
> + * pipelined results
> + */
> + if (brw->ctx.Extensions.ARB_query_buffer_object &&
> + brw_is_query_pipelined(query)) {
> + brw_emit_pipe_control_write(brw,
> + PIPE_CONTROL_WRITE_IMMEDIATE,
> + query->bo, 2 * sizeof(uint64_t),
> + available, 0);
> + }
> +}
> +
> static void
> write_primitives_generated(struct brw_context *brw,
> drm_intel_bo *query_bo, int stream, int idx)
> @@ -243,6 +260,9 @@ gen6_begin_query(struct gl_context *ctx, struct gl_query_object *q)
> drm_intel_bo_unreference(query->bo);
> query->bo = drm_intel_bo_alloc(brw->bufmgr, "query results", 4096, 4096);
>
> + /* For ARB_query_buffer_object: The result is not available */
> + set_query_availability(brw, query, false);
> +
> switch (query->Base.Target) {
> case GL_TIME_ELAPSED:
> /* For timestamp queries, we record the starting time right away so that
> @@ -356,6 +376,9 @@ gen6_end_query(struct gl_context *ctx, struct gl_query_object *q)
> * but they won't actually execute until it is flushed.
> */
> query->flushed = false;
> +
> + /* For ARB_query_buffer_object: The result is now available */
> + set_query_availability(brw, query, true);
> }
>
> /**
> @@ -425,6 +448,15 @@ static void gen6_check_query(struct gl_context *ctx, struct gl_query_object *q)
> }
> }
>
> +static void
> +gen6_query_counter(struct gl_context *ctx, struct gl_query_object *q)
> +{
> + struct brw_context *brw = brw_context(ctx);
> + struct brw_query_object *query = (struct brw_query_object *)q;
> + brw_query_counter(ctx, q);
> + set_query_availability(brw, query, true);
> +}
> +
> /* Initialize Gen6+-specific query object functions. */
> void gen6_init_queryobj_functions(struct dd_function_table *functions)
> {
> @@ -432,4 +464,5 @@ void gen6_init_queryobj_functions(struct dd_function_table *functions)
> functions->EndQuery = gen6_end_query;
> functions->CheckQuery = gen6_check_query;
> functions->WaitQuery = gen6_wait_query;
> + functions->QueryCounter = gen6_query_counter;
> }
> diff --git a/src/mesa/drivers/dri/i965/hsw_queryobj.c b/src/mesa/drivers/dri/i965/hsw_queryobj.c
> new file mode 100644
> index 0000000..58c9a09
> --- /dev/null
> +++ b/src/mesa/drivers/dri/i965/hsw_queryobj.c
> @@ -0,0 +1,432 @@
> +/*
> + * Copyright (c) 2016 Intel Corporation
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the next
> + * paragraph) shall be included in all copies or substantial portions of the
> + * Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
> + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
> + * IN THE SOFTWARE.
> + *
> + */
> +
> +/** @file hsw_queryobj.c
> + *
> + * Support for query buffer objects (GL_ARB_query_buffer_object) on Haswell+.
> + */
> +#include "main/imports.h"
> +
> +#include "brw_context.h"
> +#include "brw_defines.h"
> +#include "intel_batchbuffer.h"
> +#include "intel_buffer_objects.h"
> +#include "intel_reg.h"
> +
> +/*
> + * GPR0 = 80 * GPR0;
> + */
> +static void
> +mult_gpr0_by_80(struct brw_context *brw)
> +{
> + int m;
> + uint32_t maths[] = {
> + MI_MATH_ALU2(LOAD, SRCA, R0),
> + MI_MATH_ALU2(LOAD, SRCB, R0),
> + MI_MATH_ALU0(ADD),
> + MI_MATH_ALU2(STORE, R1, ACCU),
> + MI_MATH_ALU2(LOAD, SRCA, R1),
> + MI_MATH_ALU2(LOAD, SRCB, R1),
> + MI_MATH_ALU0(ADD),
> + MI_MATH_ALU2(STORE, R1, ACCU),
> + MI_MATH_ALU2(LOAD, SRCA, R1),
> + MI_MATH_ALU2(LOAD, SRCB, R1),
> + MI_MATH_ALU0(ADD),
> + MI_MATH_ALU2(STORE, R1, ACCU),
> + MI_MATH_ALU2(LOAD, SRCA, R1),
> + MI_MATH_ALU2(LOAD, SRCB, R1),
> + MI_MATH_ALU0(ADD),
> + /* GPR1 = 16 * GPR0 */
> + MI_MATH_ALU2(STORE, R1, ACCU),
> + MI_MATH_ALU2(LOAD, SRCA, R1),
> + MI_MATH_ALU2(LOAD, SRCB, R1),
> + MI_MATH_ALU0(ADD),
> + MI_MATH_ALU2(STORE, R2, ACCU),
> + MI_MATH_ALU2(LOAD, SRCA, R2),
> + MI_MATH_ALU2(LOAD, SRCB, R2),
> + MI_MATH_ALU0(ADD),
> + /* GPR2 = 64 * GPR0 */
> + MI_MATH_ALU2(STORE, R2, ACCU),
> + MI_MATH_ALU2(LOAD, SRCA, R1),
> + MI_MATH_ALU2(LOAD, SRCB, R2),
> + MI_MATH_ALU0(ADD),
> + /* GPR0 = 80 * GPR0 */
> + MI_MATH_ALU2(STORE, R0, ACCU),
> + };
> +
> + BEGIN_BATCH(1 + ARRAY_SIZE(maths));
> + OUT_BATCH(HSW_MI_MATH | (1 + ARRAY_SIZE(maths) - 2));
> +
> + for (m = 0; m < ARRAY_SIZE(maths); m++)
> + OUT_BATCH(maths[m]);
> +
> + ADVANCE_BATCH();
> +}
> +
> +/*
> + * GPR0 = GPR0 & ((1ull << n) - 1);
> + */
> +static void
> +keep_gpr0_lower_n_bits(struct brw_context *brw, uint32_t n)
> +{
> + int m;
> + uint32_t maths[] = {
> + MI_MATH_ALU2(LOAD, SRCA, R0),
> + MI_MATH_ALU2(LOAD, SRCB, R1),
> + MI_MATH_ALU0(AND),
> + MI_MATH_ALU2(STORE, R0, ACCU),
> + };
> +
> + assert(n < 64);
> + brw_load_register_imm64(brw, HSW_CS_GPR(1), (1ull << n) - 1);
> +
> + BEGIN_BATCH(1 + ARRAY_SIZE(maths));
> + OUT_BATCH(HSW_MI_MATH | (1 + ARRAY_SIZE(maths) - 2));
> +
> + for (m = 0; m < ARRAY_SIZE(maths); m++)
> + OUT_BATCH(maths[m]);
> +
> + ADVANCE_BATCH();
> +}
> +
> +/*
> + * GPR0 = GPR0 << 30;
> + */
> +static void
> +shl_gpr0_by_30_bits(struct brw_context *brw)
> +{
> + /* First we mask 34 bits of GPR0 to prevent overflow */
> + keep_gpr0_lower_n_bits(brw, 34);
> +
> + uint32_t shl_maths[] = {
> + MI_MATH_ALU2(LOAD, SRCA, R0),
> + MI_MATH_ALU2(LOAD, SRCB, R0),
> + MI_MATH_ALU0(ADD),
> + MI_MATH_ALU2(STORE, R0, ACCU),
> + };
> +
> + const uint32_t outer_count = 5;
> + const uint32_t inner_count = 6;
> + STATIC_ASSERT(outer_count * inner_count == 30);
> + const uint32_t cmd_len = 1 + inner_count * ARRAY_SIZE(shl_maths);
> + const uint32_t batch_len = cmd_len * outer_count;
> +
> + BEGIN_BATCH(batch_len);
> +
> + /* We'll emit 5 commands, each shifting GPR0 left by 6 bits, for a total of
> + * 30 left shifts.
> + */
> + for (int o = 0; o < outer_count; o++) {
> + /* Submit one MI_MATH to shift left by 6 bits */
> + OUT_BATCH(HSW_MI_MATH | (cmd_len - 2));
> + for (int i = 0; i < inner_count; i++)
> + for (int m = 0; m < ARRAY_SIZE(shl_maths); m++)
> + OUT_BATCH(shl_maths[m]);
> + }
> +
> + ADVANCE_BATCH();
> +}
> +
> +/*
> + * GPR0 = GPR0 >> 2;
> + *
> + * Note that the upper 30 bits of GPR0 are lost!
> + */
> +static void
> +shr_gpr0_by_2_bits(struct brw_context *brw)
> +{
> + shl_gpr0_by_30_bits(brw);
> + brw_load_register_reg(brw, HSW_CS_GPR(0) + 4, HSW_CS_GPR(0));
> + brw_load_register_imm32(brw, HSW_CS_GPR(0) + 4, 0);
> +}
> +
> +/*
> + * GPR0 = (GPR0 == 0) ? 0 : 1;
> + */
> +static void
> +gpr0_to_bool(struct brw_context *brw)
> +{
> + int m;
> + uint32_t maths[] = {
> + MI_MATH_ALU2(LOAD, SRCA, R0),
> + MI_MATH_ALU1(LOAD0, SRCB),
> + MI_MATH_ALU0(ADD),
> + MI_MATH_ALU2(STOREINV, R0, ZF),
> + MI_MATH_ALU2(LOAD, SRCA, R0),
> + MI_MATH_ALU2(LOAD, SRCB, R1),
> + MI_MATH_ALU0(AND),
> + MI_MATH_ALU2(STORE, R0, ACCU),
> + };
> +
> + brw_load_register_imm64(brw, HSW_CS_GPR(1), 1ull);
> +
> + BEGIN_BATCH(1 + ARRAY_SIZE(maths));
> + OUT_BATCH(HSW_MI_MATH | (1 + ARRAY_SIZE(maths) - 2));
> +
> + for (m = 0; m < ARRAY_SIZE(maths); m++)
> + OUT_BATCH(maths[m]);
> +
> + ADVANCE_BATCH();
> +}
> +
> +static void
> +hsw_result_to_gpr0(struct gl_context *ctx, struct brw_query_object *query,
> + struct gl_buffer_object *buf, intptr_t offset,
> + GLenum pname, GLenum ptype)
> +{
> + struct brw_context *brw = brw_context(ctx);
> +
> + assert(query->bo);
> + assert(pname != GL_QUERY_TARGET);
> +
> + if (pname == GL_QUERY_RESULT_AVAILABLE) {
> + /* The query result availability is stored at offset 0 of the buffer. */
> + brw_load_register_mem64(brw,
> + HSW_CS_GPR(0),
> + query->bo,
> + I915_GEM_DOMAIN_INSTRUCTION,
> + I915_GEM_DOMAIN_INSTRUCTION,
> + 2 * sizeof(uint64_t));
> + return;
> + }
> +
> + if (pname == GL_QUERY_RESULT) {
> + /* Since GL_QUERY_RESULT_NO_WAIT wasn't used, they want us to stall to
> + * make sure the query is available.
> + */
> + brw_emit_pipe_control_flush(brw,
> + PIPE_CONTROL_CS_STALL |
> + PIPE_CONTROL_STALL_AT_SCOREBOARD);
> + }
> +
> + switch (query->Base.Target) {
> + case GL_TIMESTAMP:
How about we just do:
if (query->Base.Target == GL_TIMESTAMP) {
...
} else {
...
}
instead of a switch statement? It'd be a lot less code.
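Something like this, maybe (rough, untested sketch -- it just reuses the loads
and the MI_MATH SUB that are already in this patch, restructured as if/else):

   if (query->Base.Target == GL_TIMESTAMP) {
      /* TIMESTAMP results are a single 64-bit value at offset 0. */
      brw_load_register_mem64(brw,
                              HSW_CS_GPR(0),
                              query->bo,
                              I915_GEM_DOMAIN_INSTRUCTION,
                              I915_GEM_DOMAIN_INSTRUCTION,
                              0 * sizeof(uint64_t));
   } else {
      /* Everything else is a (begin, end) pair: GPR0 = end - begin. */
      brw_load_register_mem64(brw,
                              HSW_CS_GPR(1),
                              query->bo,
                              I915_GEM_DOMAIN_INSTRUCTION,
                              I915_GEM_DOMAIN_INSTRUCTION,
                              0 * sizeof(uint64_t));
      brw_load_register_mem64(brw,
                              HSW_CS_GPR(2),
                              query->bo,
                              I915_GEM_DOMAIN_INSTRUCTION,
                              I915_GEM_DOMAIN_INSTRUCTION,
                              1 * sizeof(uint64_t));

      BEGIN_BATCH(5);
      OUT_BATCH(HSW_MI_MATH | (5 - 2));
      OUT_BATCH(MI_MATH_ALU2(LOAD, SRCA, R2));
      OUT_BATCH(MI_MATH_ALU2(LOAD, SRCB, R1));
      OUT_BATCH(MI_MATH_ALU0(SUB));
      OUT_BATCH(MI_MATH_ALU2(STORE, R0, ACCU));
      ADVANCE_BATCH();
   }

Every non-TIMESTAMP target ends up in the same (end - begin) path anyway, so
the big list of case labels goes away.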
> + brw_load_register_mem64(brw,
> + HSW_CS_GPR(0),
> + query->bo,
> + I915_GEM_DOMAIN_INSTRUCTION,
> + I915_GEM_DOMAIN_INSTRUCTION,
> + 0 * sizeof(uint64_t));
> + break;
> +
> + case GL_TIME_ELAPSED:
> + case GL_ANY_SAMPLES_PASSED:
> + case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
> + case GL_SAMPLES_PASSED_ARB:
> + case GL_PRIMITIVES_GENERATED:
> + case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
> + case GL_VERTICES_SUBMITTED_ARB:
> + case GL_PRIMITIVES_SUBMITTED_ARB:
> + case GL_VERTEX_SHADER_INVOCATIONS_ARB:
> + case GL_GEOMETRY_SHADER_INVOCATIONS:
> + case GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED_ARB:
> + case GL_CLIPPING_INPUT_PRIMITIVES_ARB:
> + case GL_CLIPPING_OUTPUT_PRIMITIVES_ARB:
> + case GL_COMPUTE_SHADER_INVOCATIONS_ARB:
> + case GL_TESS_CONTROL_SHADER_PATCHES_ARB:
> + case GL_TESS_EVALUATION_SHADER_INVOCATIONS_ARB:
> + case GL_FRAGMENT_SHADER_INVOCATIONS_ARB:
> + brw_load_register_mem64(brw,
> + HSW_CS_GPR(1),
> + query->bo,
> + I915_GEM_DOMAIN_INSTRUCTION,
> + I915_GEM_DOMAIN_INSTRUCTION,
> + 0 * sizeof(uint64_t));
> + brw_load_register_mem64(brw,
> + HSW_CS_GPR(2),
> + query->bo,
> + I915_GEM_DOMAIN_INSTRUCTION,
> + I915_GEM_DOMAIN_INSTRUCTION,
> + 1 * sizeof(uint64_t));
> +
> + BEGIN_BATCH(5);
> + OUT_BATCH(HSW_MI_MATH | (5 - 2));
> +
> + OUT_BATCH(MI_MATH_ALU2(LOAD, SRCA, R2));
> + OUT_BATCH(MI_MATH_ALU2(LOAD, SRCB, R1));
> + OUT_BATCH(MI_MATH_ALU0(SUB));
> + OUT_BATCH(MI_MATH_ALU2(STORE, R0, ACCU));
> +
> + ADVANCE_BATCH();
> + break;
> +
> + default:
> + unreachable("Unrecognized query target in brw_queryobj_get_results()");
The function name is wrong here (this is hsw_result_to_gpr0(), not
brw_queryobj_get_results()), but if we switch to if/else we won't
need the unreachable() at all.
> + }
> +
> + switch (query->Base.Target) {
> + case GL_FRAGMENT_SHADER_INVOCATIONS_ARB:
> + /* Implement the "WaDividePSInvocationCountBy4:HSW,BDW" workaround:
> + * "Invocation counter is 4 times actual. WA: SW to divide HW
reported
> + * PS Invocations value by 4."
> + *
> + * Prior to Haswell, invocation count was counted by the WM, and it
> + * buggily counted invocations in units of subspans (2x2 unit). To get the
> + * correct value, the CS multiplied this by 4. With HSW the logic moved,
> + * and correctly emitted the number of pixel shader invocations, but,
> + * whomever forgot to undo the multiply by 4.
> + */
We need to skip this on Skylake and later:

   if (brw->gen < 9)
      shr_gpr0_by_2_bits(brw);
> + shr_gpr0_by_2_bits(brw);
> + break;
> + case GL_TIME_ELAPSED:
> + case GL_TIMESTAMP:
> + mult_gpr0_by_80(brw);
> + if (query->Base.Target == GL_TIMESTAMP) {
> + keep_gpr0_lower_n_bits(brw, 36);
> + }
> + break;
> + case GL_ANY_SAMPLES_PASSED:
> + case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
> + gpr0_to_bool(brw);
> + break;
> + }
> +}
> +
> +/*
> + * Store immediate data into the user buffer using the requested size.
> + */
> +static void
> +store_query_result_imm(struct brw_context *brw, drm_intel_bo *bo,
> + uint32_t offset, GLenum ptype, uint64_t imm)
> +{
> + switch (ptype) {
> + case GL_INT:
> + case GL_UNSIGNED_INT:
> + //printf("imm @ %d = %ld\n", offset, imm);
> + brw_store_data_imm32(brw, bo, offset, imm);
> + break;
> + case GL_INT64_ARB:
> + case GL_UNSIGNED_INT64_ARB:
> + brw_store_data_imm64(brw, bo, offset, imm);
> + break;
> + default:
> + unreachable("Unexpected result type");
> + }
> +}
> +
> +static void
> +set_predicate(struct brw_context *brw, drm_intel_bo *query_bo)
> +{
> + brw_load_register_imm64(brw, MI_PREDICATE_SRC1, 0ull);
> +
> + /* Load query availability into SRC0 */
> + brw_load_register_mem64(brw, MI_PREDICATE_SRC0, query_bo,
> + I915_GEM_DOMAIN_INSTRUCTION, 0,
> + 2 * sizeof(uint64_t));
> +
> + /* predicate = !(query_availability == 0); */
> + BEGIN_BATCH(1);
> + OUT_BATCH(GEN7_MI_PREDICATE |
> + MI_PREDICATE_LOADOP_LOADINV |
> + MI_PREDICATE_COMBINEOP_SET |
> + MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
> + ADVANCE_BATCH();
> +}
> +
> +/*
> + * Store data from the register into the user buffer using the requested size.
> + * The write also enables the predication to prevent writing the result if the
> + * query has not finished yet.
> + */
> +static void
> +store_query_result_reg(struct brw_context *brw, drm_intel_bo *bo,
> + uint32_t offset, GLenum ptype, uint32_t reg,
> + const bool pipelined)
> +{
> + uint32_t cmd_size = brw->gen >= 8 ? 4 : 3;
> + uint32_t dwords = (ptype == GL_INT || ptype == GL_UNSIGNED_INT) ? 1 : 2;
> + assert(brw->gen >= 6);
> +
> + BEGIN_BATCH(dwords * cmd_size);
> + for (int i = 0; i < dwords; i++) {
> + OUT_BATCH(MI_STORE_REGISTER_MEM |
> + (pipelined ? MI_STORE_REGISTER_MEM_PREDICATE : 0) |
> + (cmd_size - 2));
> + OUT_BATCH(reg + 4 * i);
> + if (brw->gen >= 8) {
> + OUT_RELOC64(bo, I915_GEM_DOMAIN_INSTRUCTION,
> + I915_GEM_DOMAIN_INSTRUCTION, offset + 4 * i);
> + } else {
> + OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION,
> + I915_GEM_DOMAIN_INSTRUCTION, offset + 4 * i);
> + }
> + }
> + ADVANCE_BATCH();
> +}
> +
> +static void
> +hsw_store_query_result(struct gl_context *ctx, struct gl_query_object *q,
> + struct gl_buffer_object *buf, intptr_t offset,
> + GLenum pname, GLenum ptype)
> +{
> + struct brw_context *brw = brw_context(ctx);
> + struct brw_query_object *query = (struct brw_query_object *)q;
> + struct intel_buffer_object *bo = intel_buffer_object(buf);
> + const bool pipelined = brw_is_query_pipelined(query);
> +
> + if (pname == GL_QUERY_TARGET) {
> + store_query_result_imm(brw, bo->buffer, offset, ptype,
> + query->Base.Target);
> + return;
> + } else if (pname == GL_QUERY_RESULT_AVAILABLE && !pipelined) {
> + store_query_result_imm(brw, bo->buffer, offset, ptype, 1ull);
> + } else if (query->bo) {
> + /* The query bo still around. Therefore, we:
> + *
> + * 1. Compute the current result in GPR0
> + * 2. Set the command streamer predicate based on query availability
> + * 3. (With predication) Write GPR0 to the requested buffer
> + */
> + hsw_result_to_gpr0(ctx, query, buf, offset, pname, ptype);
> + if (pipelined)
> + set_predicate(brw, query->bo);
> + store_query_result_reg(brw, bo->buffer, offset, ptype, HSW_CS_GPR(0),
> + pipelined);
> + } else {
> + /* The query bo is gone, so the query must have been processed into
> + * client memory. In this case we can fill the buffer location with the
> + * requested data using MI_STORE_DATA_IMM.
> + */
> + switch (pname) {
> + case GL_QUERY_RESULT_AVAILABLE:
> + store_query_result_imm(brw, bo->buffer, offset, ptype, 1ull);
> + break;
> + case GL_QUERY_RESULT_NO_WAIT:
> + case GL_QUERY_RESULT:
> + store_query_result_imm(brw, bo->buffer, offset, ptype,
> + q->Result);
> + break;
> + default:
> + unreachable("Unexpected result type");
> + }
> + }
> +
> +}
> +
> +/* Initialize hsw+-specific query object functions. */
> +void hsw_init_queryobj_functions(struct dd_function_table *functions)
> +{
> + gen6_init_queryobj_functions(functions);
> + functions->StoreQueryResult = hsw_store_query_result;
> +}
> diff --git a/src/mesa/drivers/dri/i965/intel_extensions.c b/src/mesa/drivers/dri/i965/intel_extensions.c
> index 6a20bd6..af922ee 100644
> --- a/src/mesa/drivers/dri/i965/intel_extensions.c
> +++ b/src/mesa/drivers/dri/i965/intel_extensions.c
> @@ -366,6 +366,10 @@ intelInitExtensions(struct gl_context *ctx)
> }
> }
>
> + if (brw->gen >= 8 || brw->is_haswell) {
> + ctx->Extensions.ARB_query_buffer_object = true;
> + }
> +
> if (brw->gen >= 8) {
> ctx->Extensions.ARB_stencil_texturing = true;
> }
> diff --git a/src/mesa/drivers/dri/i965/intel_reg.h b/src/mesa/drivers/dri/i965/intel_reg.h
> index 40931b3..95365fe 100644
> --- a/src/mesa/drivers/dri/i965/intel_reg.h
> +++ b/src/mesa/drivers/dri/i965/intel_reg.h
> @@ -43,6 +43,7 @@
>
> #define MI_STORE_REGISTER_MEM (CMD_MI | (0x24 << 23))
> # define MI_STORE_REGISTER_MEM_USE_GGTT (1 << 22)
> +# define MI_STORE_REGISTER_MEM_PREDICATE (1 << 21)
>
> /* Load a value from memory into a register. Only available on Gen7+. */
> #define GEN7_MI_LOAD_REGISTER_MEM (CMD_MI | (0x29 << 23))
>