Re: [Mesa-dev] [PATCH 3/8] nir/builder: Add iadd_imm and imul_imm helpers

2018-11-14 Thread Karol Herbst
Reviewed-by: Karol Herbst 
On Wed, Nov 14, 2018 at 12:23 AM Jason Ekstrand  wrote:
>
> The pattern of adding or multiplying an integer by an immediate is
> fairly common especially in deref chain handling.  This adds a helper
> for it and uses it a few places.  The advantage to the helper is that
> it automatically handles bit sizes for you.
> ---
>  src/compiler/nir/nir_builder.h | 12 
>  src/compiler/nir/nir_lower_io.c|  5 ++---
>  src/compiler/spirv/vtn_variables.c | 14 ++
>  3 files changed, 20 insertions(+), 11 deletions(-)
>
> diff --git a/src/compiler/nir/nir_builder.h b/src/compiler/nir/nir_builder.h
> index 3be630ab3dd..cf096d41bf2 100644
> --- a/src/compiler/nir/nir_builder.h
> +++ b/src/compiler/nir/nir_builder.h
> @@ -553,6 +553,18 @@ nir_channels(nir_builder *b, nir_ssa_def *def, 
> nir_component_mask_t mask)
> return nir_swizzle(b, def, swizzle, num_channels, false);
>  }
>
> +static inline nir_ssa_def *
> +nir_iadd_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
> +{
> +   return nir_iadd(build, x, nir_imm_intN_t(build, y, x->bit_size));
> +}
> +
> +static inline nir_ssa_def *
> +nir_imul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
> +{
> +   return nir_imul(build, x, nir_imm_intN_t(build, y, x->bit_size));
> +}
> +
>  /**
>   * Turns a nir_src into a nir_ssa_def * so it can be passed to
>   * nir_build_alu()-based builder calls.
> diff --git a/src/compiler/nir/nir_lower_io.c b/src/compiler/nir/nir_lower_io.c
> index b3595bb19d5..f3377eaec8f 100644
> --- a/src/compiler/nir/nir_lower_io.c
> +++ b/src/compiler/nir/nir_lower_io.c
> @@ -127,8 +127,7 @@ get_io_offset(nir_builder *b, nir_deref_instr *deref,
>   unsigned size = type_size((*p)->type);
>
>   nir_ssa_def *mul =
> -nir_imul(b, nir_imm_int(b, size),
> - nir_ssa_for_src(b, (*p)->arr.index, 1));
> +nir_imul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
>
>   offset = nir_iadd(b, offset, mul);
>} else if ((*p)->deref_type == nir_deref_type_struct) {
> @@ -139,7 +138,7 @@ get_io_offset(nir_builder *b, nir_deref_instr *deref,
>   for (unsigned i = 0; i < (*p)->strct.index; i++) {
>  field_offset += type_size(glsl_get_struct_field(parent->type, 
> i));
>   }
> - offset = nir_iadd(b, offset, nir_imm_int(b, field_offset));
> + offset = nir_iadd_imm(b, offset, field_offset);
>} else {
>   unreachable("Unsupported deref type");
>}
> diff --git a/src/compiler/spirv/vtn_variables.c 
> b/src/compiler/spirv/vtn_variables.c
> index c5cf345d02a..586dcc5b99c 100644
> --- a/src/compiler/spirv/vtn_variables.c
> +++ b/src/compiler/spirv/vtn_variables.c
> @@ -138,7 +138,7 @@ vtn_access_link_as_ssa(struct vtn_builder *b, struct 
> vtn_access_link link,
>nir_ssa_def *src0 = vtn_ssa_value(b, link.id)->def;
>if (src0->bit_size != 32)
>   src0 = nir_u2u32(&b->nb, src0);
> -  return nir_imul(&b->nb, src0, nir_imm_int(&b->nb, stride));
> +  return nir_imul_imm(&b->nb, src0, stride);
> }
>  }
>
> @@ -332,8 +332,7 @@ vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
>case GLSL_TYPE_STRUCT: {
>   vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
>   unsigned member = deref_chain->link[idx].id;
> - nir_ssa_def *mem_offset = nir_imm_int(&b->nb, type->offsets[member]);
> - offset = nir_iadd(&b->nb, offset, mem_offset);
> + offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
>   type = type->members[member];
>   access |= type->access;
>   break;
> @@ -717,7 +716,7 @@ _vtn_block_load_store(struct vtn_builder *b, 
> nir_intrinsic_op op, bool load,
>
>   for (unsigned i = 0; i < num_ops; i++) {
>  nir_ssa_def *elem_offset =
> -   nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * col_stride));
> +   nir_iadd_imm(&b->nb, offset, i * col_stride);
>  _vtn_load_store_tail(b, op, load, index, elem_offset,
>   access_offset, access_size,
>   &(*inout)->elems[i],
> @@ -747,8 +746,7 @@ _vtn_block_load_store(struct vtn_builder *b, 
> nir_intrinsic_op op, bool load,
>  nir_ssa_def *per_comp[4];
>  for (unsigned i = 0; i < elems; i++) {
> nir_ssa_def *elem_offset =
> -  nir_iadd(&b->nb, offset,
> -   nir_imm_int(&b->nb, i * type->stride));
> +  nir_iadd_imm(&b->nb, offset, i * type->stride);
> struct vtn_ssa_value *comp, temp_val;
> if (!load) {
>temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
> @@ -775,7 +773,7 @@ _vtn_block_load_store(struct vtn_builder *b, 
> nir_intrinsic_op op, bool load,
>unsigned elems = glsl_get_length(type->type);
>for (unsigned i = 0; i < elems; i++) {
>   

[Mesa-dev] [PATCH 3/8] nir/builder: Add iadd_imm and imul_imm helpers

2018-11-13 Thread Jason Ekstrand
The pattern of adding or multiplying an integer by an immediate is
fairly common especially in deref chain handling.  This adds a helper
for it and uses it a few places.  The advantage to the helper is that
it automatically handles bit sizes for you.
---
 src/compiler/nir/nir_builder.h | 12 
 src/compiler/nir/nir_lower_io.c|  5 ++---
 src/compiler/spirv/vtn_variables.c | 14 ++
 3 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/src/compiler/nir/nir_builder.h b/src/compiler/nir/nir_builder.h
index 3be630ab3dd..cf096d41bf2 100644
--- a/src/compiler/nir/nir_builder.h
+++ b/src/compiler/nir/nir_builder.h
@@ -553,6 +553,18 @@ nir_channels(nir_builder *b, nir_ssa_def *def, 
nir_component_mask_t mask)
return nir_swizzle(b, def, swizzle, num_channels, false);
 }
 
+static inline nir_ssa_def *
+nir_iadd_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+{
+   return nir_iadd(build, x, nir_imm_intN_t(build, y, x->bit_size));
+}
+
+static inline nir_ssa_def *
+nir_imul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+{
+   return nir_imul(build, x, nir_imm_intN_t(build, y, x->bit_size));
+}
+
 /**
  * Turns a nir_src into a nir_ssa_def * so it can be passed to
  * nir_build_alu()-based builder calls.
diff --git a/src/compiler/nir/nir_lower_io.c b/src/compiler/nir/nir_lower_io.c
index b3595bb19d5..f3377eaec8f 100644
--- a/src/compiler/nir/nir_lower_io.c
+++ b/src/compiler/nir/nir_lower_io.c
@@ -127,8 +127,7 @@ get_io_offset(nir_builder *b, nir_deref_instr *deref,
  unsigned size = type_size((*p)->type);
 
  nir_ssa_def *mul =
-nir_imul(b, nir_imm_int(b, size),
- nir_ssa_for_src(b, (*p)->arr.index, 1));
+nir_imul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
 
  offset = nir_iadd(b, offset, mul);
   } else if ((*p)->deref_type == nir_deref_type_struct) {
@@ -139,7 +138,7 @@ get_io_offset(nir_builder *b, nir_deref_instr *deref,
  for (unsigned i = 0; i < (*p)->strct.index; i++) {
 field_offset += type_size(glsl_get_struct_field(parent->type, i));
  }
- offset = nir_iadd(b, offset, nir_imm_int(b, field_offset));
+ offset = nir_iadd_imm(b, offset, field_offset);
   } else {
  unreachable("Unsupported deref type");
   }
diff --git a/src/compiler/spirv/vtn_variables.c 
b/src/compiler/spirv/vtn_variables.c
index c5cf345d02a..586dcc5b99c 100644
--- a/src/compiler/spirv/vtn_variables.c
+++ b/src/compiler/spirv/vtn_variables.c
@@ -138,7 +138,7 @@ vtn_access_link_as_ssa(struct vtn_builder *b, struct 
vtn_access_link link,
   nir_ssa_def *src0 = vtn_ssa_value(b, link.id)->def;
   if (src0->bit_size != 32)
  src0 = nir_u2u32(&b->nb, src0);
-  return nir_imul(&b->nb, src0, nir_imm_int(&b->nb, stride));
+  return nir_imul_imm(&b->nb, src0, stride);
}
 }
 
@@ -332,8 +332,7 @@ vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
   case GLSL_TYPE_STRUCT: {
  vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
  unsigned member = deref_chain->link[idx].id;
- nir_ssa_def *mem_offset = nir_imm_int(&b->nb, type->offsets[member]);
- offset = nir_iadd(&b->nb, offset, mem_offset);
+ offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
  type = type->members[member];
  access |= type->access;
  break;
@@ -717,7 +716,7 @@ _vtn_block_load_store(struct vtn_builder *b, 
nir_intrinsic_op op, bool load,
 
  for (unsigned i = 0; i < num_ops; i++) {
 nir_ssa_def *elem_offset =
-   nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * col_stride));
+   nir_iadd_imm(&b->nb, offset, i * col_stride);
 _vtn_load_store_tail(b, op, load, index, elem_offset,
  access_offset, access_size,
  &(*inout)->elems[i],
@@ -747,8 +746,7 @@ _vtn_block_load_store(struct vtn_builder *b, 
nir_intrinsic_op op, bool load,
 nir_ssa_def *per_comp[4];
 for (unsigned i = 0; i < elems; i++) {
nir_ssa_def *elem_offset =
-  nir_iadd(&b->nb, offset,
-   nir_imm_int(&b->nb, i * type->stride));
+  nir_iadd_imm(&b->nb, offset, i * type->stride);
struct vtn_ssa_value *comp, temp_val;
if (!load) {
   temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
@@ -775,7 +773,7 @@ _vtn_block_load_store(struct vtn_builder *b, 
nir_intrinsic_op op, bool load,
   unsigned elems = glsl_get_length(type->type);
   for (unsigned i = 0; i < elems; i++) {
  nir_ssa_def *elem_off =
-nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
+nir_iadd_imm(&b->nb, offset, i * type->stride);
  _vtn_block_load_store(b, op, load, index, elem_off,
access_offset, access_size,