Re: [051/nnn] poly_int: emit_group_load/store

2017-12-05 Thread Jeff Law
On 10/23/2017 11:21 AM, Richard Sandiford wrote:
> This patch changes the sizes passed to emit_group_load and
> emit_group_store from int to poly_int64.
> 
> 
> 2017-10-23  Richard Sandiford  
>   Alan Hayward  
>   David Sherwood  
> 
> gcc/
>   * expr.h (emit_group_load, emit_group_load_into_temps)
>   (emit_group_store): Take the size as a poly_int64 rather than an int.
>   * expr.c (emit_group_load_1, emit_group_load): Likewise.
>   (emit_group_load_into_temps, emit_group_store): Likewise.
> 
OK.
jeff


[051/nnn] poly_int: emit_group_load/store

2017-10-23 Thread Richard Sandiford
This patch changes the sizes passed to emit_group_load and
emit_group_store from int to poly_int64.
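
For anyone reading the patch who hasn't met the poly_int comparison
helpers yet, here is a minimal single-indeterminate model of the ones the
new code relies on (may_gt, must_eq, ordered_p and known_size_p).  It is
only an illustrative sketch: the real, multi-coefficient definitions live
in GCC's poly-int.h, and the struct and helper bodies below are simplified
stand-ins rather than the actual implementation.

/* Toy model of the poly_int64 comparisons used in this patch.  The real
   implementation is GCC's poly-int.h; this sketch collapses it to a single
   runtime indeterminate X >= 0 (think "number of extra vector chunks"),
   so a size is the pair A + B*X.  */

#include <cassert>
#include <cstdint>

struct poly64_model
{
  int64_t a;   /* compile-time constant part */
  int64_t b;   /* coefficient of the runtime indeterminate X */
};

/* True if LHS > RHS is possible for some X >= 0.  */
static bool
may_gt (poly64_model lhs, poly64_model rhs)
{
  return lhs.a > rhs.a || lhs.b > rhs.b;
}

/* True if LHS == RHS for every X, i.e. both coefficients match.  */
static bool
must_eq (poly64_model lhs, poly64_model rhs)
{
  return lhs.a == rhs.a && lhs.b == rhs.b;
}

/* True if LHS <= RHS for all X or RHS <= LHS for all X, so the comparison
   cannot change direction as X varies.  */
static bool
ordered_p (poly64_model lhs, poly64_model rhs)
{
  bool le = lhs.a <= rhs.a && lhs.b <= rhs.b;
  bool ge = lhs.a >= rhs.a && lhs.b >= rhs.b;
  return le || ge;
}

/* Stand-in for the old "ssize >= 0" test: a size with no runtime part and
   a negative constant means "size not known".  */
static bool
known_size_p (poly64_model size)
{
  return size.b != 0 || size.a >= 0;
}

int
main ()
{
  poly64_model ssize = { 16, 16 };      /* whole struct: 16 + 16*X bytes */
  poly64_model frag_end = { 24, 16 };   /* bytepos + bytelen of one fragment */

  /* The fragment runs past the struct for every X here, so the
     trailing-fragment path in emit_group_load_1 would be taken.  */
  assert (ordered_p (frag_end, ssize));
  assert (known_size_p (ssize) && may_gt (frag_end, ssize));
  assert (!must_eq (frag_end, ssize));
  return 0;
}

On a target with no runtime vector length the B coefficient is always zero
and these helpers collapse to the plain int comparisons the old code used:
may_gt becomes >, must_eq becomes ==, and known_size_p becomes ssize >= 0.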


2017-10-23  Richard Sandiford  
Alan Hayward  
David Sherwood  

gcc/
* expr.h (emit_group_load, emit_group_load_into_temps)
(emit_group_store): Take the size as a poly_int64 rather than an int.
* expr.c (emit_group_load_1, emit_group_load): Likewise.
(emit_group_load_into_temps, emit_group_store): Likewise.

Index: gcc/expr.h
===================================================================
--- gcc/expr.h  2017-10-23 17:18:56.434286222 +0100
+++ gcc/expr.h  2017-10-23 17:20:49.571719793 +0100
@@ -128,10 +128,10 @@ extern rtx gen_group_rtx (rtx);
 
 /* Load a BLKmode value into non-consecutive registers represented by a
PARALLEL.  */
-extern void emit_group_load (rtx, rtx, tree, int);
+extern void emit_group_load (rtx, rtx, tree, poly_int64);
 
 /* Similarly, but load into new temporaries.  */
-extern rtx emit_group_load_into_temps (rtx, rtx, tree, int);
+extern rtx emit_group_load_into_temps (rtx, rtx, tree, poly_int64);
 
 /* Move a non-consecutive group of registers represented by a PARALLEL into
a non-consecutive group of registers represented by a PARALLEL.  */
@@ -142,7 +142,7 @@ extern rtx emit_group_move_into_temps (r
 
 /* Store a BLKmode value from non-consecutive registers represented by a
PARALLEL.  */
-extern void emit_group_store (rtx, rtx, tree, int);
+extern void emit_group_store (rtx, rtx, tree, poly_int64);
 
 extern rtx maybe_emit_group_store (rtx, tree);
 
Index: gcc/expr.c
===================================================================
--- gcc/expr.c  2017-10-23 17:18:57.860160878 +0100
+++ gcc/expr.c  2017-10-23 17:20:49.571719793 +0100
@@ -2095,7 +2095,8 @@ gen_group_rtx (rtx orig)
into corresponding XEXP (XVECEXP (DST, 0, i), 0) element.  */
 
 static void
-emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, int ssize)
+emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type,
+  poly_int64 ssize)
 {
   rtx src;
   int start, i;
@@ -2134,12 +2135,16 @@ emit_group_load_1 (rtx *tmps, rtx dst, r
   for (i = start; i < XVECLEN (dst, 0); i++)
 {
   machine_mode mode = GET_MODE (XEXP (XVECEXP (dst, 0, i), 0));
-  HOST_WIDE_INT bytepos = INTVAL (XEXP (XVECEXP (dst, 0, i), 1));
-  unsigned int bytelen = GET_MODE_SIZE (mode);
-  int shift = 0;
-
-  /* Handle trailing fragments that run over the size of the struct.  */
-  if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize)
+  poly_int64 bytepos = INTVAL (XEXP (XVECEXP (dst, 0, i), 1));
+  poly_int64 bytelen = GET_MODE_SIZE (mode);
+  poly_int64 shift = 0;
+
+  /* Handle trailing fragments that run over the size of the struct.
+     It's the target's responsibility to make sure that the fragment
+     cannot be strictly smaller in some cases and strictly larger
+     in others.  */
+  gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
+  if (known_size_p (ssize) && may_gt (bytepos + bytelen, ssize))
{
  /* Arrange to shift the fragment to where it belongs.
 extract_bit_field loads to the lsb of the reg.  */
@@ -2153,7 +2158,7 @@ emit_group_load_1 (rtx *tmps, rtx dst, r
  )
shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
  bytelen = ssize - bytepos;
- gcc_assert (bytelen > 0);
+ gcc_assert (may_gt (bytelen, 0));
}
 
   /* If we won't be loading directly from memory, protect the real source
@@ -2177,33 +2182,34 @@ emit_group_load_1 (rtx *tmps, rtx dst, r
   if (MEM_P (src)
  && (! targetm.slow_unaligned_access (mode, MEM_ALIGN (src))
  || MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode))
- && bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
- && bytelen == GET_MODE_SIZE (mode))
+ && multiple_p (bytepos * BITS_PER_UNIT, GET_MODE_ALIGNMENT (mode))
+ && must_eq (bytelen, GET_MODE_SIZE (mode)))
{
  tmps[i] = gen_reg_rtx (mode);
  emit_move_insn (tmps[i], adjust_address (src, mode, bytepos));
}
   else if (COMPLEX_MODE_P (mode)
   && GET_MODE (src) == mode
-  && bytelen == GET_MODE_SIZE (mode))
+  && must_eq (bytelen, GET_MODE_SIZE (mode)))
/* Let emit_move_complex do the bulk of the work.  */
tmps[i] = src;
   else if (GET_CODE (src) == CONCAT)
{
- unsigned int slen = GET_MODE_SIZE (GET_MODE (src));
- unsigned int slen0 = GET_MODE_SIZE (GET_MODE (XEXP (src, 0)));
- unsigned int elt = bytepos / slen0;
- unsigned int subpos = bytepos % slen0;
+ poly_int64 slen = GET_MODE_SIZE (GET_MODE (src));
+ poly_int64 slen0 = GET_MODE_SIZE (GET_MODE (XEXP (src, 0)));
+ unsigned int elt;
+