Re: [PATCH 2/2] arm: Improve handling of relocations with small offsets with -mpure-code on v6m (PR96770)

2020-11-01 Thread Christophe Lyon via Gcc-patches
On Tue, 27 Oct 2020 at 17:22, Richard Earnshaw
 wrote:
>
> On 28/09/2020 10:09, Christophe Lyon via Gcc-patches wrote:
> > With -mpure-code on v6m (thumb-1), we can use small offsets with
> > upper/lower relocations to avoid the extra addition of the
> > offset.
> >
> > This patch accepts expressions symbol+offset as legitimate constants
> > when the literal pool is disabled, making sure that the offset is
> > within the range supported by thumb-1 [0..255].
> >
> > It also makes sure that thumb1_movsi_insn emits an error in case we
> > try to use it with an unsupported RTL construct.
> >
> > 2020-09-28  Christophe Lyon  
> >
> >   gcc/
> >   * config/arm/arm.c (thumb_legitimate_constant_p): Accept
> >   (symbol_ref + addend) when literal pool is disabled.
> >   (arm_valid_symbolic_address_p): Add support for thumb-1 without
> >   MOVT/MOVW.
> >   * config/arm/thumb1.md (*thumb1_movsi_insn): Accept (symbol_ref +
> >   addend) in the pure-code alternative.
> >
> >   gcc/testsuite/
> >   * gcc.target/arm/pure-code/pr96770.c: New test.
> > ---
> >  gcc/config/arm/arm.c | 15 ++++++++++++---
> >  gcc/config/arm/thumb1.md |  5 +++--
> >  gcc/testsuite/gcc.target/arm/pure-code/pr96770.c | 21 +++++++++++++++++++++
> >  3 files changed, 36 insertions(+), 5 deletions(-)
> >  create mode 100644 gcc/testsuite/gcc.target/arm/pure-code/pr96770.c
> >
> > diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
> > index abe357e..ceeb91f 100644
> > --- a/gcc/config/arm/arm.c
> > +++ b/gcc/config/arm/arm.c
> > @@ -9489,7 +9489,8 @@ thumb_legitimate_constant_p (machine_mode mode 
> > ATTRIBUTE_UNUSED, rtx x)
> >we build the symbol address with upper/lower
> >relocations.  */
> > || (TARGET_THUMB1
> > -   && GET_CODE (x) == SYMBOL_REF
> > +   && !label_mentioned_p (x)
> > +   && arm_valid_symbolic_address_p (x)
> > && arm_disable_literal_pool)
> > || flag_pic);
> >  }
> > @@ -31495,7 +31496,10 @@ arm_emit_coreregs_64bit_shift (enum rtx_code code, 
> > rtx out, rtx in,
> > According to the ARM ELF ABI, the initial addend of REL-type relocations
> > processing MOVW and MOVT instructions is formed by interpreting the 
> > 16-bit
> > literal field of the instruction as a 16-bit signed value in the range
> > -   -32768 <= A < 32768.  */
> > +   -32768 <= A < 32768.
> > +
> > +   In Thumb-1 mode, we use upper/lower relocations which have an 8-bit
> > +   unsigned range of 0 <= A < 256.  */
>
> I think it should be made clear that the range comes from the AAELF32
> relocation encoding for REL-type relocations (which is an unsigned value
> in this case).
>
> Otherwise, OK.
>

Thanks Richard, I've just pushed the patch with updated comment &
commit message.

Christophe

> >
> >  bool
> >  arm_valid_symbolic_address_p (rtx addr)
> > @@ -31519,7 +31523,12 @@ arm_valid_symbolic_address_p (rtx addr)
> >xop1 = XEXP (tmp, 1);
> >
> >if (GET_CODE (xop0) == SYMBOL_REF && CONST_INT_P (xop1))
> > -   return IN_RANGE (INTVAL (xop1), -0x8000, 0x7fff);
> > + {
> > +   if (TARGET_THUMB1 && !TARGET_HAVE_MOVT)
> > + return IN_RANGE (INTVAL (xop1), 0, 0xff);
> > +   else
> > + return IN_RANGE (INTVAL (xop1), -0x8000, 0x7fff);
> > + }
> >  }
> >
> >return false;
> > diff --git a/gcc/config/arm/thumb1.md b/gcc/config/arm/thumb1.md
> > index 3dedcae..2258a52 100644
> > --- a/gcc/config/arm/thumb1.md
> > +++ b/gcc/config/arm/thumb1.md
> > @@ -675,7 +675,7 @@ (define_insn "*thumb1_movsi_insn"
> >case 7:
> >/* pure-code alternative: build the constant byte by byte,
> >instead of loading it from a constant pool.  */
> > - if (GET_CODE (operands[1]) == SYMBOL_REF)
> > + if (arm_valid_symbolic_address_p (operands[1]))
> > {
> >   output_asm_insn (\"movs\\t%0, #:upper8_15:%1\", operands);
> >   output_asm_insn (\"lsls\\t%0, #8\", operands);
> > @@ -686,7 +686,7 @@ (define_insn "*thumb1_movsi_insn"
> >   output_asm_insn (\"adds\\t%0, #:lower0_7:%1\", operands);
> >   return \"\";
> > }
> > - else
> > + else if (GET_CODE (operands[1]) == CONST_INT)
> > {
> >   int i;
> >   HOST_WIDE_INT op1 = INTVAL (operands[1]);
> > @@ -721,6 +721,7 @@ (define_insn "*thumb1_movsi_insn"
> > output_asm_insn ("adds\t%0, %1", ops);
> >   return "";
> > }
> > +   gcc_unreachable ();
> >
> >case 8: return "ldr\t%0, %1";
> >case 9: return "str\t%1, %0";
> > diff --git a/gcc/testsuite/gcc.target/arm/pure-code/pr96770.c 
> > b/gcc/testsuite/gcc.target/arm/pure-code/pr96770.c
> > new file mode 100644
> > index 000..a43d71f
> > --- /dev/null
> > +++ b/gcc/testsuite/gcc.target/arm/pure-code/pr96770.c
> > @@ -0,0 +1,21 @@
> > +/* { dg-do compile } */
> > +/* { dg-options "-mpure-code" } */
> > +
> 

Re: [PATCH 2/2] arm: Improve handling of relocations with small offsets with -mpure-code on v6m (PR96770)

2020-10-27 Thread Richard Earnshaw via Gcc-patches
On 28/09/2020 10:09, Christophe Lyon via Gcc-patches wrote:
> With -mpure-code on v6m (thumb-1), we can use small offsets with
> upper/lower relocations to avoid the extra addition of the
> offset.
> 
> This patch accepts expressions symbol+offset as legitimate constants
> when the literal pool is disabled, making sure that the offset is
> within the range supported by thumb-1 [0..255].
> 
> It also makes sure that thumb1_movsi_insn emits an error in case we
> try to use it with an unsupported RTL construct.
> 
> 2020-09-28  Christophe Lyon  
> 
>   gcc/
>   * config/arm/arm.c (thumb_legitimate_constant_p): Accept
>   (symbol_ref + addend) when literal pool is disabled.
>   (arm_valid_symbolic_address_p): Add support for thumb-1 without
>   MOVT/MOVW.
>   * config/arm/thumb1.md (*thumb1_movsi_insn): Accept (symbol_ref +
>   addend) in the pure-code alternative.
> 
>   gcc/testsuite/
>   * gcc.target/arm/pure-code/pr96770.c: New test.
> ---
>  gcc/config/arm/arm.c | 15 ++++++++++++---
>  gcc/config/arm/thumb1.md |  5 +++--
>  gcc/testsuite/gcc.target/arm/pure-code/pr96770.c | 21 +++++++++++++++++++++
>  3 files changed, 36 insertions(+), 5 deletions(-)
>  create mode 100644 gcc/testsuite/gcc.target/arm/pure-code/pr96770.c
> 
> diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
> index abe357e..ceeb91f 100644
> --- a/gcc/config/arm/arm.c
> +++ b/gcc/config/arm/arm.c
> @@ -9489,7 +9489,8 @@ thumb_legitimate_constant_p (machine_mode mode 
> ATTRIBUTE_UNUSED, rtx x)
>we build the symbol address with upper/lower
>relocations.  */
> || (TARGET_THUMB1
> -   && GET_CODE (x) == SYMBOL_REF
> +   && !label_mentioned_p (x)
> +   && arm_valid_symbolic_address_p (x)
> && arm_disable_literal_pool)
> || flag_pic);
>  }
> @@ -31495,7 +31496,10 @@ arm_emit_coreregs_64bit_shift (enum rtx_code code, 
> rtx out, rtx in,
> According to the ARM ELF ABI, the initial addend of REL-type relocations
> processing MOVW and MOVT instructions is formed by interpreting the 16-bit
> literal field of the instruction as a 16-bit signed value in the range
> -   -32768 <= A < 32768.  */
> +   -32768 <= A < 32768.
> +
> +   In Thumb-1 mode, we use upper/lower relocations which have an 8-bit
> +   unsigned range of 0 <= A < 256.  */

I think it should be made clear that the range comes from the AAELF32
relocation encoding for REL-type relocations (which is an unsigned value
in this case).

Otherwise, OK.

>  
>  bool
>  arm_valid_symbolic_address_p (rtx addr)
> @@ -31519,7 +31523,12 @@ arm_valid_symbolic_address_p (rtx addr)
>xop1 = XEXP (tmp, 1);
>  
>if (GET_CODE (xop0) == SYMBOL_REF && CONST_INT_P (xop1))
> -   return IN_RANGE (INTVAL (xop1), -0x8000, 0x7fff);
> + {
> +   if (TARGET_THUMB1 && !TARGET_HAVE_MOVT)
> + return IN_RANGE (INTVAL (xop1), 0, 0xff);
> +   else
> + return IN_RANGE (INTVAL (xop1), -0x8000, 0x7fff);
> + }
>  }
>  
>return false;
> diff --git a/gcc/config/arm/thumb1.md b/gcc/config/arm/thumb1.md
> index 3dedcae..2258a52 100644
> --- a/gcc/config/arm/thumb1.md
> +++ b/gcc/config/arm/thumb1.md
> @@ -675,7 +675,7 @@ (define_insn "*thumb1_movsi_insn"
>case 7:
>/* pure-code alternative: build the constant byte by byte,
>instead of loading it from a constant pool.  */
> - if (GET_CODE (operands[1]) == SYMBOL_REF)
> + if (arm_valid_symbolic_address_p (operands[1]))
> {
>   output_asm_insn (\"movs\\t%0, #:upper8_15:%1\", operands);
>   output_asm_insn (\"lsls\\t%0, #8\", operands);
> @@ -686,7 +686,7 @@ (define_insn "*thumb1_movsi_insn"
>   output_asm_insn (\"adds\\t%0, #:lower0_7:%1\", operands);
>   return \"\";
> }
> - else
> + else if (GET_CODE (operands[1]) == CONST_INT)
> {
>   int i;
>   HOST_WIDE_INT op1 = INTVAL (operands[1]);
> @@ -721,6 +721,7 @@ (define_insn "*thumb1_movsi_insn"
> output_asm_insn ("adds\t%0, %1", ops);
>   return "";
> }
> +   gcc_unreachable ();
>  
>case 8: return "ldr\t%0, %1";
>case 9: return "str\t%1, %0";
> diff --git a/gcc/testsuite/gcc.target/arm/pure-code/pr96770.c 
> b/gcc/testsuite/gcc.target/arm/pure-code/pr96770.c
> new file mode 100644
> index 000..a43d71f
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/arm/pure-code/pr96770.c
> @@ -0,0 +1,21 @@
> +/* { dg-do compile } */
> +/* { dg-options "-mpure-code" } */
> +
> +int arr[1000];
> +int *f4 (void) { return &arr[1]; }
> +
> +/* For cortex-m0 (thumb-1/v6m), we generate 4 movs with upper/lower:#arr+4.  
> */
> +/* { dg-final { scan-assembler-times "\\+4" 4 { target { { ! 
> arm_thumb1_movt_ok } && { ! arm_thumb2_ok } } } } } */
> +
> +/* For cortex-m with movt/movw (thumb-1/v8m.base or thumb-2), we
> +   generate a movt/movw pair with 

Re: [PATCH 2/2] arm: Improve handling of relocations with small offsets with -mpure-code on v6m (PR96770)

2020-10-22 Thread Christophe Lyon via Gcc-patches
ping?

On Tue, 6 Oct 2020 at 10:30, Christophe Lyon  wrote:
>
> ping?
>
> On Mon, 28 Sep 2020 at 11:09, Christophe Lyon
>  wrote:
> >
> > With -mpure-code on v6m (thumb-1), we can use small offsets with
> > upper/lower relocations to avoid the extra addition of the
> > offset.
> >
> > This patch accepts expressions symbol+offset as legitimate constants
> > when the literal pool is disabled, making sure that the offset is
> > within the range supported by thumb-1 [0..255].
> >
> > It also makes sure that thumb1_movsi_insn emits an error in case we
> > try to use it with an unsupported RTL construct.
> >
> > 2020-09-28  Christophe Lyon  
> >
> > gcc/
> > * config/arm/arm.c (thumb_legitimate_constant_p): Accept
> > (symbol_ref + addend) when literal pool is disabled.
> > (arm_valid_symbolic_address_p): Add support for thumb-1 without
> > MOVT/MOVW.
> > * config/arm/thumb1.md (*thumb1_movsi_insn): Accept (symbol_ref +
> > addend) in the pure-code alternative.
> >
> > gcc/testsuite/
> > * gcc.target/arm/pure-code/pr96770.c: New test.
> > ---
> >  gcc/config/arm/arm.c | 15 ++++++++++++---
> >  gcc/config/arm/thumb1.md |  5 +++--
> >  gcc/testsuite/gcc.target/arm/pure-code/pr96770.c | 21 +++++++++++++++++++++
> >  3 files changed, 36 insertions(+), 5 deletions(-)
> >  create mode 100644 gcc/testsuite/gcc.target/arm/pure-code/pr96770.c
> >
> > diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
> > index abe357e..ceeb91f 100644
> > --- a/gcc/config/arm/arm.c
> > +++ b/gcc/config/arm/arm.c
> > @@ -9489,7 +9489,8 @@ thumb_legitimate_constant_p (machine_mode mode 
> > ATTRIBUTE_UNUSED, rtx x)
> >  we build the symbol address with upper/lower
> >  relocations.  */
> >   || (TARGET_THUMB1
> > - && GET_CODE (x) == SYMBOL_REF
> > + && !label_mentioned_p (x)
> > + && arm_valid_symbolic_address_p (x)
> >   && arm_disable_literal_pool)
> >   || flag_pic);
> >  }
> > @@ -31495,7 +31496,10 @@ arm_emit_coreregs_64bit_shift (enum rtx_code code, 
> > rtx out, rtx in,
> > According to the ARM ELF ABI, the initial addend of REL-type relocations
> > processing MOVW and MOVT instructions is formed by interpreting the 
> > 16-bit
> > literal field of the instruction as a 16-bit signed value in the range
> > -   -32768 <= A < 32768.  */
> > +   -32768 <= A < 32768.
> > +
> > +   In Thumb-1 mode, we use upper/lower relocations which have an 8-bit
> > +   unsigned range of 0 <= A < 256.  */
> >
> >  bool
> >  arm_valid_symbolic_address_p (rtx addr)
> > @@ -31519,7 +31523,12 @@ arm_valid_symbolic_address_p (rtx addr)
> >xop1 = XEXP (tmp, 1);
> >
> >if (GET_CODE (xop0) == SYMBOL_REF && CONST_INT_P (xop1))
> > - return IN_RANGE (INTVAL (xop1), -0x8000, 0x7fff);
> > +   {
> > + if (TARGET_THUMB1 && !TARGET_HAVE_MOVT)
> > +   return IN_RANGE (INTVAL (xop1), 0, 0xff);
> > + else
> > +   return IN_RANGE (INTVAL (xop1), -0x8000, 0x7fff);
> > +   }
> >  }
> >
> >return false;
> > diff --git a/gcc/config/arm/thumb1.md b/gcc/config/arm/thumb1.md
> > index 3dedcae..2258a52 100644
> > --- a/gcc/config/arm/thumb1.md
> > +++ b/gcc/config/arm/thumb1.md
> > @@ -675,7 +675,7 @@ (define_insn "*thumb1_movsi_insn"
> >case 7:
> >/* pure-code alternative: build the constant byte by byte,
> >  instead of loading it from a constant pool.  */
> > -   if (GET_CODE (operands[1]) == SYMBOL_REF)
> > +   if (arm_valid_symbolic_address_p (operands[1]))
> >   {
> > output_asm_insn (\"movs\\t%0, #:upper8_15:%1\", operands);
> > output_asm_insn (\"lsls\\t%0, #8\", operands);
> > @@ -686,7 +686,7 @@ (define_insn "*thumb1_movsi_insn"
> > output_asm_insn (\"adds\\t%0, #:lower0_7:%1\", operands);
> > return \"\";
> >   }
> > -   else
> > +   else if (GET_CODE (operands[1]) == CONST_INT)
> >   {
> > int i;
> > HOST_WIDE_INT op1 = INTVAL (operands[1]);
> > @@ -721,6 +721,7 @@ (define_insn "*thumb1_movsi_insn"
> >   output_asm_insn ("adds\t%0, %1", ops);
> > return "";
> >   }
> > + gcc_unreachable ();
> >
> >case 8: return "ldr\t%0, %1";
> >case 9: return "str\t%1, %0";
> > diff --git a/gcc/testsuite/gcc.target/arm/pure-code/pr96770.c 
> > b/gcc/testsuite/gcc.target/arm/pure-code/pr96770.c
> > new file mode 100644
> > index 000..a43d71f
> > --- /dev/null
> > +++ b/gcc/testsuite/gcc.target/arm/pure-code/pr96770.c
> > @@ -0,0 +1,21 @@
> > +/* { dg-do compile } */
> > +/* { dg-options "-mpure-code" } */
> > +
> > +int arr[1000];
> > +int *f4 (void) { return &arr[1]; }
> > +
> > +/* For cortex-m0 (thumb-1/v6m), we generate 4 movs with 
> > upper/lower:#arr+4.  */
> > +/* { dg-final { 

Re: [PATCH 2/2] arm: Improve handling of relocations with small offsets with -mpure-code on v6m (PR96770)

2020-10-06 Thread Christophe Lyon via Gcc-patches
ping?

On Mon, 28 Sep 2020 at 11:09, Christophe Lyon
 wrote:
>
> With -mpure-code on v6m (thumb-1), we can use small offsets with
> upper/lower relocations to avoid the extra addition of the
> offset.
>
> This patch accepts expressions symbol+offset as legitimate constants
> when the literal pool is disabled, making sure that the offset is
> within the range supported by thumb-1 [0..255].
>
> It also makes sure that thumb1_movsi_insn emits an error in case we
> try to use it with an unsupported RTL construct.
>
> 2020-09-28  Christophe Lyon  
>
> gcc/
> * config/arm/arm.c (thumb_legitimate_constant_p): Accept
> (symbol_ref + addend) when literal pool is disabled.
> (arm_valid_symbolic_address_p): Add support for thumb-1 without
> MOVT/MOVW.
> * config/arm/thumb1.md (*thumb1_movsi_insn): Accept (symbol_ref +
> addend) in the pure-code alternative.
>
> gcc/testsuite/
> * gcc.target/arm/pure-code/pr96770.c: New test.
> ---
>  gcc/config/arm/arm.c | 15 ++++++++++++---
>  gcc/config/arm/thumb1.md |  5 +++--
>  gcc/testsuite/gcc.target/arm/pure-code/pr96770.c | 21 +++++++++++++++++++++
>  3 files changed, 36 insertions(+), 5 deletions(-)
>  create mode 100644 gcc/testsuite/gcc.target/arm/pure-code/pr96770.c
>
> diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
> index abe357e..ceeb91f 100644
> --- a/gcc/config/arm/arm.c
> +++ b/gcc/config/arm/arm.c
> @@ -9489,7 +9489,8 @@ thumb_legitimate_constant_p (machine_mode mode 
> ATTRIBUTE_UNUSED, rtx x)
>  we build the symbol address with upper/lower
>  relocations.  */
>   || (TARGET_THUMB1
> - && GET_CODE (x) == SYMBOL_REF
> + && !label_mentioned_p (x)
> + && arm_valid_symbolic_address_p (x)
>   && arm_disable_literal_pool)
>   || flag_pic);
>  }
> @@ -31495,7 +31496,10 @@ arm_emit_coreregs_64bit_shift (enum rtx_code code, 
> rtx out, rtx in,
> According to the ARM ELF ABI, the initial addend of REL-type relocations
> processing MOVW and MOVT instructions is formed by interpreting the 16-bit
> literal field of the instruction as a 16-bit signed value in the range
> -   -32768 <= A < 32768.  */
> +   -32768 <= A < 32768.
> +
> +   In Thumb-1 mode, we use upper/lower relocations which have an 8-bit
> +   unsigned range of 0 <= A < 256.  */
>
>  bool
>  arm_valid_symbolic_address_p (rtx addr)
> @@ -31519,7 +31523,12 @@ arm_valid_symbolic_address_p (rtx addr)
>xop1 = XEXP (tmp, 1);
>
>if (GET_CODE (xop0) == SYMBOL_REF && CONST_INT_P (xop1))
> - return IN_RANGE (INTVAL (xop1), -0x8000, 0x7fff);
> +   {
> + if (TARGET_THUMB1 && !TARGET_HAVE_MOVT)
> +   return IN_RANGE (INTVAL (xop1), 0, 0xff);
> + else
> +   return IN_RANGE (INTVAL (xop1), -0x8000, 0x7fff);
> +   }
>  }
>
>return false;
> diff --git a/gcc/config/arm/thumb1.md b/gcc/config/arm/thumb1.md
> index 3dedcae..2258a52 100644
> --- a/gcc/config/arm/thumb1.md
> +++ b/gcc/config/arm/thumb1.md
> @@ -675,7 +675,7 @@ (define_insn "*thumb1_movsi_insn"
>case 7:
>/* pure-code alternative: build the constant byte by byte,
>  instead of loading it from a constant pool.  */
> -   if (GET_CODE (operands[1]) == SYMBOL_REF)
> +   if (arm_valid_symbolic_address_p (operands[1]))
>   {
> output_asm_insn (\"movs\\t%0, #:upper8_15:%1\", operands);
> output_asm_insn (\"lsls\\t%0, #8\", operands);
> @@ -686,7 +686,7 @@ (define_insn "*thumb1_movsi_insn"
> output_asm_insn (\"adds\\t%0, #:lower0_7:%1\", operands);
> return \"\";
>   }
> -   else
> +   else if (GET_CODE (operands[1]) == CONST_INT)
>   {
> int i;
> HOST_WIDE_INT op1 = INTVAL (operands[1]);
> @@ -721,6 +721,7 @@ (define_insn "*thumb1_movsi_insn"
>   output_asm_insn ("adds\t%0, %1", ops);
> return "";
>   }
> + gcc_unreachable ();
>
>case 8: return "ldr\t%0, %1";
>case 9: return "str\t%1, %0";
> diff --git a/gcc/testsuite/gcc.target/arm/pure-code/pr96770.c 
> b/gcc/testsuite/gcc.target/arm/pure-code/pr96770.c
> new file mode 100644
> index 000..a43d71f
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/arm/pure-code/pr96770.c
> @@ -0,0 +1,21 @@
> +/* { dg-do compile } */
> +/* { dg-options "-mpure-code" } */
> +
> +int arr[1000];
> +int *f4 (void) { return &arr[1]; }
> +
> +/* For cortex-m0 (thumb-1/v6m), we generate 4 movs with upper/lower:#arr+4.  
> */
> +/* { dg-final { scan-assembler-times "\\+4" 4 { target { { ! 
> arm_thumb1_movt_ok } && { ! arm_thumb2_ok } } } } } */
> +
> +/* For cortex-m with movt/movw (thumb-1/v8m.base or thumb-2), we
> +   generate a movt/movw pair with upper/lower:#arr+4.  */
> +/* { dg-final { scan-assembler-times "\\+4" 2 { target { arm_thumb1_movt_ok 
> || 

[PATCH 2/2] arm: Improve handling of relocations with small offsets with -mpure-code on v6m (PR96770)

2020-09-28 Thread Christophe Lyon via Gcc-patches
With -mpure-code on v6m (thumb-1), we can use small offsets with
upper/lower relocations to avoid the extra addition of the
offset.

This patch accepts expressions symbol+offset as legitimate constants
when the literal pool is disabled, making sure that the offset is
within the range supported by thumb-1 [0..255].

It also makes sure that thumb1_movsi_insn emits an error in case we
try to use it with an unsupported RTL construct.
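
As an illustration (a hedged sketch, not output copied verbatim from GCC:
register choice, the exact spelling of the relocation operands, and the
middle steps that are elided in the diff context below are reconstructed),
taking &arr[1] under -mpure-code on v6m can now be built directly as arr+4
with upper/lower relocations, instead of building arr and then adding the
offset with a separate instruction:

    @ assumed invocation: arm-none-eabi-gcc -mcpu=cortex-m0 -mpure-code -S pr96770.c
    @ build the 32-bit address of arr+4 byte by byte, with no literal pool load
    movs    r0, #:upper8_15:#arr+4   @ bits 31..24 of (arr + 4)
    lsls    r0, #8
    adds    r0, #:upper0_7:#arr+4    @ bits 23..16
    lsls    r0, #8
    adds    r0, #:lower8_15:#arr+4   @ bits 15..8
    lsls    r0, #8
    adds    r0, #:lower0_7:#arr+4    @ bits 7..0
    bx      lr                       @ return &arr[1] from f4

The offset is carried by the relocation addends themselves, which is why it
must fit in the unsigned 8-bit range [0..255] checked in
arm_valid_symbolic_address_p below.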

2020-09-28  Christophe Lyon  

gcc/
* config/arm/arm.c (thumb_legitimate_constant_p): Accept
(symbol_ref + addend) when literal pool is disabled.
(arm_valid_symbolic_address_p): Add support for thumb-1 without
MOVT/MOVW.
* config/arm/thumb1.md (*thumb1_movsi_insn): Accept (symbol_ref +
addend) in the pure-code alternative.

gcc/testsuite/
* gcc.target/arm/pure-code/pr96770.c: New test.
---
 gcc/config/arm/arm.c | 15 ++++++++++++---
 gcc/config/arm/thumb1.md |  5 +++--
 gcc/testsuite/gcc.target/arm/pure-code/pr96770.c | 21 +++++++++++++++++++++
 3 files changed, 36 insertions(+), 5 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/arm/pure-code/pr96770.c

diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index abe357e..ceeb91f 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -9489,7 +9489,8 @@ thumb_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
 we build the symbol address with upper/lower
 relocations.  */
  || (TARGET_THUMB1
- && GET_CODE (x) == SYMBOL_REF
+ && !label_mentioned_p (x)
+ && arm_valid_symbolic_address_p (x)
  && arm_disable_literal_pool)
  || flag_pic);
 }
@@ -31495,7 +31496,10 @@ arm_emit_coreregs_64bit_shift (enum rtx_code code, rtx out, rtx in,
According to the ARM ELF ABI, the initial addend of REL-type relocations
processing MOVW and MOVT instructions is formed by interpreting the 16-bit
literal field of the instruction as a 16-bit signed value in the range
-   -32768 <= A < 32768.  */
+   -32768 <= A < 32768.
+
+   In Thumb-1 mode, we use upper/lower relocations which have an 8-bit
+   unsigned range of 0 <= A < 256.  */
 
 bool
 arm_valid_symbolic_address_p (rtx addr)
@@ -31519,7 +31523,12 @@ arm_valid_symbolic_address_p (rtx addr)
   xop1 = XEXP (tmp, 1);
 
   if (GET_CODE (xop0) == SYMBOL_REF && CONST_INT_P (xop1))
- return IN_RANGE (INTVAL (xop1), -0x8000, 0x7fff);
+   {
+ if (TARGET_THUMB1 && !TARGET_HAVE_MOVT)
+   return IN_RANGE (INTVAL (xop1), 0, 0xff);
+ else
+   return IN_RANGE (INTVAL (xop1), -0x8000, 0x7fff);
+   }
 }
 
   return false;
diff --git a/gcc/config/arm/thumb1.md b/gcc/config/arm/thumb1.md
index 3dedcae..2258a52 100644
--- a/gcc/config/arm/thumb1.md
+++ b/gcc/config/arm/thumb1.md
@@ -675,7 +675,7 @@ (define_insn "*thumb1_movsi_insn"
   case 7:
   /* pure-code alternative: build the constant byte by byte,
 instead of loading it from a constant pool.  */
-   if (GET_CODE (operands[1]) == SYMBOL_REF)
+   if (arm_valid_symbolic_address_p (operands[1]))
  {
output_asm_insn (\"movs\\t%0, #:upper8_15:%1\", operands);
output_asm_insn (\"lsls\\t%0, #8\", operands);
@@ -686,7 +686,7 @@ (define_insn "*thumb1_movsi_insn"
output_asm_insn (\"adds\\t%0, #:lower0_7:%1\", operands);
return \"\";
  }
-   else
+   else if (GET_CODE (operands[1]) == CONST_INT)
  {
int i;
HOST_WIDE_INT op1 = INTVAL (operands[1]);
@@ -721,6 +721,7 @@ (define_insn "*thumb1_movsi_insn"
  output_asm_insn ("adds\t%0, %1", ops);
return "";
  }
+ gcc_unreachable ();
 
   case 8: return "ldr\t%0, %1";
   case 9: return "str\t%1, %0";
diff --git a/gcc/testsuite/gcc.target/arm/pure-code/pr96770.c b/gcc/testsuite/gcc.target/arm/pure-code/pr96770.c
new file mode 100644
index 0000000..a43d71f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/pure-code/pr96770.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-options "-mpure-code" } */
+
+int arr[1000];
+int *f4 (void) { return &arr[1]; }
+
+/* For cortex-m0 (thumb-1/v6m), we generate 4 movs with upper/lower:#arr+4.  */
+/* { dg-final { scan-assembler-times "\\+4" 4 { target { { ! arm_thumb1_movt_ok } && { ! arm_thumb2_ok } } } } } */
+
+/* For cortex-m with movt/movw (thumb-1/v8m.base or thumb-2), we
+   generate a movt/movw pair with upper/lower:#arr+4.  */
+/* { dg-final { scan-assembler-times "\\+4" 2 { target { arm_thumb1_movt_ok || arm_thumb2_ok } } } } */
+
+int *f5 (void) { return &arr[80]; }
+
+/* For cortex-m0 (thumb-1/v6m), we generate 1 ldr from rodata pointer to arr+320.  */
+/* { dg-final { scan-assembler-times "\\+320" 1 { target { { ! arm_thumb1_movt_ok } && { ! arm_thumb2_ok } } } } } */
+
+/* For cortex-m with