Contrary to all documentation, SLOW_BYTE_ACCESS simply means accessing
bitfields by their declared type, which results in better code generation
on practically any target.  So set it correctly to 1 on Arm.

As a result we generate much better code for bitfields:

typedef struct
{
  int x : 2, y : 8, z : 2;
} X;

int bitfield (X *p)
{
  return p->x + p->y + p->z;
}


Before:
        ldrb    r3, [r0]        @ zero_extendqisi2
        ldrh    r2, [r0]
        ldrb    r0, [r0, #1]    @ zero_extendqisi2
        sbfx    r3, r3, #0, #2
        sbfx    r2, r2, #2, #8
        sbfx    r0, r0, #2, #2
        sxtab   r3, r2, r3
        sxtab   r0, r3, r0
        bx      lr

After:
        ldr     r0, [r0]
        sbfx    r3, r0, #0, #2
        sbfx    r2, r0, #2, #8
        sbfx    r0, r0, #10, #2
        sxtab   r3, r2, r3
        add     r0, r0, r3
        bx      lr
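
For illustration only (not part of the patch), a rough C-level sketch of
what the wide access amounts to: one 32-bit load of the container word,
then sign-extracting each field, matching the sbfx sequence above.  The
name bitfield_wide is made up, the bit positions assume the little-endian
field layout of X as declared above, and the sign-extraction idiom relies
on GCC's arithmetic right shift of signed values.

/* Sketch of the wide-access strategy: load the whole 32-bit container
   once instead of issuing separate byte/halfword loads, then extract
   x (bits 0-1), y (bits 2-9) and z (bits 10-11) from it.  */
int bitfield_wide (X *p)
{
  unsigned int w;
  __builtin_memcpy (&w, p, sizeof w);   /* single 32-bit load */
  int x = (int)(w << 30) >> 30;         /* sign-extract bits 0-1   */
  int y = (int)(w << 22) >> 24;         /* sign-extract bits 2-9   */
  int z = (int)(w << 20) >> 30;         /* sign-extract bits 10-11 */
  return x + y + z;
}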

Bootstrap OK, OK for commit?

ChangeLog:
2019-09-11  Wilco Dijkstra  <wdijk...@arm.com>

        * config/arm/arm.h (SLOW_BYTE_ACCESS): Set to 1.

--

diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h
index 8b92c830de09a3ad49420fdfacde02d8efc2a89b..11212d988a0f56299c2266bace80170d074be56c 100644
--- a/gcc/config/arm/arm.h
+++ b/gcc/config/arm/arm.h
@@ -1892,8 +1892,9 @@ enum arm_auto_incmodes
    ((arm_arch4 || (MODE) == QImode) ? ZERO_EXTEND                      \
     : ((BYTES_BIG_ENDIAN && (MODE) == HImode) ? SIGN_EXTEND : UNKNOWN)))
 
-/* Nonzero if access to memory by bytes is slow and undesirable.  */
-#define SLOW_BYTE_ACCESS 0
+/* Contrary to all documentation, this enables wide bitfield accesses,
+   which results in better code when accessing multiple bitfields.  */
+#define SLOW_BYTE_ACCESS 1
 
 /* Immediate shift counts are truncated by the output routines (or was it
    the assembler?).  Shift counts in a register are truncated by ARM.  Note
