This patch replaces three separate default definitions of
SLOW_UNALIGNED_ACCESS with a single global one in defaults.h.
Note that tm.texi requires SLOW_UNALIGNED_ACCESS to be true
whenever STRICT_ALIGNMENT is true, which is why the local
lra-constraints.c default of 0 and its explicit STRICT_ALIGNMENT
check can both be replaced by the shared STRICT_ALIGNMENT default.
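
For a port that wants to override the default, the definition could
look something like the following in the target's header.  This is
only a sketch: the 32-bit threshold is invented for illustration and
not taken from any in-tree port.  The leading STRICT_ALIGNMENT term
shows how an override keeps the tm.texi requirement satisfied:

  /* Hypothetical override: unaligned accesses are slow unless the
     access is at least 32-bit aligned.  The STRICT_ALIGNMENT term
     preserves the requirement that the macro is true whenever
     STRICT_ALIGNMENT is.  */
  #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) \
    (STRICT_ALIGNMENT || (ALIGN) < 32)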
Tested on x86_64-linux-gnu, powerpc64-linux-gnu and mipsisa64-elf.
Applied as obvious.
Richard
gcc/
* defaults.h (SLOW_UNALIGNED_ACCESS): Provide default definition.
* expmed.c (SLOW_UNALIGNED_ACCESS): Remove default definition.
* expr.c (SLOW_UNALIGNED_ACCESS): Likewise.
* lra-constraints.c (SLOW_UNALIGNED_ACCESS): Likewise.
(simplify_operand_subreg): Don't check STRICT_ALIGNMENT here.
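
For context, the callers in expmed.c and expr.c consult the macro
along these lines (a sketch of the common pattern, not code from this
patch; the helper name is invented):

  /* Illustrative helper: true if a direct access in MODE at
     alignment ALIGN is reasonable, either because the reference is
     aligned enough or because the target reports unaligned accesses
     as cheap.  */
  static bool
  direct_access_ok_p (enum machine_mode mode, unsigned int align)
  {
    if (align >= GET_MODE_ALIGNMENT (mode))
      return true;
    return !SLOW_UNALIGNED_ACCESS (mode, align);
  }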
Index: gcc/defaults.h
===================================================================
--- gcc/defaults.h 2012-08-02 21:10:06.000000000 +0100
+++ gcc/defaults.h 2012-10-28 10:30:47.340353996 +0000
@@ -1218,6 +1218,10 @@ #define MINIMUM_ALIGNMENT(EXP,MODE,ALIGN
#define ATTRIBUTE_ALIGNED_VALUE BIGGEST_ALIGNMENT
#endif
+#ifndef SLOW_UNALIGNED_ACCESS
+#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
+#endif
+
/* For most ports anything that evaluates to a constant symbolic
or integer value is acceptable as a constant address. */
#ifndef CONSTANT_ADDRESS_P
Index: gcc/expmed.c
===================================================================
--- gcc/expmed.c 2012-10-28 10:25:12.000000000 +0000
+++ gcc/expmed.c 2012-10-28 10:30:44.178354004 +0000
@@ -69,11 +69,6 @@ static rtx expand_sdiv_pow2 (enum machin
/* Test whether a value is zero of a power of two. */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
-#ifndef SLOW_UNALIGNED_ACCESS
-#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
-#endif
-
-
/* Reduce conditional compilation elsewhere. */
#ifndef HAVE_insv
#define HAVE_insv 0
Index: gcc/expr.c
===================================================================
--- gcc/expr.c 2012-10-25 10:08:06.000000000 +0100
+++ gcc/expr.c 2012-10-28 10:31:44.133353857 +0000
@@ -189,12 +189,6 @@ #define STORE_BY_PIECES_P(SIZE, ALIGN) \
(move_by_pieces_ninsns (SIZE, ALIGN, STORE_MAX_PIECES + 1) \
< (unsigned int) MOVE_RATIO (optimize_insn_for_speed_p ()))
#endif
-
-/* SLOW_UNALIGNED_ACCESS is nonzero if unaligned accesses are very slow. */
-
-#ifndef SLOW_UNALIGNED_ACCESS
-#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
-#endif
/* This is run to set up which modes can be used
directly in memory and to initialize the block move optab. It is run
Index: gcc/lra-constraints.c
===================================================================
--- gcc/lra-constraints.c 2012-10-26 11:50:16.000000000 +0100
+++ gcc/lra-constraints.c 2012-10-28 10:32:02.499353813 +0000
@@ -1105,10 +1105,6 @@ process_addr_reg (rtx *loc, rtx *before,
return true;
}
-#ifndef SLOW_UNALIGNED_ACCESS
-#define SLOW_UNALIGNED_ACCESS(mode, align) 0
-#endif
-
/* Make reloads for subreg in operand NOP with internal subreg mode
REG_MODE, add new reloads for further processing. Return true if
any reload was generated. */
@@ -1132,8 +1128,7 @@ simplify_operand_subreg (int nop, enum m
address might violate the necessary alignment or the access might
be slow. So take this into consideration. */
if ((MEM_P (reg)
- && ((! STRICT_ALIGNMENT
- && ! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (reg)))
+ && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (reg))
|| MEM_ALIGN (reg) >= GET_MODE_ALIGNMENT (mode)))
|| (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER))
{
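
The simplify_operand_subreg change relies on the tm.texi rule above:
any conforming definition of SLOW_UNALIGNED_ACCESS is true whenever
STRICT_ALIGNMENT is, so ! SLOW_UNALIGNED_ACCESS (mode, ...) already
implies ! STRICT_ALIGNMENT and the dropped test was redundant.  As a
sketch of the invariant (illustrative only, not part of the patch):

  /* A "fast" unaligned access can only be reported on a
     non-strict-alignment target; this is the contrapositive of the
     tm.texi requirement and is what the new condition relies on.  */
  static void
  check_slow_unaligned_invariant (enum machine_mode mode,
                                  unsigned int align)
  {
    if (!SLOW_UNALIGNED_ACCESS (mode, align))
      gcc_assert (!STRICT_ALIGNMENT);
  }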