diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index aed035a..f42cb75 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -109,6 +109,7 @@ enum aarch64_code_model aarch64_cmodel;
 #define TARGET_HAVE_TLS 1
 #endif
 
+static bool aarch64_lra_p (void);
 static bool aarch64_composite_type_p (const_tree, enum machine_mode);
 static bool aarch64_vfp_is_call_or_return_candidate (enum machine_mode,
 						     const_tree,
@@ -6083,6 +6084,13 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
   return -1;
 }
 
+/* Return true if we use LRA instead of the reload pass.  */
+static bool
+aarch64_lra_p (void)
+{
+  return aarch64_lra_flag;
+}
+
 /* Return TRUE if the type, as described by TYPE and MODE, is a composite
    type as described in AAPCS64 \S 4.3.  This includes aggregate, union and
    array types.  The C99 floating-point complex types are also considered
@@ -8208,6 +8216,9 @@ aarch64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
 #undef TARGET_LIBGCC_CMP_RETURN_MODE
 #define TARGET_LIBGCC_CMP_RETURN_MODE aarch64_libgcc_cmp_return_mode
 
+#undef TARGET_LRA_P
+#define TARGET_LRA_P aarch64_lra_p
+
 #undef TARGET_MANGLE_TYPE
 #define TARGET_MANGLE_TYPE aarch64_mangle_type
 
diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt
index 8ff6ca1..0c31312 100644
--- a/gcc/config/aarch64/aarch64.opt
+++ b/gcc/config/aarch64/aarch64.opt
@@ -103,6 +103,10 @@ mabi=
 Target RejectNegative Joined Enum(aarch64_abi) Var(aarch64_abi) Init(AARCH64_ABI_DEFAULT)
 -mabi=ABI	Generate code that conforms to the specified ABI
 
+mlra
+Target Report Var(aarch64_lra_flag) Init(1) Save
+Use LRA instead of reload
+
 Enum
 Name(aarch64_abi) Type(int)
 Known AArch64 ABIs (for use with the -mabi= option):
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index f731bb6..182b009 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -68,6 +68,7 @@ struct four_ints
 };
 
 /* Forward function declarations.  */
+static bool arm_lra_p (void);
 static bool arm_needs_doubleword_align (enum machine_mode, const_tree);
 static int arm_compute_static_chain_stack_bytes (void);
 static arm_stack_offsets *arm_get_frame_offsets (void);
@@ -338,6 +339,9 @@ static const struct attribute_spec arm_attribute_table[] =
 #undef TARGET_LEGITIMIZE_ADDRESS
 #define TARGET_LEGITIMIZE_ADDRESS arm_legitimize_address
 
+#undef TARGET_LRA_P
+#define TARGET_LRA_P arm_lra_p
+
 #undef  TARGET_ATTRIBUTE_TABLE
 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
 
@@ -4971,6 +4975,12 @@ arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
     }
 }
 
+/* Return true if we use LRA instead of the reload pass.  */
+static bool
+arm_lra_p (void)
+{
+  return arm_lra_flag;
+}
 
 /* Return true if mode/type need doubleword alignment.  */
 static bool
diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h
index 1781b75..05a271e 100644
--- a/gcc/config/arm/arm.h
+++ b/gcc/config/arm/arm.h
@@ -1266,11 +1266,12 @@ enum reg_class
 
 /* Must leave BASE_REGS reloads alone */
 #define THUMB_SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X)		\
+  (lra_in_progress ? NO_REGS : 						\
   ((CLASS) != LO_REGS && (CLASS) != BASE_REGS				\
    ? ((true_regnum (X) == -1 ? LO_REGS					\
        : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS	\
        : NO_REGS)) 							\
-   : NO_REGS)
+   : NO_REGS))
 
 #define THUMB_SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X)		\
   ((CLASS) != LO_REGS && (CLASS) != BASE_REGS				\
diff --git a/gcc/config/arm/arm.opt b/gcc/config/arm/arm.opt
index b9ae2b0..7c9ea36 100644
--- a/gcc/config/arm/arm.opt
+++ b/gcc/config/arm/arm.opt
@@ -109,6 +109,10 @@ mfloat-abi=
 Target RejectNegative Joined Enum(float_abi_type) Var(arm_float_abi) Init(TARGET_DEFAULT_FLOAT_ABI)
 Specify if floating point hardware should be used
 
+mlra
+Target Report Var(arm_lra_flag) Init(1) Save
+Use LRA instead of reload
+
 Enum
 Name(float_abi_type) Type(enum float_abi_type)
 Known floating-point ABIs (for use with the -mfloat-abi= option):
diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c
index 95a314f..04832cf 100644
--- a/gcc/rtlanal.c
+++ b/gcc/rtlanal.c
@@ -5483,7 +5483,18 @@ must_be_base_p (rtx x)
 static bool
 must_be_index_p (rtx x)
 {
-  return GET_CODE (x) == MULT || GET_CODE (x) == ASHIFT;
+  rtx addr = x;
+  if (GET_CODE (addr) == SIGN_EXTRACT)
+    addr = *strip_address_mutations (&XEXP (addr, 0));
+
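+  /* The index of a scaled-index address can also appear as a shift or
+     rotate of a register (e.g. on ARM), so accept those forms as well.  */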
+  return (GET_CODE (addr) == MULT
+          || GET_CODE (addr) == ASHIFT
+          || GET_CODE (addr) == ASHIFTRT
+          || GET_CODE (addr) == LSHIFTRT
+          || GET_CODE (addr) == ROTATE
+          || GET_CODE (addr) == ROTATERT);
 }
 
 /* Set the segment part of address INFO to LOC, given that INNER is the
@@ -5505,6 +5516,9 @@ set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
 static void
 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
 {
+  if (GET_CODE (*inner) == SIGN_EXTRACT)
+    inner = strip_address_mutations (&XEXP (*inner, 0));
+
   if (GET_CODE (*inner) == LO_SUM)
     inner = strip_address_mutations (&XEXP (*inner, 0));
   gcc_checking_assert (REG_P (*inner)
@@ -5522,7 +5536,15 @@ set_address_index (struct address_info *info, rtx *loc, rtx *inner)
 static void
 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
 {
-  if ((GET_CODE (*inner) == MULT || GET_CODE (*inner) == ASHIFT)
+  if (GET_CODE (*inner) == SIGN_EXTRACT)
+    inner = strip_address_mutations (&XEXP (*inner, 0));
+
+  if ((GET_CODE (*inner) == MULT
+       || GET_CODE (*inner) == ASHIFT
+       || GET_CODE (*inner) == ASHIFTRT
+       || GET_CODE (*inner) == LSHIFTRT
+       || GET_CODE (*inner) == ROTATE
+       || GET_CODE (*inner) == ROTATERT)
       && CONSTANT_P (XEXP (*inner, 1)))
     inner = strip_address_mutations (&XEXP (*inner, 0));
   gcc_checking_assert (REG_P (*inner)
