diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 55a5682baeb13041053ef9e6eaa831182ea8b10c..b702493e1351478272bb7d26991a5673943d61ec 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -668,6 +668,10 @@
   BUILTIN_VDQF_DF (TERNOP, float_mls, 0, FP)
   BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
   BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
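+  /* Implemented by aarch64_float_mla_lane<mode> and
+     aarch64_float_mls_lane<mode>.  */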
+  BUILTIN_VDQSF (QUADOP_LANE, float_mla_lane, 0, FP)
+  BUILTIN_VDQSF (QUADOP_LANE, float_mls_lane, 0, FP)
 
   /* Implemented by aarch64_simd_bsl<mode>.  */
   BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 95363d7b5ad11f775aa03f24bbcb0b66d20abb7c..abc8b1708b86bcee2e5082cc4659a197c5821985 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -2625,6 +2625,25 @@
   [(set_attr "type" "neon_fp_mul_<stype><q>")]
 )
 
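+;; Multiply each element of operand 1 by the V2SF lane of operand 2
+;; selected by operand 3 (fmul by-element).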
+(define_insn "mul_lane<mode>3"
+  [(set (match_operand:VDQSF 0 "register_operand" "=w")
+	(mult:VDQSF
+	  (vec_duplicate:VDQSF
+	    (vec_select:<VEL>
+	      (match_operand:V2SF 2 "register_operand" "w")
+	      (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
+	  (match_operand:VDQSF 1 "register_operand" "w")))]
+  "TARGET_SIMD"
+  {
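+    /* Adjust the lane index for big-endian targets.  */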
+    operands[3] = aarch64_endian_lane_rtx (V2SFmode, INTVAL (operands[3]));
+    return "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
+  }
+  [(set_attr "type" "neon_fp_mul_s_scalar<q>")]
+)
+
 (define_expand "div<mode>3"
  [(set (match_operand:VHSDF 0 "register_operand")
        (div:VHSDF (match_operand:VHSDF 1 "register_operand")
@@ -2728,6 +2747,51 @@
   }
 )
 
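+;; vmla[q]_lane_f32: multiply operand 2 by the selected lane of operand
+;; 3 and accumulate into operand 1.  Expanded as a separate fmul
+;; by-element and fadd so the multiply-add remains unfused.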
+(define_expand "aarch64_float_mla_lane<mode>"
+  [(set (match_operand:VDQSF 0 "register_operand")
+	(plus:VDQSF
+	  (mult:VDQSF
+	    (vec_duplicate:VDQSF
+	      (vec_select:<VEL>
+		(match_operand:V2SF 3 "register_operand")
+		(parallel [(match_operand:SI 4 "immediate_operand")])))
+	    (match_operand:VDQSF 2 "register_operand"))
+	  (match_operand:VDQSF 1 "register_operand")))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
+				    operands[3], operands[4]));
+    emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
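+;; vmls[q]_lane_f32: as above, but the product is subtracted from
+;; operand 1.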
+(define_expand "aarch64_float_mls_lane<mode>"
+  [(set (match_operand:VDQSF 0 "register_operand")
+	(minus:VDQSF
+	  (match_operand:VDQSF 1 "register_operand")
+	  (mult:VDQSF
+	    (vec_duplicate:VDQSF
+	      (vec_select:<VEL>
+		(match_operand:V2SF 3 "register_operand")
+		(parallel [(match_operand:SI 4 "immediate_operand")])))
+	    (match_operand:VDQSF 2 "register_operand"))))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
+				    operands[3], operands[4]));
+    emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
 (define_insn "fma<mode>4"
   [(set (match_operand:VHSDF 0 "register_operand" "=w")
        (fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index d4ed47249e3e39f8c88274657c809293e20bec9d..082409fe523cee6ae78f02574762b92d47885c42 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -20393,7 +20393,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmla_lane_f32 (float32x2_t __a, float32x2_t __b,
 	       float32x2_t __c, const int __lane)
 {
-  return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mla_lanev2sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x4_t
@@ -20477,7 +20477,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b,
 		float32x2_t __c, const int __lane)
 {
-  return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mla_lanev4sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x8_t
@@ -20591,7 +20591,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmls_lane_f32 (float32x2_t __a, float32x2_t __b,
 	       float32x2_t __c, const int __lane)
 {
-  return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mls_lanev2sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x4_t
@@ -20675,7 +20675,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b,
 		float32x2_t __c, const int __lane)
 {
-  return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mls_lanev4sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x8_t
