Add support for the `SVE_BFSCALE` architecture extension.

gcc/ChangeLog:

        * doc/invoke.texi: Document `+sve-bfscale` flag.
        * config/aarch64/aarch64-option-extensions.def (AARCH64_OPT_EXTENSION): 
New extension.
        * config/aarch64/aarch64-c.cc (aarch64_update_cpp_builtins): New 
predefined macro.
        * config/aarch64/aarch64-sve-builtins-base.cc: Skip constant folding for
        floating point or unpredicated multiplications.
        * config/aarch64/aarch64-sve-builtins-base.def (svscale): New function.
        (svmul): New function.
        * config/aarch64/aarch64-sve.md (@aarch64_sve_<optab><mode>): New insn.
        (@aarch64_sve_<optab><mode>_single): New insn.
        * config/aarch64/aarch64.h (TARGET_SVE_BFSCALE): New macro.
        * config/aarch64/iterators.md (SVE_FULL_F_SCALAR): Add `VNx8BF`.
        (SVE_FULL_F_BFSCALE): New mode iterator.
        (SVE_BFx24): New mode iterator.
        (UNSPEC_FMUL): New unspec.
        (V_INT_EQUIV): Add entries for `VNx16BF` and `VNx32BF`.
        (b): Add entries for `BF` and `HF`.
        (SVSCALE_SINGLE_INTARG): New mode attr.
        (SVE_FP_MUL): New int iterator.
        (optab): Add entry for `UNSPEC_FMUL`.

gcc/testsuite/ChangeLog:

        * lib/target-supports.exp: Add `sve-bfscale` to `sve_exts`.
        * gcc.target/aarch64/pragma_cpp_predefs_4.c: Test for
        `__ARM_FEATURE_SVE_BFSCALE` macro.
        * gcc.target/aarch64/sve/acle/asm/bfmul_bf16.c: New test.
        * gcc.target/aarch64/sve/acle/asm/bfscale_bf16.c: New test.
        * gcc.target/aarch64/sve/acle/asm/bfscale_bf16_pred.c: New test.
        * gcc.target/aarch64/sve/acle/general-c/bfscale.c: New test.
---
 gcc/config/aarch64/aarch64-c.cc               |   2 +
 .../aarch64/aarch64-option-extensions.def     |   1 +
 .../aarch64/aarch64-sve-builtins-base.cc      |  11 +-
 .../aarch64/aarch64-sve-builtins-base.def     |  13 ++
 gcc/config/aarch64/aarch64-sve.md             | 150 +++++++++++++-----
 gcc/config/aarch64/aarch64.h                  |   1 +
 gcc/config/aarch64/iterators.md               |  29 +++-
 gcc/doc/invoke.texi                           |   5 +-
 .../gcc.target/aarch64/pragma_cpp_predefs_4.c |   5 +
 .../aarch64/sve/acle/asm/bfmul_bf16.c         |  49 ++++++
 .../aarch64/sve/acle/asm/bfscale_bf16.c       |  49 ++++++
 .../aarch64/sve/acle/asm/bfscale_bf16_pred.c  |  76 +++++++++
 .../aarch64/sve/acle/general-c/bfscale.c      |  63 ++++++++
 gcc/testsuite/lib/target-supports.exp         |  14 +-
 14 files changed, 413 insertions(+), 55 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/acle/asm/bfmul_bf16.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/acle/asm/bfscale_bf16.c
 create mode 100644 
gcc/testsuite/gcc.target/aarch64/sve/acle/asm/bfscale_bf16_pred.c
 create mode 100644 
gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/bfscale.c

diff --git a/gcc/config/aarch64/aarch64-c.cc b/gcc/config/aarch64/aarch64-c.cc
index c3957c762ef..4b2814df824 100644
--- a/gcc/config/aarch64/aarch64-c.cc
+++ b/gcc/config/aarch64/aarch64-c.cc
@@ -267,6 +267,8 @@ aarch64_update_cpp_builtins (cpp_reader *pfile)
                        "__ARM_FEATURE_BF16", pfile);
   aarch64_def_or_undef (TARGET_SVE_BF16,
                        "__ARM_FEATURE_SVE_BF16", pfile);
+  aarch64_def_or_undef (TARGET_SVE_BFSCALE,
+                       "__ARM_FEATURE_SVE_BFSCALE", pfile);
 
   aarch64_def_or_undef (TARGET_LUT, "__ARM_FEATURE_LUT", pfile);
   aarch64_def_or_undef (TARGET_SME_LUTv2, "__ARM_FEATURE_SME_LUTv2", pfile);
diff --git a/gcc/config/aarch64/aarch64-option-extensions.def 
b/gcc/config/aarch64/aarch64-option-extensions.def
index 083515d890d..379d549ceb2 100644
--- a/gcc/config/aarch64/aarch64-option-extensions.def
+++ b/gcc/config/aarch64/aarch64-option-extensions.def
@@ -183,6 +183,7 @@ AARCH64_OPT_FMV_EXTENSION("sve", SVE, (SIMD, F16, FCMA), 
(), (), "sve")
 
 /* This specifically does not imply +sve.  */
 AARCH64_OPT_EXTENSION("sve-b16b16", SVE_B16B16, (), (), (), "sveb16b16")
+AARCH64_OPT_EXTENSION ("sve-bfscale", SVE_BFSCALE, (), (), (), "svebfscale")
 
 AARCH64_OPT_EXTENSION("f32mm", F32MM, (SVE), (), (), "svef32mm")
 
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc 
b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
index ecc06877cac..ded1bfcf175 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
@@ -2302,11 +2302,20 @@ class svmul_impl : public rtx_code_function
 {
 public:
   CONSTEXPR svmul_impl ()
-    : rtx_code_function (MULT, MULT, UNSPEC_COND_FMUL) {}
+    : rtx_code_function (MULT, MULT, UNSPEC_COND_FMUL, UNSPEC_FMUL)
+  {}
 
   gimple *
   fold (gimple_folder &f) const override
   {
+    /* The code below assumes that the function has 3 arguments (pg, rn, rm).
+       Unpredicated functions have only two arguments (rn, rm) so will cause
+       the below code to crash.
+       Also skip if it does not operate on integers, since all the 
optimizations
+       below are for integer multiplication.  */
+    if (f.pred == aarch64_sve::PRED_none || !f.type_suffix (0).integer_p)
+      return nullptr;
+
     if (auto *res = f.fold_const_binary (MULT_EXPR))
       return res;
 
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.def 
b/gcc/config/aarch64/aarch64-sve-builtins-base.def
index 4c8d9a62d71..18e5ae3b558 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-base.def
+++ b/gcc/config/aarch64/aarch64-sve-builtins-base.def
@@ -345,6 +345,19 @@ DEF_SVE_FUNCTION (svcvtnt, unary_convert_narrowt, 
cvt_bfloat, mx)
 DEF_SVE_FUNCTION (svbfmmla, ternary_bfloat, s_float, none)
 #undef REQUIRED_EXTENSIONS
 
+#define REQUIRED_EXTENSIONS nonstreaming_sve (AARCH64_FL_SVE_BFSCALE)
+DEF_SVE_FUNCTION (svscale, binary_int_opt_n, h_bfloat, mxz)
+#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS nonstreaming_sve (AARCH64_FL_SME2 \
+                                           | AARCH64_FL_SVE_BFSCALE)
+DEF_SVE_FUNCTION_GS (svscale, binary_int_opt_single_n, h_bfloat, x24, none)
+#undef REQUIRED_EXTENSIONS
+
+#define REQUIRED_EXTENSIONS ssve (AARCH64_FL_SVE_BFSCALE | AARCH64_FL_SME2)
+DEF_SVE_FUNCTION_GS (svmul, binary_opt_single_n, h_bfloat, x24, none)
+#undef REQUIRED_EXTENSIONS
+
 #define REQUIRED_EXTENSIONS ssve (AARCH64_FL_I8MM)
 DEF_SVE_FUNCTION (svsudot, ternary_intq_uintq_opt_n, s_signed, none)
 DEF_SVE_FUNCTION (svsudot_lane, ternary_intq_uintq_lane, s_signed, none)
diff --git a/gcc/config/aarch64/aarch64-sve.md 
b/gcc/config/aarch64/aarch64-sve.md
index 4648aa67e0c..be8f0b0e0bd 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -5485,6 +5485,7 @@ (define_insn_and_rewrite "*cond_<sve_int_op><mode>_any"
 ;; -------------------------------------------------------------------------
 ;; Includes:
 ;; - FSCALE
+;; - BFSCALE (SVE_BFSCALE)
 ;; - FTSMUL
 ;; - FTSSEL
 ;; -------------------------------------------------------------------------
@@ -5517,20 +5518,46 @@ (define_insn "@aarch64_sve_<optab><mode>"
   [(set_attr "sve_type" "sve_fp_mul")]
 )
 
+;; BFSCALE (multiple vectors)
+;; svbfloat16x2_t svscale[_bf16_x2] (svbfloat16x2_t zdn, svint16x2_t zm);
+;; svbfloat16x4_t svscale[_bf16_x4] (svbfloat16x4_t zdn, svint16x4_t zm);
+;; The instruction is destructive, so operand 1 is tied to the output
+;; register list; the output needs an aligned-tuple constraint so the
+;; register allocator picks a valid Zd list.
+(define_insn "@aarch64_sve_<optab><mode>"
+  [(set (match_operand:SVE_BFx24 0 "register_operand" "=Uw<vector_count>")
+	(unspec:SVE_BFx24
+	  [(match_operand:SVE_BFx24     1 "register_operand" "0")
+	   (match_operand:<V_INT_EQUIV> 2 "register_operand" "Uw<vector_count>")]
+	  SVE_COND_FP_BINARY_INT))]
+  "TARGET_SME2 && TARGET_SVE_BFSCALE"
+  "bfscale %0, %1, %2"
+)
+
+;; BFSCALE (multiple and single vector)
+;; svbfloat16x2_t svscale[_single_bf16_x2](svbfloat16x2_t zn, svint16_t zm);
+;; svbfloat16x4_t svscale[_single_bf16_x4](svbfloat16x4_t zn, svint16_t zm);
+;; Destructive: operand 1 is tied to the output register list.  The single
+;; vector operand 2 uses "x" since the encoding only reaches z0-z15.
+(define_insn "@aarch64_sve_<optab><mode>_single"
+  [(set (match_operand:SVE_BFx24 0 "register_operand" "=Uw<vector_count>")
+	(unspec:SVE_BFx24
+	  [(match_operand:SVE_BFx24               1 "register_operand" "0")
+	   (match_operand:<SVSCALE_SINGLE_INTARG> 2 "register_operand" "x")]
+	  SVE_COND_FP_BINARY_INT))]
+  "TARGET_SME2 && TARGET_SVE_BFSCALE"
+  "bfscale %0, %1, %2.h"
+)
+
 ;; Predicated floating-point binary operations that take an integer
 ;; as their second operand.
 (define_insn "@aarch64_pred_<optab><mode>"
   [(set (match_operand:SVE_FULL_F_SCALAR 0 "register_operand")
        (unspec:SVE_FULL_F_SCALAR
-         [(match_operand:<VPRED> 1 "register_operand")
-          (match_operand:SI 4 "aarch64_sve_gp_strictness")
+         [(match_operand:<VPRED>           1 "register_operand")
+          (match_operand:SI                4 "aarch64_sve_gp_strictness")
           (match_operand:SVE_FULL_F_SCALAR 2 "register_operand")
-          (match_operand:<V_INT_EQUIV> 3 "register_operand")]
+          (match_operand:<V_INT_EQUIV>     3 "register_operand")]
          SVE_COND_FP_BINARY_INT))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
-     [ w        , Upl , 0 , w ; *              ] <sve_fp_op>\t%Z0.<Vetype>, 
%1/m, %Z0.<Vetype>, %Z3.<Vetype>
-     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%Z0, 
%Z2\;<sve_fp_op>\t%Z0.<Vetype>, %1/m, %Z0.<Vetype>, %Z3.<Vetype>
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w       , Upl , 0 , w ; *               ] <b><sve_fp_op>\t%Z0.<Vetype>, 
%1/m, %Z0.<Vetype>, %Z3.<Vetype>
+     [ ?&w     , Upl , w , w ; yes             ] movprfx\t%Z0, 
%Z2\;<b><sve_fp_op>\t%Z0.<Vetype>, %1/m, %Z0.<Vetype>, %Z3.<Vetype>
   }
   [(set_attr "sve_type" "sve_fp_mul")]
 )
@@ -5538,16 +5565,16 @@ (define_insn "@aarch64_pred_<optab><mode>"
 ;; Predicated floating-point binary operations with merging, taking an
 ;; integer as their second operand.
 (define_expand "@cond_<optab><mode>"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand")
-       (unspec:SVE_FULL_F
+  [(set (match_operand:SVE_FULL_F_BFSCALE 0 "register_operand")
+       (unspec:SVE_FULL_F_BFSCALE
          [(match_operand:<VPRED> 1 "register_operand")
-          (unspec:SVE_FULL_F
+          (unspec:SVE_FULL_F_BFSCALE
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand")
-             (match_operand:<V_INT_EQUIV> 3 "register_operand")]
+             (match_operand:SVE_FULL_F_BFSCALE 2 "register_operand")
+             (match_operand:<V_INT_EQUIV>      3 "register_operand")]
             SVE_COND_FP_BINARY_INT)
-          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
+          (match_operand:SVE_FULL_F_BFSCALE 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE"
 )
@@ -5555,21 +5582,21 @@ (define_expand "@cond_<optab><mode>"
 ;; Predicated floating-point binary operations that take an integer as their
 ;; second operand, with inactive lanes coming from the first operand.
 (define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand")
-       (unspec:SVE_FULL_F
+  [(set (match_operand:SVE_FULL_F_BFSCALE 0 "register_operand")
+       (unspec:SVE_FULL_F_BFSCALE
          [(match_operand:<VPRED> 1 "register_operand")
-          (unspec:SVE_FULL_F
+          (unspec:SVE_FULL_F_BFSCALE
             [(match_operand 4)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F_BFSCALE 2 "register_operand")
              (match_operand:<V_INT_EQUIV> 3 "register_operand")]
             SVE_COND_FP_BINARY_INT)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
-     [ w        , Upl , 0 , w ; *              ] <sve_fp_op>\t%0.<Vetype>, 
%1/m, %0.<Vetype>, %3.<Vetype>
-     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, 
%2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w       , Upl , 0 , w ; *               ] <b><sve_fp_op>\t%0.<Vetype>, 
%1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w     , Upl , w , w ; yes             ] movprfx\t%0, 
%2\;<b><sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
   }
   "&& !rtx_equal_p (operands[1], operands[4])"
   {
@@ -5579,21 +5606,21 @@ (define_insn_and_rewrite "*cond_<optab><mode>_2_relaxed"
 )
 
 (define_insn "*cond_<optab><mode>_2_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand")
-       (unspec:SVE_FULL_F
+  [(set (match_operand:SVE_FULL_F_BFSCALE 0 "register_operand")
+       (unspec:SVE_FULL_F_BFSCALE
          [(match_operand:<VPRED> 1 "register_operand")
-          (unspec:SVE_FULL_F
+          (unspec:SVE_FULL_F_BFSCALE
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F_BFSCALE 2 "register_operand")
              (match_operand:<V_INT_EQUIV> 3 "register_operand")]
             SVE_COND_FP_BINARY_INT)
           (match_dup 2)]
          UNSPEC_SEL))]
   "TARGET_SVE"
-  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
-     [ w        , Upl , 0 , w ; *              ] <sve_fp_op>\t%0.<Vetype>, 
%1/m, %0.<Vetype>, %3.<Vetype>
-     [ ?&w      , Upl , w , w ; yes            ] movprfx\t%0, 
%2\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+  {@ [ cons: =0 , 1   , 2 , 3 ; attrs: movprfx ]
+     [ w       , Upl , 0 , w ; *               ] <b><sve_fp_op>\t%0.<Vetype>, 
%1/m, %0.<Vetype>, %3.<Vetype>
+     [ ?&w     , Upl , w , w ; yes             ] movprfx\t%0, 
%2\;<b><sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
   }
   [(set_attr "sve_type" "sve_fp_mul")]
 )
@@ -5602,22 +5629,22 @@ (define_insn "*cond_<optab><mode>_2_strict"
 ;; their second operand, with the values of inactive lanes being distinct
 ;; from the other inputs.
 (define_insn_and_rewrite "*cond_<optab><mode>_any_relaxed"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand")
-       (unspec:SVE_FULL_F
+  [(set (match_operand:SVE_FULL_F_BFSCALE 0 "register_operand")
+       (unspec:SVE_FULL_F_BFSCALE
          [(match_operand:<VPRED> 1 "register_operand")
-          (unspec:SVE_FULL_F
+          (unspec:SVE_FULL_F_BFSCALE
             [(match_operand 5)
              (const_int SVE_RELAXED_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand")
+             (match_operand:SVE_FULL_F_BFSCALE 2 "register_operand")
              (match_operand:<V_INT_EQUIV> 3 "register_operand")]
             SVE_COND_FP_BINARY_INT)
-          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
+          (match_operand:SVE_FULL_F_BFSCALE 4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
   {@ [ cons: =0 , 1   , 2 , 3 , 4   ]
-     [ &w       , Upl , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, 
%2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-     [ &w       , Upl , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, 
%2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-     [ &w       , Upl , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, 
%2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, 
%2.<Vetype>\;<b><sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, 
%2.<Vetype>\;<b><sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, 
%2.<Vetype>\;<b><sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
      [ ?&w      , Upl , w , w , w   ] #
   }
   "&& 1"
@@ -5640,22 +5667,22 @@ (define_insn_and_rewrite 
"*cond_<optab><mode>_any_relaxed"
 )
 
 (define_insn_and_rewrite "*cond_<optab><mode>_any_strict"
-  [(set (match_operand:SVE_FULL_F 0 "register_operand")
-       (unspec:SVE_FULL_F
+  [(set (match_operand:SVE_FULL_F_BFSCALE 0 "register_operand")
+       (unspec:SVE_FULL_F_BFSCALE
          [(match_operand:<VPRED> 1 "register_operand")
-          (unspec:SVE_FULL_F
+          (unspec:SVE_FULL_F_BFSCALE
             [(match_dup 1)
              (const_int SVE_STRICT_GP)
-             (match_operand:SVE_FULL_F 2 "register_operand")
-             (match_operand:<V_INT_EQUIV> 3 "register_operand")]
+             (match_operand:SVE_FULL_F_BFSCALE 2 "register_operand")
+             (match_operand:<V_INT_EQUIV>      3 "register_operand")]
             SVE_COND_FP_BINARY_INT)
-          (match_operand:SVE_FULL_F 4 "aarch64_simd_reg_or_zero")]
+          (match_operand:SVE_FULL_F_BFSCALE    4 "aarch64_simd_reg_or_zero")]
          UNSPEC_SEL))]
   "TARGET_SVE && !rtx_equal_p (operands[2], operands[4])"
   {@ [ cons: =0 , 1   , 2 , 3 , 4   ]
-     [ &w       , Upl , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, 
%2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-     [ &w       , Upl , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, 
%2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
-     [ &w       , Upl , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, 
%2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , 0 , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, 
%2.<Vetype>\;<b><sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , Dz  ] movprfx\t%0.<Vetype>, %1/z, 
%2.<Vetype>\;<b><sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+     [ &w       , Upl , w , w , 0   ] movprfx\t%0.<Vetype>, %1/m, 
%2.<Vetype>\;<b><sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
      [ ?&w      , Upl , w , w , w   ] #
   }
   "&& reload_completed
@@ -6853,8 +6880,9 @@ (define_insn_and_rewrite 
"*aarch64_cond_abd<mode>_any_strict"
 ;; ---- [FP] Multiplication
 ;; -------------------------------------------------------------------------
 ;; Includes:
-;; - BFMUL (SVE_B16B16)
+;; - BFMUL (SVE_B16B16 || SVE_BFSCALE)
 ;; - FMUL
+;; - BFSCALE (SVE_BFSCALE)
 ;; -------------------------------------------------------------------------
 
 ;; Predicated floating-point multiplication.
@@ -6894,6 +6922,40 @@ (define_insn "@aarch64_mul_lane_<mode>"
   [(set_attr "sve_type" "sve_fp_mul")]
 )
 
+;; BFMUL (multiple vectors)
+;; svbfloat16x2_t svmul[_bf16_x2](svbfloat16x2_t zd, svbfloat16x2_t zm)
+;;   __arm_streaming;
+;; svbfloat16x4_t svmul[_bf16_x4](svbfloat16x4_t zd, svbfloat16x4_t zm)
+;;   __arm_streaming;
+;; BFMUL { <Zd1>.H-<Zd2>.H }, { <Zn1>.H-<Zn2>.H }, { <Zm1>.H-<Zm2>.H }
+;; BFMUL { <Zd1>.H-<Zd4>.H }, { <Zn1>.H-<Zn4>.H }, { <Zm1>.H-<Zm4>.H }
+;; Non-destructive: all three register lists are independent, but each
+;; needs an aligned-tuple constraint on its operand.
+(define_insn "@aarch64_sve_<optab><mode>"
+  [(set (match_operand:SVE_BFx24 0 "register_operand" "=Uw<vector_count>")
+	(unspec:SVE_BFx24
+	  [(match_operand:SVE_BFx24 1 "register_operand" "Uw<vector_count>")
+	   (match_operand:SVE_BFx24 2 "register_operand" "Uw<vector_count>")]
+	  SVE_FP_MUL))]
+  "TARGET_SME2 && TARGET_SVE_BFSCALE"
+  "bfmul %0, %1, %2"
+)
+
+;; BFMUL (multiple and single vector)
+;; svbfloat16x2_t svmul[_single_bf16_x2](svbfloat16x2_t zd, svbfloat16_t zm)
+;;   __arm_streaming;
+;; svbfloat16x4_t svmul[_single_bf16_x4](svbfloat16x4_t zd, svbfloat16_t zm)
+;;   __arm_streaming;
+;; BFMUL { <Zd1>.H-<Zd2>.H }, { <Zn1>.H-<Zn2>.H }, <Zm>.H
+;; BFMUL { <Zd1>.H-<Zd4>.H }, { <Zn1>.H-<Zn4>.H }, <Zm>.H
+;; The single vector operand 2 uses "x" since the encoding only reaches
+;; z0-z15.
+(define_insn "@aarch64_sve_<optab><mode>_single"
+  [(set (match_operand:SVE_BFx24 0 "register_operand" "=Uw<vector_count>")
+	(unspec:SVE_BFx24
+	  [(match_operand:SVE_BFx24 1 "register_operand" "Uw<vector_count>")
+	   (match_operand:<VSINGLE> 2 "register_operand" "x")]
+	  SVE_FP_MUL))]
+  "TARGET_SME2 && TARGET_SVE_BFSCALE"
+  "bfmul %0, %1, %2.h"
+)
+
 ;; -------------------------------------------------------------------------
 ;; ---- [FP] Division
 ;; -------------------------------------------------------------------------
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index 0e596b59744..1726f0243b7 100644
--- a/gcc/config/aarch64/aarch64.h
+++ b/gcc/config/aarch64/aarch64.h
@@ -370,6 +370,7 @@ constexpr auto AARCH64_FL_DEFAULT_ISA_MODE ATTRIBUTE_UNUSED
 #define TARGET_BF16_FP AARCH64_HAVE_ISA (BF16)
 #define TARGET_BF16_SIMD (TARGET_BF16_FP && TARGET_SIMD)
 #define TARGET_SVE_BF16 (TARGET_BF16_FP && TARGET_SVE)
+#define TARGET_SVE_BFSCALE (AARCH64_HAVE_ISA (SVE_BFSCALE))
 
 /* PAUTH instructions are enabled through +pauth.  */
 #define TARGET_PAUTH AARCH64_HAVE_ISA (PAUTH)
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 517b2808b5f..6e5a6bb9c0e 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -494,10 +494,13 @@ (define_mode_iterator SVE_PARTIAL_F [VNx2HF VNx4HF 
VNx2SF])
 (define_mode_iterator SVE_F [SVE_PARTIAL_F SVE_FULL_F])
 
 ;; Fully-packed SVE floating-point vector modes and their scalar equivalents.
-(define_mode_iterator SVE_FULL_F_SCALAR [SVE_FULL_F GPF_HF])
+(define_mode_iterator SVE_FULL_F_SCALAR [SVE_FULL_F GPF_HF VNx8BF])
 
 (define_mode_iterator SVE_FULL_F_B16B16 [(VNx8BF "TARGET_SSVE_B16B16") 
SVE_FULL_F])
 
+(define_mode_iterator SVE_FULL_F_BFSCALE [SVE_FULL_F
+                                        (VNx8BF "TARGET_SVE_BFSCALE")])
+
 (define_mode_iterator SVE_PARTIAL_F_B16B16 [(VNx2BF "TARGET_SSVE_B16B16")
                                            (VNx4BF "TARGET_SSVE_B16B16")
                                            SVE_PARTIAL_F])
@@ -733,6 +736,8 @@ (define_mode_iterator SVE_Fx24 [(VNx16BF 
"TARGET_SSVE_B16B16")
                                VNx16HF VNx8SF VNx4DF
                                VNx32HF VNx16SF VNx8DF])
 
+(define_mode_iterator SVE_BFx24 [VNx16BF VNx32BF])
+
 (define_mode_iterator SVE_SFx24 [VNx8SF VNx16SF])
 
 ;; The modes used to represent different ZA access sizes.
@@ -794,6 +799,7 @@ (define_c_enum "unspec"
     UNSPEC_FMAX                ; Used in aarch64-simd.md.
     UNSPEC_FMAXNMV     ; Used in aarch64-simd.md.
     UNSPEC_FMAXV       ; Used in aarch64-simd.md.
+    UNSPEC_FMUL                ; Used in aarch64-sve.md.
     UNSPEC_FMIN                ; Used in aarch64-simd.md.
     UNSPEC_FMINNMV     ; Used in aarch64-simd.md.
     UNSPEC_FMINV       ; Used in aarch64-simd.md.
@@ -2156,6 +2162,8 @@ (define_mode_attr V_INT_EQUIV [(V8QI "V8QI") (V16QI 
"V16QI")
                               (VNx16QI "VNx16QI")
                               (VNx8HI  "VNx8HI") (VNx8HF "VNx8HI")
                               (VNx8BF  "VNx8HI")
+                              (VNx16BF  "VNx16HI")
+                              (VNx32BF  "VNx32HI")
                               (VNx4SI  "VNx4SI") (VNx4SF "VNx4SI")
                               (VNx2DI  "VNx2DI") (VNx2DF "VNx2DI")
                               (VNx8SF  "VNx8SI") (VNx16SF "VNx16SI")
@@ -2723,7 +2731,8 @@ (define_mode_attr vec_or_offset [(V8QI "vec") (V16QI 
"vec") (V4HI "vec")
                                 (V8HI "vec") (V2SI "vec") (V4SI "vec")
                                 (V2DI "vec") (DI "offset")])
 
-(define_mode_attr b [(V4BF "b") (V4HF "") (V8BF "b") (V8HF "")
+(define_mode_attr b [(BF "b") (HF "")
+                    (V4BF "b") (V4HF "") (V8BF "b") (V8HF "")
                     (VNx2BF "b") (VNx2HF "") (VNx2SF "")
                     (VNx4BF "b") (VNx4HF "") (VNx4SF "")
                     (VNx8BF "b") (VNx8HF "") (VNx2DF "")
@@ -2758,6 +2767,19 @@ (define_mode_attr aligned_fpr [(VNx16QI "w") (VNx8HI "w")
 (define_mode_attr LD1_EXTENDQ_MEM [(VNx4SI "VNx1SI") (VNx4SF "VNx1SI")
                                   (VNx2DI "VNx1DI") (VNx2DF "VNx1DI")])
 
+;; Maps the output type of svscale to the corresponding int vector type in the
+;; second argument.
+(define_mode_attr SVSCALE_SINGLE_INTARG [
+       (VNx16HF "VNx8HI") ;; f16_x2 -> i16
+       (VNx32HF "VNx8HI") ;; f16_x4 -> i16
+       (VNx16BF "VNx8HI") ;; bf16_x2 -> i16
+       (VNx32BF "VNx8HI") ;; bf16_x4 -> i16
+       (VNx8SF  "VNx4SI") ;; f32_x2 -> i32
+       (VNx16SF "VNx4SI") ;; f32_x4 -> i32
+       (VNx4DF  "VNx2DI") ;; f64_x2 -> i64
+       (VNx8DF  "VNx2DI") ;; f64_x4 -> i64
+])
+
 ;; -------------------------------------------------------------------
 ;; Code Iterators
 ;; -------------------------------------------------------------------
@@ -3529,6 +3551,8 @@ (define_int_iterator SVE_COND_FP_ADD [UNSPEC_COND_FADD])
 (define_int_iterator SVE_COND_FP_SUB [UNSPEC_COND_FSUB])
 (define_int_iterator SVE_COND_FP_MUL [UNSPEC_COND_FMUL])
 
+(define_int_iterator SVE_FP_MUL [UNSPEC_FMUL])
+
 (define_int_iterator SVE_COND_FP_BINARY_I1 [UNSPEC_COND_FMAX
                                            UNSPEC_COND_FMAXNM
                                            UNSPEC_COND_FMIN
@@ -4079,6 +4103,7 @@ (define_int_attr optab [(UNSPEC_ANDF "and")
                        (UNSPEC_FMINNMQV "fminnmqv")
                        (UNSPEC_FMINNMV "smin")
                        (UNSPEC_FMINV "smin_nan")
+                       (UNSPEC_FMUL "fmul")
                        (UNSPEC_SMUL_HIGHPART "smulh")
                        (UNSPEC_UMUL_HIGHPART "umulh")
                        (UNSPEC_FMLA "fma")
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index df4331fbad0..d1971639704 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -16333,7 +16333,7 @@ as a parameter of @code{free} call or compared with 
@code{NULL}.  If
 with @option{-fmalloc-dce=2} also comparisons with @code{NULL} pointer are
 considered safe to remove.
 
-The default is @option{-fmalloc-dce=2}.  See also @option{-fallocation-dce}. 
+The default is @option{-fmalloc-dce=2}.  See also @option{-fallocation-dce}.
 
 @opindex fmove-loop-invariants
 @item -fmove-loop-invariants
@@ -23092,6 +23092,9 @@ Enable the Checked Pointer Arithmetic instructions.
 @item sve-b16b16
 Enable the SVE non-widening brain floating-point (@code{bf16}) extension.
 This only has an effect when @code{sve2} or @code{sme2} are also enabled.
+@item sve-bfscale
+Enable the SVE BFloat16 floating-point adjust exponent vector instructions
+(@code{bfscale}) extension.
 
 @end table
 
diff --git a/gcc/testsuite/gcc.target/aarch64/pragma_cpp_predefs_4.c 
b/gcc/testsuite/gcc.target/aarch64/pragma_cpp_predefs_4.c
index 3799fb46df1..d49a835ad45 100644
--- a/gcc/testsuite/gcc.target/aarch64/pragma_cpp_predefs_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/pragma_cpp_predefs_4.c
@@ -105,6 +105,11 @@
 #error Foo
 #endif
 
+#pragma GCC target "+nothing+sve-bfscale"
+#ifndef __ARM_FEATURE_SVE_BFSCALE
+#error Foo
+#endif
+
 #pragma GCC target "+nothing+sve2+sme-f16f16"
 #ifndef __ARM_FEATURE_SME_F16F16
 #error Foo
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/bfmul_bf16.c 
b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/bfmul_bf16.c
new file mode 100644
index 00000000000..e64c1cffa71
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/bfmul_bf16.c
@@ -0,0 +1,49 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sve,+sve2,+sme,+sme2,+sve-bfscale"
+
+#include "test_sve_acle.h"
+
+/*
+** test_bfmul_bf16x2:
+**     bfmul {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z2\.h - z3\.h}
+**     ret
+*/
+svbfloat16x2_t
+test_bfmul_bf16x2 (svbfloat16x2_t zn, svbfloat16x2_t zm)
+{
+  return svmul_bf16_x2 (zn, zm);
+}
+
+/*
+** test_bfmul_bf16x4:
+**     bfmul {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+**     ret
+*/
+svbfloat16x4_t
+test_bfmul_bf16x4 (svbfloat16x4_t zn, svbfloat16x4_t zm)
+{
+  return svmul_bf16_x4 (zn, zm);
+}
+
+/*
+** test_bfmul_single_bf16x2:
+**     bfmul {z0\.h - z1\.h}, {z0\.h - z1\.h}, z2\.h
+**     ret
+*/
+svbfloat16x2_t
+test_bfmul_single_bf16x2 (svbfloat16x2_t zn, svbfloat16_t zm)
+{
+  return svmul_single_bf16_x2 (zn, zm);
+}
+
+/*
+** test_bfmul_single_bf16x4:
+**     bfmul {z0\.h - z3\.h}, {z0\.h - z3\.h}, z4\.h
+**     ret
+*/
+svbfloat16x4_t
+test_bfmul_single_bf16x4 (svbfloat16x4_t zn, svbfloat16_t zm)
+{
+  return svmul_single_bf16_x4 (zn, zm);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/bfscale_bf16.c 
b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/bfscale_bf16.c
new file mode 100644
index 00000000000..12d20a0f015
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/bfscale_bf16.c
@@ -0,0 +1,49 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sve,+sve2,+sme,+sme2,+sve-bfscale"
+
+#include "test_sve_acle.h"
+
+/*
+** test_bfscale_bf16x2:
+**     bfscale {z0\.h - z1\.h}, {z0\.h - z1\.h}, {z2\.h - z3\.h}
+**     ret
+*/
+svbfloat16x2_t
+test_bfscale_bf16x2 (svbfloat16x2_t zdn, svint16x2_t zm)
+{
+  return svscale_bf16_x2 (zdn, zm);
+}
+
+/*
+** test_bfscale_bf16x4:
+**     bfscale {z0\.h - z3\.h}, {z0\.h - z3\.h}, {z4\.h - z7\.h}
+**     ret
+*/
+svbfloat16x4_t
+test_bfscale_bf16x4 (svbfloat16x4_t zdn, svint16x4_t zm)
+{
+  return svscale_bf16_x4 (zdn, zm);
+}
+
+/*
+** test_bfscale_single_bf16x2:
+**     bfscale {z0\.h - z1\.h}, {z0\.h - z1\.h}, z2\.h
+**     ret
+*/
+svbfloat16x2_t
+test_bfscale_single_bf16x2 (svbfloat16x2_t zdn, svint16_t zm)
+{
+  return svscale_single_bf16_x2 (zdn, zm);
+}
+
+/*
+** test_bfscale_single_bf16x4:
+**     bfscale {z0\.h - z3\.h}, {z0\.h - z3\.h}, z4\.h
+**     ret
+*/
+svbfloat16x4_t
+test_bfscale_single_bf16x4 (svbfloat16x4_t zdn, svint16_t zm)
+{
+  return svscale_single_bf16_x4 (zdn, zm);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/bfscale_bf16_pred.c 
b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/bfscale_bf16_pred.c
new file mode 100644
index 00000000000..fde19b3288b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/bfscale_bf16_pred.c
@@ -0,0 +1,76 @@
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#pragma GCC target "+sve,+sve2,+sme,+sme2,+sve-bfscale"
+
+#include "test_sve_acle.h"
+
+/*
+** test_bfscale_bf16_m:
+**     bfscale z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+svbfloat16_t
+test_bfscale_bf16_m (svbool_t pg, svbfloat16_t zdn, svint16_t zm)
+{
+  return svscale_bf16_m (pg, zdn, zm);
+}
+
+/*
+** test_bfscale_bf16_z:
+**     movprfx z0\.h, p0/z, z0\.h
+**     bfscale z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+svbfloat16_t
+test_bfscale_bf16_z (svbool_t pg, svbfloat16_t zdn, svint16_t zm)
+{
+  return svscale_bf16_z (pg, zdn, zm);
+}
+
+/*
+** test_bfscale_bf16_x:
+**     bfscale z0\.h, p0/m, z0\.h, z1\.h
+**     ret
+*/
+svbfloat16_t
+test_bfscale_bf16_x (svbool_t pg, svbfloat16_t zdn, svint16_t zm)
+{
+  return svscale_bf16_x (pg, zdn, zm);
+}
+
+/*
+** test_bfscale_n_bf16_m:
+**     mov     z([0-9]+)\.h, w0
+**     bfscale z0\.h, p0/m, z0\.h, z\1\.h
+**     ret
+*/
+svbfloat16_t
+test_bfscale_n_bf16_m (svbool_t pg, svbfloat16_t zdn, int16_t zm)
+{
+  return svscale_n_bf16_m (pg, zdn, zm);
+}
+
+/*
+** test_bfscale_n_bf16_z:
+**     mov     z([0-9]+)\.h, w0
+**     movprfx z0\.h, p0/z, z0\.h
+**     bfscale z0\.h, p0/m, z0\.h, z\1\.h
+**     ret
+*/
+svbfloat16_t
+test_bfscale_n_bf16_z (svbool_t pg, svbfloat16_t zdn, int16_t zm)
+{
+  return svscale_n_bf16_z (pg, zdn, zm);
+}
+
+/*
+** test_bfscale_n_bf16_x:
+**     mov     z([0-9]+)\.h, w0
+**     bfscale z0\.h, p0/m, z0\.h, z\1\.h
+**     ret
+*/
+svbfloat16_t
+test_bfscale_n_bf16_x (svbool_t pg, svbfloat16_t zdn, int16_t zm)
+{
+  return svscale_n_bf16_x (pg, zdn, zm);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/bfscale.c 
b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/bfscale.c
new file mode 100644
index 00000000000..a1e867115cd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/bfscale.c
@@ -0,0 +1,63 @@
+// { dg-options "-std=c23 -fsyntax-only" }
+// { dg-do compile }
+
+#pragma GCC target "+sve,+sve2,+sme,+sme2,+sve-bfscale"
+static_assert (__ARM_FEATURE_SVE == 1);
+static_assert (__ARM_FEATURE_SVE_BFSCALE == 1);
+#include <arm_sve.h>
+
+/*
+  // BFloat16 floating-point adjust exponent vectors.
+  // Only if __ARM_FEATURE_SVE_BFSCALE != 0
+  svbfloat16_t svscale[_bf16]_m   (svbool_t pg, svbfloat16_t zdn, svint16_t zm);
+  svbfloat16_t svscale[_bf16]_x   (svbool_t pg, svbfloat16_t zdn, svint16_t zm);
+  svbfloat16_t svscale[_bf16]_z   (svbool_t pg, svbfloat16_t zdn, svint16_t zm);
+  svbfloat16_t svscale[_n_bf16]_m (svbool_t pg, svbfloat16_t zdn,   int16_t zm);
+  svbfloat16_t svscale[_n_bf16]_x (svbool_t pg, svbfloat16_t zdn,   int16_t zm);
+  svbfloat16_t svscale[_n_bf16]_z (svbool_t pg, svbfloat16_t zdn,   int16_t zm);
+
+  // BFloat16 floating-point adjust exponent vectors.
+  // Only if __ARM_FEATURE_SVE_BFSCALE != 0
+  svbfloat16x2_t svscale[_bf16_x2]        (svbfloat16x2_t zdn, svint16x2_t zm);
+  svbfloat16x2_t svscale[_single_bf16_x2] (svbfloat16x2_t zn,  svint16_t   zm);
+  svbfloat16x4_t svscale[_bf16_x4]        (svbfloat16x4_t zdn, svint16x4_t zm);
+  svbfloat16x4_t svscale[_single_bf16_x4] (svbfloat16x4_t zn,  svint16_t   zm);
+
+  // BFloat16 Multi-vector floating-point multiply
+  // Only if __ARM_FEATURE_SVE_BFSCALE != 0
+  svbfloat16x2_t svmul[_bf16_x2]        (svbfloat16x2_t zd, svbfloat16x2_t zm) __arm_streaming;
+  svbfloat16x4_t svmul[_bf16_x4]        (svbfloat16x4_t zd, svbfloat16x4_t zm) __arm_streaming;
+  svbfloat16x2_t svmul[_single_bf16_x2] (svbfloat16x2_t zd, svbfloat16_t   zm) __arm_streaming;
+  svbfloat16x4_t svmul[_single_bf16_x4] (svbfloat16x4_t zd, svbfloat16_t   zm) __arm_streaming;
+*/
+
+void
+svscale_ok (svbool_t b, svbfloat16_t bf16, svbfloat16x2_t bf16x2,
+           svbfloat16x4_t bf16x4, svint16_t i16, int16_t i16n,
+           svint16x2_t i16x2, svint16x4_t i16x4)
+{
+  bf16 = svscale_bf16_m (b, bf16, i16);
+  bf16 = svscale_bf16_x (b, bf16, i16);
+  bf16 = svscale_bf16_z (b, bf16, i16);
+
+  bf16 = svscale_n_bf16_m (b, bf16, i16n);
+  bf16 = svscale_n_bf16_x (b, bf16, i16n);
+  bf16 = svscale_n_bf16_z (b, bf16, i16n);
+
+  bf16x2 = svscale_bf16_x2 (bf16x2, i16x2);
+  bf16x4 = svscale_bf16_x4 (bf16x4, i16x4);
+
+  bf16x2 = svscale_single_bf16_x2 (bf16x2, i16);
+  bf16x4 = svscale_single_bf16_x4 (bf16x4, i16);
+}
+
+void
+svmul_ok (svbfloat16_t bf16, svbfloat16x2_t bf16x2,
+         svbfloat16x4_t bf16x4) __arm_streaming
+{
+  svmul_bf16_x2 (bf16x2, bf16x2);
+  svmul_bf16_x4 (bf16x4, bf16x4);
+
+  svmul_single_bf16_x2 (bf16x2, bf16);
+  svmul_single_bf16_x4 (bf16x4, bf16);
+}
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 67f1a3c8230..53b7c82fbbe 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -338,7 +338,7 @@ proc check_weak_available { } {
 
     # VxWorks supports it only since VxWorks 7 (assumed >= r2) for RTPs.
     # Kernel mode works fine as well for our testsuite's purposes.
- 
+
     if { [istarget *-*-vxworks*] } {
        return [istarget *-*-vxworks7*]
     }
@@ -6216,7 +6216,7 @@ foreach { armfunc armflag armdefs } {
 #        /* { dg-add-options arm_cpu_xscale } */
 #       /* { dg-require-effective-target arm_xscale_multilib } */
 
-# NOTE: -mcpu does not override -mtune, so to ensure the tuning is consistent 
+# NOTE: -mcpu does not override -mtune, so to ensure the tuning is consistent
# for tests using these flags all entries should set -mcpu and -mtune explicitly
 
 # This table should only be used to set -mcpu= (and associated
@@ -12565,7 +12565,7 @@ set exts {
 # archiecture for SME and the features that require it.
 set exts_sve2 {
     "sme-b16b16" "sme-f16f16" "sme-i16i64" "sme" "sme2" "sme2p1"
-    "ssve-fp8dot2" "ssve-fp8dot4" "ssve-fp8fma"
+    "ssve-fp8dot2" "ssve-fp8dot4" "ssve-fp8fma" "sve-bfscale"
 }
 
 foreach { aarch64_ext } $exts {
@@ -13462,7 +13462,7 @@ proc check_effective_target_autoincdec { } {
 #
 proc check_effective_target_supports_stack_clash_protection { } {
 
-    if { [check_effective_target_x86] 
+    if { [check_effective_target_x86]
          || [istarget powerpc*-*-*] || [istarget rs6000*-*-*]
          || [istarget aarch64*-**] || [istarget s390*-*-*]
          || [istarget loongarch64*-**] || [istarget riscv64*-**] } {
@@ -14452,7 +14452,7 @@ proc check_nvptx_default_ptx_isa_version_at_least { major minor } {
             "#error unsupported" \
             "#endif"]
     set src [join $src "\n"]
-    
+
     set res [check_no_compiler_messages $name assembly $src ""]
 
     return $res
@@ -14487,7 +14487,7 @@ proc check_nvptx_default_ptx_isa_target_architecture_at_least { ta } {
             "#error unsupported" \
             "#endif"]
     set src [join $src "\n"]
-    
+
     set res [check_no_compiler_messages $name assembly $src ""]
 
     return $res
@@ -14602,7 +14602,7 @@ proc check_effective_target_alarm { } {
        #include <stdlib.h>
        #include <unistd.h>
        void do_exit(int i) { exit (0); }
-       int main (void) { 
+       int main (void) {
          struct sigaction s;
          sigemptyset (&s.sa_mask);
          s.sa_handler = exit;
-- 
2.43.0

Reply via email to