commit:     549c51ef7f24622f906ad3941b9dcea359ada5e1
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar 11 11:30:48 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar 11 11:30:48 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=549c51ef

Linux patch 5.10.105

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1104_linux-5.10.105.patch | 3857 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3861 insertions(+)

diff --git a/0000_README b/0000_README
index f4f4b91a..6fbdd908 100644
--- a/0000_README
+++ b/0000_README
@@ -459,6 +459,10 @@ Patch:  1103_linux-5.10.104.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.10.104
 
+Patch:  1104_linux-5.10.105.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.10.105
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1104_linux-5.10.105.patch b/1104_linux-5.10.105.patch
new file mode 100644
index 00000000..aa2a1fc7
--- /dev/null
+++ b/1104_linux-5.10.105.patch
@@ -0,0 +1,3857 @@
+diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
+index 985181dba0bac..6bd97cd50d625 100644
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -60,8 +60,8 @@ privileged data touched during the speculative execution.
+ Spectre variant 1 attacks take advantage of speculative execution of
+ conditional branches, while Spectre variant 2 attacks use speculative
+ execution of indirect branches to leak privileged memory.
+-See :ref:`[1] <spec_ref1>` :ref:`[5] <spec_ref5>` :ref:`[7] <spec_ref7>`
+-:ref:`[10] <spec_ref10>` :ref:`[11] <spec_ref11>`.
++See :ref:`[1] <spec_ref1>` :ref:`[5] <spec_ref5>` :ref:`[6] <spec_ref6>`
++:ref:`[7] <spec_ref7>` :ref:`[10] <spec_ref10>` :ref:`[11] <spec_ref11>`.
+ 
+ Spectre variant 1 (Bounds Check Bypass)
+ ---------------------------------------
+@@ -131,6 +131,19 @@ steer its indirect branch speculations to gadget code, and measure the
+ speculative execution's side effects left in level 1 cache to infer the
+ victim's data.
+ 
++Yet another variant 2 attack vector is for the attacker to poison the
++Branch History Buffer (BHB) to speculatively steer an indirect branch
++to a specific Branch Target Buffer (BTB) entry, even if the entry isn't
++associated with the source address of the indirect branch. Specifically,
++the BHB might be shared across privilege levels even in the presence of
++Enhanced IBRS.
++
++Currently the only known real-world BHB attack vector is via
++unprivileged eBPF. Therefore, it's highly recommended to not enable
++unprivileged eBPF, especially when eIBRS is used (without retpolines).
++For a full mitigation against BHB attacks, it's recommended to use
++retpolines (or eIBRS combined with retpolines).
++
+ Attack scenarios
+ ----------------
+ 
+@@ -364,13 +377,15 @@ The possible values in this file are:
+ 
+   - Kernel status:
+ 
+-  ====================================  =================================
+-  'Not affected'                        The processor is not vulnerable
+-  'Vulnerable'                          Vulnerable, no mitigation
+-  'Mitigation: Full generic retpoline'  Software-focused mitigation
+-  'Mitigation: Full AMD retpoline'      AMD-specific software mitigation
+-  'Mitigation: Enhanced IBRS'           Hardware-focused mitigation
+-  ====================================  =================================
++  ========================================  =================================
++  'Not affected'                            The processor is not vulnerable
++  'Mitigation: None'                        Vulnerable, no mitigation
++  'Mitigation: Retpolines'                  Use Retpoline thunks
++  'Mitigation: LFENCE'                      Use LFENCE instructions
++  'Mitigation: Enhanced IBRS'               Hardware-focused mitigation
++  'Mitigation: Enhanced IBRS + Retpolines'  Hardware-focused + Retpolines
++  'Mitigation: Enhanced IBRS + LFENCE'      Hardware-focused + LFENCE
++  ========================================  =================================
+ 
+   - Firmware status: Show if Indirect Branch Restricted Speculation (IBRS) is
+     used to protect against Spectre variant 2 attacks when calling firmware (x86 only).
+@@ -584,12 +599,13 @@ kernel command line.
+ 
+               Specific mitigations can also be selected manually:
+ 
+-              retpoline
+-                                      replace indirect branches
+-              retpoline,generic
+-                                      google's original retpoline
+-              retpoline,amd
+-                                      AMD-specific minimal thunk
++                retpoline               auto pick between generic,lfence
++                retpoline,generic       Retpolines
++                retpoline,lfence        LFENCE; indirect branch
++                retpoline,amd           alias for retpoline,lfence
++                eibrs                   enhanced IBRS
++                eibrs,retpoline         enhanced IBRS + Retpolines
++                eibrs,lfence            enhanced IBRS + LFENCE
+ 
+               Not specifying this option is equivalent to
+               spectre_v2=auto.
+@@ -730,7 +746,7 @@ AMD white papers:
+ 
+ .. _spec_ref6:
+ 
+-[6] `Software techniques for managing speculation on AMD processors <https://developer.amd.com/wp-content/resources/90343-B_SoftwareTechniquesforManagingSpeculation_WP_7-18Update_FNL.pdf>`_.
++[6] `Software techniques for managing speculation on AMD processors <https://developer.amd.com/wp-content/resources/Managing-Speculation-on-AMD-Processors.pdf>`_.
+ 
+ ARM white papers:
+ 
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index d00618967854d..611172f68bb57 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -4957,8 +4957,12 @@
+                       Specific mitigations can also be selected manually:
+ 
+                       retpoline         - replace indirect branches
+-                      retpoline,generic - google's original retpoline
+-                      retpoline,amd     - AMD-specific minimal thunk
++                      retpoline,generic - Retpolines
++                      retpoline,lfence  - LFENCE; indirect branch
++                      retpoline,amd     - alias for retpoline,lfence
++                      eibrs             - enhanced IBRS
++                      eibrs,retpoline   - enhanced IBRS + Retpolines
++                      eibrs,lfence      - enhanced IBRS + LFENCE
+ 
+                       Not specifying this option is equivalent to
+                       spectre_v2=auto.
+diff --git a/Documentation/arm64/cpu-feature-registers.rst b/Documentation/arm64/cpu-feature-registers.rst
+index 328e0c454fbd4..749ae970c3195 100644
+--- a/Documentation/arm64/cpu-feature-registers.rst
++++ b/Documentation/arm64/cpu-feature-registers.rst
+@@ -235,7 +235,15 @@ infrastructure:
+      | DPB                          | [3-0]   |    y    |
+      +------------------------------+---------+---------+
+ 
+-  6) ID_AA64MMFR2_EL1 - Memory model feature register 2
++  6) ID_AA64MMFR0_EL1 - Memory model feature register 0
++
++     +------------------------------+---------+---------+
++     | Name                         |  bits   | visible |
++     +------------------------------+---------+---------+
++     | ECV                          | [63-60] |    y    |
++     +------------------------------+---------+---------+
++
++  7) ID_AA64MMFR2_EL1 - Memory model feature register 2
+ 
+      +------------------------------+---------+---------+
+      | Name                         |  bits   | visible |
+@@ -243,7 +251,7 @@ infrastructure:
+      | AT                           | [35-32] |    y    |
+      +------------------------------+---------+---------+
+ 
+-  7) ID_AA64ZFR0_EL1 - SVE feature ID register 0
++  8) ID_AA64ZFR0_EL1 - SVE feature ID register 0
+ 
+      +------------------------------+---------+---------+
+      | Name                         |  bits   | visible |
+@@ -267,6 +275,23 @@ infrastructure:
+      | SVEVer                       | [3-0]   |    y    |
+      +------------------------------+---------+---------+
+ 
++  8) ID_AA64MMFR1_EL1 - Memory model feature register 1
++
++     +------------------------------+---------+---------+
++     | Name                         |  bits   | visible |
++     +------------------------------+---------+---------+
++     | AFP                          | [47-44] |    y    |
++     +------------------------------+---------+---------+
++
++  9) ID_AA64ISAR2_EL1 - Instruction set attribute register 2
++
++     +------------------------------+---------+---------+
++     | Name                         |  bits   | visible |
++     +------------------------------+---------+---------+
++     | RPRES                        | [7-4]   |    y    |
++     +------------------------------+---------+---------+
++
++
+ Appendix I: Example
+ -------------------
+ 
+diff --git a/Documentation/arm64/elf_hwcaps.rst b/Documentation/arm64/elf_hwcaps.rst
+index bbd9cf54db6c7..e88d245d426da 100644
+--- a/Documentation/arm64/elf_hwcaps.rst
++++ b/Documentation/arm64/elf_hwcaps.rst
+@@ -245,6 +245,18 @@ HWCAP2_MTE
+     Functionality implied by ID_AA64PFR1_EL1.MTE == 0b0010, as described
+     by Documentation/arm64/memory-tagging-extension.rst.
+ 
++HWCAP2_ECV
++
++    Functionality implied by ID_AA64MMFR0_EL1.ECV == 0b0001.
++
++HWCAP2_AFP
++
++    Functionality implied by ID_AA64MFR1_EL1.AFP == 0b0001.
++
++HWCAP2_RPRES
++
++    Functionality implied by ID_AA64ISAR2_EL1.RPRES == 0b0001.
++
+ 4. Unused AT_HWCAP bits
+ -----------------------
+ 
+diff --git a/Makefile b/Makefile
+index 6e6efe5516872..ea665736db040 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 104
++SUBLEVEL = 105
+ EXTRAVERSION =
+ NAME = Dare mighty things
+ 
+diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
+index 72627c5fb3b2c..24a1f498b3b5f 100644
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -107,6 +107,16 @@
+       .endm
+ #endif
+ 
++#if __LINUX_ARM_ARCH__ < 7
++      .macro  dsb, args
++      mcr     p15, 0, r0, c7, c10, 4
++      .endm
++
++      .macro  isb, args
++      mcr     p15, 0, r0, c7, c5, 4
++      .endm
++#endif
++
+       .macro asm_trace_hardirqs_off, save=1
+ #if defined(CONFIG_TRACE_IRQFLAGS)
+       .if \save
+diff --git a/arch/arm/include/asm/spectre.h b/arch/arm/include/asm/spectre.h
+new file mode 100644
+index 0000000000000..d1fa5607d3aa3
+--- /dev/null
++++ b/arch/arm/include/asm/spectre.h
+@@ -0,0 +1,32 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++#ifndef __ASM_SPECTRE_H
++#define __ASM_SPECTRE_H
++
++enum {
++      SPECTRE_UNAFFECTED,
++      SPECTRE_MITIGATED,
++      SPECTRE_VULNERABLE,
++};
++
++enum {
++      __SPECTRE_V2_METHOD_BPIALL,
++      __SPECTRE_V2_METHOD_ICIALLU,
++      __SPECTRE_V2_METHOD_SMC,
++      __SPECTRE_V2_METHOD_HVC,
++      __SPECTRE_V2_METHOD_LOOP8,
++};
++
++enum {
++      SPECTRE_V2_METHOD_BPIALL = BIT(__SPECTRE_V2_METHOD_BPIALL),
++      SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
++      SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
++      SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
++      SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8),
++};
++
++void spectre_v2_update_state(unsigned int state, unsigned int methods);
++
++int spectre_bhb_update_vectors(unsigned int method);
++
++#endif
+diff --git a/arch/arm/include/asm/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h
+index 4a91428c324db..fad45c884e988 100644
+--- a/arch/arm/include/asm/vmlinux.lds.h
++++ b/arch/arm/include/asm/vmlinux.lds.h
+@@ -26,6 +26,19 @@
+ #define ARM_MMU_DISCARD(x)    x
+ #endif
+ 
++/*
++ * ld.lld does not support NOCROSSREFS:
++ * https://github.com/ClangBuiltLinux/linux/issues/1609
++ */
++#ifdef CONFIG_LD_IS_LLD
++#define NOCROSSREFS
++#endif
++
++/* Set start/end symbol names to the LMA for the section */
++#define ARM_LMA(sym, section)                                         \
++      sym##_start = LOADADDR(section);                                \
++      sym##_end = LOADADDR(section) + SIZEOF(section)
++
+ #define PROC_INFO                                                     \
+               . = ALIGN(4);                                           \
+               __proc_info_begin = .;                                  \
+@@ -110,19 +123,31 @@
+  * only thing that matters is their relative offsets
+  */
+ #define ARM_VECTORS                                                   \
+-      __vectors_start = .;                                            \
+-      .vectors 0xffff0000 : AT(__vectors_start) {                     \
+-              *(.vectors)                                             \
++      __vectors_lma = .;                                              \
++      OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) {            \
++              .vectors {                                              \
++                      *(.vectors)                                     \
++              }                                                       \
++              .vectors.bhb.loop8 {                                    \
++                      *(.vectors.bhb.loop8)                           \
++              }                                                       \
++              .vectors.bhb.bpiall {                                   \
++                      *(.vectors.bhb.bpiall)                          \
++              }                                                       \
+       }                                                               \
+-      . = __vectors_start + SIZEOF(.vectors);                         \
+-      __vectors_end = .;                                              \
++      ARM_LMA(__vectors, .vectors);                                   \
++      ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8);               \
++      ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall);             \
++      . = __vectors_lma + SIZEOF(.vectors) +                          \
++              SIZEOF(.vectors.bhb.loop8) +                            \
++              SIZEOF(.vectors.bhb.bpiall);                            \
+                                                                       \
+-      __stubs_start = .;                                              \
+-      .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {            \
++      __stubs_lma = .;                                                \
++      .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) {              \
+               *(.stubs)                                               \
+       }                                                               \
+-      . = __stubs_start + SIZEOF(.stubs);                             \
+-      __stubs_end = .;                                                \
++      ARM_LMA(__stubs, .stubs);                                       \
++      . = __stubs_lma + SIZEOF(.stubs);                               \
+                                                                       \
+       PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
+ 
+diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
+index 89e5d864e9234..79588b5623532 100644
+--- a/arch/arm/kernel/Makefile
++++ b/arch/arm/kernel/Makefile
+@@ -106,4 +106,6 @@ endif
+ 
+ obj-$(CONFIG_HAVE_ARM_SMCCC)  += smccc-call.o
+ 
++obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += spectre.o
++
+ extra-y := $(head-y) vmlinux.lds
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index 63fbcdc97ded9..3cbd35c82a66c 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -1005,12 +1005,11 @@ vector_\name:
+       sub     lr, lr, #\correction
+       .endif
+ 
+-      @
+-      @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
+-      @ (parent CPSR)
+-      @
++      @ Save r0, lr_<exception> (parent PC)
+       stmia   sp, {r0, lr}            @ save r0, lr
+-      mrs     lr, spsr
++
++      @ Save spsr_<exception> (parent CPSR)
++2:    mrs     lr, spsr
+       str     lr, [sp, #8]            @ save spsr
+ 
+       @
+@@ -1031,6 +1030,44 @@ vector_\name:
+       movs    pc, lr                  @ branch to handler in SVC mode
+ ENDPROC(vector_\name)
+ 
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++      .subsection 1
++      .align 5
++vector_bhb_loop8_\name:
++      .if \correction
++      sub     lr, lr, #\correction
++      .endif
++
++      @ Save r0, lr_<exception> (parent PC)
++      stmia   sp, {r0, lr}
++
++      @ bhb workaround
++      mov     r0, #8
++1:    b       . + 4
++      subs    r0, r0, #1
++      bne     1b
++      dsb
++      isb
++      b       2b
++ENDPROC(vector_bhb_loop8_\name)
++
++vector_bhb_bpiall_\name:
++      .if \correction
++      sub     lr, lr, #\correction
++      .endif
++
++      @ Save r0, lr_<exception> (parent PC)
++      stmia   sp, {r0, lr}
++
++      @ bhb workaround
++      mcr     p15, 0, r0, c7, c5, 6   @ BPIALL
++      @ isb not needed due to "movs pc, lr" in the vector stub
++      @ which gives a "context synchronisation".
++      b       2b
++ENDPROC(vector_bhb_bpiall_\name)
++      .previous
++#endif
++
+       .align  2
+       @ handler addresses follow this label
+ 1:
+@@ -1039,6 +1076,10 @@ ENDPROC(vector_\name)
+       .section .stubs, "ax", %progbits
+       @ This must be the first word
+       .word   vector_swi
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++      .word   vector_bhb_loop8_swi
++      .word   vector_bhb_bpiall_swi
++#endif
+ 
+ vector_rst:
+  ARM( swi     SYS_ERROR0      )
+@@ -1153,8 +1194,10 @@ vector_addrexcptn:
+  * FIQ "NMI" handler
+  *-----------------------------------------------------------------------------
+  * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
+- * systems.
++ * systems. This must be the last vector stub, so lets place it in its own
++ * subsection.
+  */
++      .subsection 2
+       vector_stub     fiq, FIQ_MODE, 4
+ 
+       .long   __fiq_usr                       @  0  (USR_26 / USR_32)
+@@ -1187,6 +1230,30 @@ vector_addrexcptn:
+       W(b)    vector_irq
+       W(b)    vector_fiq
+ 
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++      .section .vectors.bhb.loop8, "ax", %progbits
++.L__vectors_bhb_loop8_start:
++      W(b)    vector_rst
++      W(b)    vector_bhb_loop8_und
++      W(ldr)  pc, .L__vectors_bhb_loop8_start + 0x1004
++      W(b)    vector_bhb_loop8_pabt
++      W(b)    vector_bhb_loop8_dabt
++      W(b)    vector_addrexcptn
++      W(b)    vector_bhb_loop8_irq
++      W(b)    vector_bhb_loop8_fiq
++
++      .section .vectors.bhb.bpiall, "ax", %progbits
++.L__vectors_bhb_bpiall_start:
++      W(b)    vector_rst
++      W(b)    vector_bhb_bpiall_und
++      W(ldr)  pc, .L__vectors_bhb_bpiall_start + 0x1008
++      W(b)    vector_bhb_bpiall_pabt
++      W(b)    vector_bhb_bpiall_dabt
++      W(b)    vector_addrexcptn
++      W(b)    vector_bhb_bpiall_irq
++      W(b)    vector_bhb_bpiall_fiq
++#endif
++
+       .data
+       .align  2
+ 
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index 271cb8a1eba1e..bd619da73c84e 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -162,6 +162,29 @@ ENDPROC(ret_from_fork)
+  *-----------------------------------------------------------------------------
+  */
+ 
++      .align  5
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++ENTRY(vector_bhb_loop8_swi)
++      sub     sp, sp, #PT_REGS_SIZE
++      stmia   sp, {r0 - r12}
++      mov     r8, #8
++1:    b       2f
++2:    subs    r8, r8, #1
++      bne     1b
++      dsb
++      isb
++      b       3f
++ENDPROC(vector_bhb_loop8_swi)
++
++      .align  5
++ENTRY(vector_bhb_bpiall_swi)
++      sub     sp, sp, #PT_REGS_SIZE
++      stmia   sp, {r0 - r12}
++      mcr     p15, 0, r8, c7, c5, 6   @ BPIALL
++      isb
++      b       3f
++ENDPROC(vector_bhb_bpiall_swi)
++#endif
+       .align  5
+ ENTRY(vector_swi)
+ #ifdef CONFIG_CPU_V7M
+@@ -169,6 +192,7 @@ ENTRY(vector_swi)
+ #else
+       sub     sp, sp, #PT_REGS_SIZE
+       stmia   sp, {r0 - r12}                  @ Calling r0 - r12
++3:
+  ARM( add     r8, sp, #S_PC           )
+  ARM( stmdb   r8, {sp, lr}^           )       @ Calling sp, lr
+  THUMB(       mov     r8, sp                  )
+diff --git a/arch/arm/kernel/spectre.c b/arch/arm/kernel/spectre.c
+new file mode 100644
+index 0000000000000..0dcefc36fb7a0
+--- /dev/null
++++ b/arch/arm/kernel/spectre.c
+@@ -0,0 +1,71 @@
++// SPDX-License-Identifier: GPL-2.0-only
++#include <linux/bpf.h>
++#include <linux/cpu.h>
++#include <linux/device.h>
++
++#include <asm/spectre.h>
++
++static bool _unprivileged_ebpf_enabled(void)
++{
++#ifdef CONFIG_BPF_SYSCALL
++      return !sysctl_unprivileged_bpf_disabled;
++#else
++      return false;
++#endif
++}
++
++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
++                          char *buf)
++{
++      return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++}
++
++static unsigned int spectre_v2_state;
++static unsigned int spectre_v2_methods;
++
++void spectre_v2_update_state(unsigned int state, unsigned int method)
++{
++      if (state > spectre_v2_state)
++              spectre_v2_state = state;
++      spectre_v2_methods |= method;
++}
++
++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
++                          char *buf)
++{
++      const char *method;
++
++      if (spectre_v2_state == SPECTRE_UNAFFECTED)
++              return sprintf(buf, "%s\n", "Not affected");
++
++      if (spectre_v2_state != SPECTRE_MITIGATED)
++              return sprintf(buf, "%s\n", "Vulnerable");
++
++      if (_unprivileged_ebpf_enabled())
++              return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
++
++      switch (spectre_v2_methods) {
++      case SPECTRE_V2_METHOD_BPIALL:
++              method = "Branch predictor hardening";
++              break;
++
++      case SPECTRE_V2_METHOD_ICIALLU:
++              method = "I-cache invalidation";
++              break;
++
++      case SPECTRE_V2_METHOD_SMC:
++      case SPECTRE_V2_METHOD_HVC:
++              method = "Firmware call";
++              break;
++
++      case SPECTRE_V2_METHOD_LOOP8:
++              method = "History overwrite";
++              break;
++
++      default:
++              method = "Multiple mitigations";
++              break;
++      }
++
++      return sprintf(buf, "Mitigation: %s\n", method);
++}
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 17d5a785df28b..2d9e72ad1b0f9 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -30,6 +30,7 @@
+ #include <linux/atomic.h>
+ #include <asm/cacheflush.h>
+ #include <asm/exception.h>
++#include <asm/spectre.h>
+ #include <asm/unistd.h>
+ #include <asm/traps.h>
+ #include <asm/ptrace.h>
+@@ -806,10 +807,59 @@ static inline void __init kuser_init(void *vectors)
+ }
+ #endif
+ 
++#ifndef CONFIG_CPU_V7M
++static void copy_from_lma(void *vma, void *lma_start, void *lma_end)
++{
++      memcpy(vma, lma_start, lma_end - lma_start);
++}
++
++static void flush_vectors(void *vma, size_t offset, size_t size)
++{
++      unsigned long start = (unsigned long)vma + offset;
++      unsigned long end = start + size;
++
++      flush_icache_range(start, end);
++}
++
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++int spectre_bhb_update_vectors(unsigned int method)
++{
++      extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
++      extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
++      void *vec_start, *vec_end;
++
++      if (system_state > SYSTEM_SCHEDULING) {
++              pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
++                     smp_processor_id());
++              return SPECTRE_VULNERABLE;
++      }
++
++      switch (method) {
++      case SPECTRE_V2_METHOD_LOOP8:
++              vec_start = __vectors_bhb_loop8_start;
++              vec_end = __vectors_bhb_loop8_end;
++              break;
++
++      case SPECTRE_V2_METHOD_BPIALL:
++              vec_start = __vectors_bhb_bpiall_start;
++              vec_end = __vectors_bhb_bpiall_end;
++              break;
++
++      default:
++              pr_err("CPU%u: unknown Spectre BHB state %d\n",
++                     smp_processor_id(), method);
++              return SPECTRE_VULNERABLE;
++      }
++
++      copy_from_lma(vectors_page, vec_start, vec_end);
++      flush_vectors(vectors_page, 0, vec_end - vec_start);
++
++      return SPECTRE_MITIGATED;
++}
++#endif
++
+ void __init early_trap_init(void *vectors_base)
+ {
+-#ifndef CONFIG_CPU_V7M
+-      unsigned long vectors = (unsigned long)vectors_base;
+       extern char __stubs_start[], __stubs_end[];
+       extern char __vectors_start[], __vectors_end[];
+       unsigned i;
+@@ -830,17 +880,20 @@ void __init early_trap_init(void *vectors_base)
+        * into the vector page, mapped at 0xffff0000, and ensure these
+        * are visible to the instruction stream.
+        */
+-      memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
+-      memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
++      copy_from_lma(vectors_base, __vectors_start, __vectors_end);
++      copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end);
+ 
+       kuser_init(vectors_base);
+ 
+-      flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
++      flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
++}
+ #else /* ifndef CONFIG_CPU_V7M */
++void __init early_trap_init(void *vectors_base)
++{
+       /*
+        * on V7-M there is no need to copy the vector table to a dedicated
+        * memory area. The address is configurable and so a table in the kernel
+        * image can be used.
+        */
+-#endif
+ }
++#endif
+diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
+index 423a97dd2f57c..c6bf34a33849c 100644
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -833,6 +833,7 @@ config CPU_BPREDICT_DISABLE
+ 
+ config CPU_SPECTRE
+       bool
++      select GENERIC_CPU_VULNERABILITIES
+ 
+ config HARDEN_BRANCH_PREDICTOR
+       bool "Harden the branch predictor against aliasing attacks" if EXPERT
+@@ -853,6 +854,16 @@ config HARDEN_BRANCH_PREDICTOR
+ 
+          If unsure, say Y.
+ 
++config HARDEN_BRANCH_HISTORY
++      bool "Harden Spectre style attacks against branch history" if EXPERT
++      depends on CPU_SPECTRE
++      default y
++      help
++        Speculation attacks against some high-performance processors can
++        make use of branch history to influence future speculation. When
++        taking an exception, a sequence of branches overwrites the branch
++        history, or branch history is invalidated.
++
+ config TLS_REG_EMUL
+       bool
+       select NEED_KUSER_HELPERS
+diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
+index 114c05ab4dd91..06dbfb968182d 100644
+--- a/arch/arm/mm/proc-v7-bugs.c
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -6,8 +6,35 @@
+ #include <asm/cp15.h>
+ #include <asm/cputype.h>
+ #include <asm/proc-fns.h>
++#include <asm/spectre.h>
+ #include <asm/system_misc.h>
+ 
++#ifdef CONFIG_ARM_PSCI
++static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
++{
++      struct arm_smccc_res res;
++
++      arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++                           ARM_SMCCC_ARCH_WORKAROUND_1, &res);
++
++      switch ((int)res.a0) {
++      case SMCCC_RET_SUCCESS:
++              return SPECTRE_MITIGATED;
++
++      case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
++              return SPECTRE_UNAFFECTED;
++
++      default:
++              return SPECTRE_VULNERABLE;
++      }
++}
++#else
++static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
++{
++      return SPECTRE_VULNERABLE;
++}
++#endif
++
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
+ 
+@@ -36,13 +63,61 @@ static void __maybe_unused call_hvc_arch_workaround_1(void)
+       arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+ }
+ 
+-static void cpu_v7_spectre_init(void)
++static unsigned int spectre_v2_install_workaround(unsigned int method)
+ {
+       const char *spectre_v2_method = NULL;
+       int cpu = smp_processor_id();
+ 
+       if (per_cpu(harden_branch_predictor_fn, cpu))
+-              return;
++              return SPECTRE_MITIGATED;
++
++      switch (method) {
++      case SPECTRE_V2_METHOD_BPIALL:
++              per_cpu(harden_branch_predictor_fn, cpu) =
++                      harden_branch_predictor_bpiall;
++              spectre_v2_method = "BPIALL";
++              break;
++
++      case SPECTRE_V2_METHOD_ICIALLU:
++              per_cpu(harden_branch_predictor_fn, cpu) =
++                      harden_branch_predictor_iciallu;
++              spectre_v2_method = "ICIALLU";
++              break;
++
++      case SPECTRE_V2_METHOD_HVC:
++              per_cpu(harden_branch_predictor_fn, cpu) =
++                      call_hvc_arch_workaround_1;
++              cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
++              spectre_v2_method = "hypervisor";
++              break;
++
++      case SPECTRE_V2_METHOD_SMC:
++              per_cpu(harden_branch_predictor_fn, cpu) =
++                      call_smc_arch_workaround_1;
++              cpu_do_switch_mm = cpu_v7_smc_switch_mm;
++              spectre_v2_method = "firmware";
++              break;
++      }
++
++      if (spectre_v2_method)
++              pr_info("CPU%u: Spectre v2: using %s workaround\n",
++                      smp_processor_id(), spectre_v2_method);
++
++      return SPECTRE_MITIGATED;
++}
++#else
++static unsigned int spectre_v2_install_workaround(unsigned int method)
++{
++      pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n",
++              smp_processor_id());
++
++      return SPECTRE_VULNERABLE;
++}
++#endif
++
++static void cpu_v7_spectre_v2_init(void)
++{
++      unsigned int state, method = 0;
+ 
+       switch (read_cpuid_part()) {
+       case ARM_CPU_PART_CORTEX_A8:
+@@ -51,69 +126,133 @@ static void cpu_v7_spectre_init(void)
+       case ARM_CPU_PART_CORTEX_A17:
+       case ARM_CPU_PART_CORTEX_A73:
+       case ARM_CPU_PART_CORTEX_A75:
+-              per_cpu(harden_branch_predictor_fn, cpu) =
+-                      harden_branch_predictor_bpiall;
+-              spectre_v2_method = "BPIALL";
++              state = SPECTRE_MITIGATED;
++              method = SPECTRE_V2_METHOD_BPIALL;
+               break;
+ 
+       case ARM_CPU_PART_CORTEX_A15:
+       case ARM_CPU_PART_BRAHMA_B15:
+-              per_cpu(harden_branch_predictor_fn, cpu) =
+-                      harden_branch_predictor_iciallu;
+-              spectre_v2_method = "ICIALLU";
++              state = SPECTRE_MITIGATED;
++              method = SPECTRE_V2_METHOD_ICIALLU;
+               break;
+ 
+-#ifdef CONFIG_ARM_PSCI
+       case ARM_CPU_PART_BRAHMA_B53:
+               /* Requires no workaround */
++              state = SPECTRE_UNAFFECTED;
+               break;
++
+       default:
+               /* Other ARM CPUs require no workaround */
+-              if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
++              if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) {
++                      state = SPECTRE_UNAFFECTED;
+                       break;
++              }
++
+               fallthrough;
+-              /* Cortex A57/A72 require firmware workaround */
+-      case ARM_CPU_PART_CORTEX_A57:
+-      case ARM_CPU_PART_CORTEX_A72: {
+-              struct arm_smccc_res res;
+ 
+-              arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+-                                   ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+-              if ((int)res.a0 != 0)
+-                      return;
++      /* Cortex A57/A72 require firmware workaround */
++      case ARM_CPU_PART_CORTEX_A57:
++      case ARM_CPU_PART_CORTEX_A72:
++              state = spectre_v2_get_cpu_fw_mitigation_state();
++              if (state != SPECTRE_MITIGATED)
++                      break;
+ 
+               switch (arm_smccc_1_1_get_conduit()) {
+               case SMCCC_CONDUIT_HVC:
+-                      per_cpu(harden_branch_predictor_fn, cpu) =
+-                              call_hvc_arch_workaround_1;
+-                      cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
+-                      spectre_v2_method = "hypervisor";
++                      method = SPECTRE_V2_METHOD_HVC;
+                       break;
+ 
+               case SMCCC_CONDUIT_SMC:
+-                      per_cpu(harden_branch_predictor_fn, cpu) =
+-                              call_smc_arch_workaround_1;
+-                      cpu_do_switch_mm = cpu_v7_smc_switch_mm;
+-                      spectre_v2_method = "firmware";
++                      method = SPECTRE_V2_METHOD_SMC;
+                       break;
+ 
+               default:
++                      state = SPECTRE_VULNERABLE;
+                       break;
+               }
+       }
+-#endif
++
++      if (state == SPECTRE_MITIGATED)
++              state = spectre_v2_install_workaround(method);
++
++      spectre_v2_update_state(state, method);
++}
++
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++static int spectre_bhb_method;
++
++static const char *spectre_bhb_method_name(int method)
++{
++      switch (method) {
++      case SPECTRE_V2_METHOD_LOOP8:
++              return "loop";
++
++      case SPECTRE_V2_METHOD_BPIALL:
++              return "BPIALL";
++
++      default:
++              return "unknown";
+       }
++}
+ 
+-      if (spectre_v2_method)
+-              pr_info("CPU%u: Spectre v2: using %s workaround\n",
+-                      smp_processor_id(), spectre_v2_method);
++static int spectre_bhb_install_workaround(int method)
++{
++      if (spectre_bhb_method != method) {
++              if (spectre_bhb_method) {
++                      pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
++                             smp_processor_id());
++
++                      return SPECTRE_VULNERABLE;
++              }
++
++              if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
++                      return SPECTRE_VULNERABLE;
++
++              spectre_bhb_method = method;
++      }
++
++      pr_info("CPU%u: Spectre BHB: using %s workaround\n",
++              smp_processor_id(), spectre_bhb_method_name(method));
++
++      return SPECTRE_MITIGATED;
+ }
+ #else
+-static void cpu_v7_spectre_init(void)
++static int spectre_bhb_install_workaround(int method)
+ {
++      return SPECTRE_VULNERABLE;
+ }
+ #endif
+ 
++static void cpu_v7_spectre_bhb_init(void)
++{
++      unsigned int state, method = 0;
++
++      switch (read_cpuid_part()) {
++      case ARM_CPU_PART_CORTEX_A15:
++      case ARM_CPU_PART_BRAHMA_B15:
++      case ARM_CPU_PART_CORTEX_A57:
++      case ARM_CPU_PART_CORTEX_A72:
++              state = SPECTRE_MITIGATED;
++              method = SPECTRE_V2_METHOD_LOOP8;
++              break;
++
++      case ARM_CPU_PART_CORTEX_A73:
++      case ARM_CPU_PART_CORTEX_A75:
++              state = SPECTRE_MITIGATED;
++              method = SPECTRE_V2_METHOD_BPIALL;
++              break;
++
++      default:
++              state = SPECTRE_UNAFFECTED;
++              break;
++      }
++
++      if (state == SPECTRE_MITIGATED)
++              state = spectre_bhb_install_workaround(method);
++
++      spectre_v2_update_state(state, method);
++}
++
+ static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
+                                                 u32 mask, const char *msg)
+ {
+@@ -142,16 +281,17 @@ static bool check_spectre_auxcr(bool *warned, u32 bit)
+ void cpu_v7_ca8_ibe(void)
+ {
+       if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
+-              cpu_v7_spectre_init();
++              cpu_v7_spectre_v2_init();
+ }
+ 
+ void cpu_v7_ca15_ibe(void)
+ {
+       if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
+-              cpu_v7_spectre_init();
++              cpu_v7_spectre_v2_init();
+ }
+ 
+ void cpu_v7_bugs_init(void)
+ {
+-      cpu_v7_spectre_init();
++      cpu_v7_spectre_v2_init();
++      cpu_v7_spectre_bhb_init();
+ }
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 3da71fe56b922..7c7906e9dafda 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1184,6 +1184,15 @@ config UNMAP_KERNEL_AT_EL0
+ 
+         If unsure, say Y.
+ 
++config MITIGATE_SPECTRE_BRANCH_HISTORY
++      bool "Mitigate Spectre style attacks against branch history" if EXPERT
++      default y
++      help
++        Speculation attacks against some high-performance processors can
++        make use of branch history to influence future speculation.
++        When taking an exception from user-space, a sequence of branches
++        or a firmware call overwrites the branch history.
++
+ config RODATA_FULL_DEFAULT_ENABLED
+       bool "Apply r/o permissions of VM areas also to their linear aliases"
+       default y
+diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
+index ddbe6bf00e336..011e681a23366 100644
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -97,6 +97,13 @@
+       hint    #20
+       .endm
+ 
++/*
++ * Clear Branch History instruction
++ */
++      .macro clearbhb
++      hint    #22
++      .endm
++
+ /*
+  * Speculation barrier
+  */
+@@ -795,4 +802,30 @@ USER(\label, ic   ivau, \tmp2)                    // invalidate I line PoU
+ 
+ #endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */
+ 
++      .macro __mitigate_spectre_bhb_loop      tmp
++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++alternative_cb  spectre_bhb_patch_loop_iter
++      mov     \tmp, #32               // Patched to correct the immediate
++alternative_cb_end
++.Lspectre_bhb_loop\@:
++      b       . + 4
++      subs    \tmp, \tmp, #1
++      b.ne    .Lspectre_bhb_loop\@
++      sb
++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++      .endm
++
++      /* Save/restores x0-x3 to the stack */
++      .macro __mitigate_spectre_bhb_fw
++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++      stp     x0, x1, [sp, #-16]!
++      stp     x2, x3, [sp, #-16]!
++      mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_3
++alternative_cb        smccc_patch_fw_mitigation_conduit
++      nop                                     // Patched to SMC/HVC #0
++alternative_cb_end
++      ldp     x2, x3, [sp], #16
++      ldp     x0, x1, [sp], #16
++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++      .endm
+ #endif        /* __ASM_ASSEMBLER_H */
+diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
+index 7faae6ff3ab4d..24ed6643da266 100644
+--- a/arch/arm64/include/asm/cpu.h
++++ b/arch/arm64/include/asm/cpu.h
+@@ -25,6 +25,7 @@ struct cpuinfo_arm64 {
+       u64             reg_id_aa64dfr1;
+       u64             reg_id_aa64isar0;
+       u64             reg_id_aa64isar1;
++      u64             reg_id_aa64isar2;
+       u64             reg_id_aa64mmfr0;
+       u64             reg_id_aa64mmfr1;
+       u64             reg_id_aa64mmfr2;
+diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
+index e7d98997c09c3..f42fd0a2e81c8 100644
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -66,7 +66,8 @@
+ #define ARM64_HAS_TLB_RANGE                   56
+ #define ARM64_MTE                             57
+ #define ARM64_WORKAROUND_1508412              58
++#define ARM64_SPECTRE_BHB                     59
+ 
+-#define ARM64_NCAPS                           59
++#define ARM64_NCAPS                           60
+ 
+ #endif /* __ASM_CPUCAPS_H */
+diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
+index da250e4741bd7..423f9b40e4d95 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -606,6 +606,34 @@ static inline bool cpu_supports_mixed_endian_el0(void)
+       return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
+ }
+ 
++static inline bool supports_csv2p3(int scope)
++{
++      u64 pfr0;
++      u8 csv2_val;
++
++      if (scope == SCOPE_LOCAL_CPU)
++              pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
++      else
++              pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
++
++      csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
++                                                      ID_AA64PFR0_CSV2_SHIFT);
++      return csv2_val == 3;
++}
++
++static inline bool supports_clearbhb(int scope)
++{
++      u64 isar2;
++
++      if (scope == SCOPE_LOCAL_CPU)
++              isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
++      else
++              isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
++
++      return cpuid_feature_extract_unsigned_field(isar2,
++                                                  
ID_AA64ISAR2_CLEARBHB_SHIFT);
++}
++
+ static inline bool system_supports_32bit_el0(void)
+ {
+       return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index ef5b040dee44d..bfbf0c4c7c5e5 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -59,6 +59,7 @@
+ #define ARM_CPU_IMP_NVIDIA            0x4E
+ #define ARM_CPU_IMP_FUJITSU           0x46
+ #define ARM_CPU_IMP_HISI              0x48
++#define ARM_CPU_IMP_APPLE             0x61
+ 
+ #define ARM_CPU_PART_AEM_V8           0xD0F
+ #define ARM_CPU_PART_FOUNDATION               0xD00
+@@ -72,6 +73,14 @@
+ #define ARM_CPU_PART_CORTEX_A76               0xD0B
+ #define ARM_CPU_PART_NEOVERSE_N1      0xD0C
+ #define ARM_CPU_PART_CORTEX_A77               0xD0D
++#define ARM_CPU_PART_NEOVERSE_V1      0xD40
++#define ARM_CPU_PART_CORTEX_A78               0xD41
++#define ARM_CPU_PART_CORTEX_X1                0xD44
++#define ARM_CPU_PART_CORTEX_A510      0xD46
++#define ARM_CPU_PART_CORTEX_A710      0xD47
++#define ARM_CPU_PART_CORTEX_X2                0xD48
++#define ARM_CPU_PART_NEOVERSE_N2      0xD49
++#define ARM_CPU_PART_CORTEX_A78C      0xD4B
+ 
+ #define APM_CPU_PART_POTENZA          0x000
+ 
+@@ -99,6 +108,9 @@
+ 
+ #define HISI_CPU_PART_TSV110          0xD01
+ 
++#define APPLE_CPU_PART_M1_ICESTORM    0x022
++#define APPLE_CPU_PART_M1_FIRESTORM   0x023
++
+ #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+ #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+ #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
+@@ -109,6 +121,14 @@
+ #define MIDR_CORTEX_A76       MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
+ #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
+ #define MIDR_CORTEX_A77       MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
++#define MIDR_NEOVERSE_V1      MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
++#define MIDR_CORTEX_A78       MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
++#define MIDR_CORTEX_X1        MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
++#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
++#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
++#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
++#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
++#define MIDR_CORTEX_A78C      MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
+ #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+@@ -127,6 +147,8 @@
+ #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
+ #define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
+ #define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
++#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
++#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)
+ 
+ /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
+ #define MIDR_FUJITSU_ERRATUM_010001           MIDR_FUJITSU_A64FX
+diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
+index 4335800201c97..daff882883f92 100644
+--- a/arch/arm64/include/asm/fixmap.h
++++ b/arch/arm64/include/asm/fixmap.h
+@@ -62,9 +62,11 @@ enum fixed_addresses {
+ #endif /* CONFIG_ACPI_APEI_GHES */
+ 
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++      FIX_ENTRY_TRAMP_TEXT3,
++      FIX_ENTRY_TRAMP_TEXT2,
++      FIX_ENTRY_TRAMP_TEXT1,
+       FIX_ENTRY_TRAMP_DATA,
+-      FIX_ENTRY_TRAMP_TEXT,
+-#define TRAMP_VALIAS          (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
++#define TRAMP_VALIAS          (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1))
+ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+       __end_of_permanent_fixed_addresses,
+ 
+diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
+index 9a5498c2c8eea..6422147ea612f 100644
+--- a/arch/arm64/include/asm/hwcap.h
++++ b/arch/arm64/include/asm/hwcap.h
+@@ -105,6 +105,9 @@
+ #define KERNEL_HWCAP_RNG              __khwcap2_feature(RNG)
+ #define KERNEL_HWCAP_BTI              __khwcap2_feature(BTI)
+ #define KERNEL_HWCAP_MTE              __khwcap2_feature(MTE)
++#define KERNEL_HWCAP_ECV              __khwcap2_feature(ECV)
++#define KERNEL_HWCAP_AFP              __khwcap2_feature(AFP)
++#define KERNEL_HWCAP_RPRES            __khwcap2_feature(RPRES)
+ 
+ /*
+  * This yields a mask that user programs can use to figure out what
+diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
+index 4b39293d0f72d..d45b42295254d 100644
+--- a/arch/arm64/include/asm/insn.h
++++ b/arch/arm64/include/asm/insn.h
+@@ -65,6 +65,7 @@ enum aarch64_insn_hint_cr_op {
+       AARCH64_INSN_HINT_PSB  = 0x11 << 5,
+       AARCH64_INSN_HINT_TSB  = 0x12 << 5,
+       AARCH64_INSN_HINT_CSDB = 0x14 << 5,
++      AARCH64_INSN_HINT_CLEARBHB = 0x16 << 5,
+ 
+       AARCH64_INSN_HINT_BTI   = 0x20 << 5,
+       AARCH64_INSN_HINT_BTIC  = 0x22 << 5,
+diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
+index 044bb9e2cd74f..ada24a20a5671 100644
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -35,6 +35,9 @@
+ #define KVM_VECTOR_PREAMBLE   (2 * AARCH64_INSN_SIZE)
+ 
+ #define __SMCCC_WORKAROUND_1_SMC_SZ 36
++#define __SMCCC_WORKAROUND_3_SMC_SZ 36
++#define __SPECTRE_BHB_LOOP_SZ       44
++#define __SPECTRE_BHB_CLEARBHB_SZ   12
+ 
+ #define KVM_HOST_SMCCC_ID(id)                                         \
+       ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
+@@ -199,6 +202,11 @@ extern void __vgic_v3_init_lrs(void);
+ extern u32 __kvm_get_mdcr_el2(void);
+ 
+ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
++extern char __smccc_workaround_3_smc[__SMCCC_WORKAROUND_3_SMC_SZ];
++extern char __spectre_bhb_loop_k8[__SPECTRE_BHB_LOOP_SZ];
++extern char __spectre_bhb_loop_k24[__SPECTRE_BHB_LOOP_SZ];
++extern char __spectre_bhb_loop_k32[__SPECTRE_BHB_LOOP_SZ];
++extern char __spectre_bhb_clearbhb[__SPECTRE_BHB_LOOP_SZ];
+ 
+ /*
+  * Obtain the PC-relative address of a kernel symbol
+diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
+index 331394306ccee..47dafd6ab3a30 100644
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -237,7 +237,8 @@ static inline void *kvm_get_hyp_vector(void)
+       void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
+       int slot = -1;
+ 
+-      if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
++      if ((cpus_have_const_cap(ARM64_SPECTRE_V2) ||
++           cpus_have_const_cap(ARM64_SPECTRE_BHB)) && data->template_start) {
+               vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
+               slot = data->hyp_vectors_slot;
+       }
+diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
+index c7315862e2435..bc151b7dc042c 100644
+--- a/arch/arm64/include/asm/mmu.h
++++ b/arch/arm64/include/asm/mmu.h
+@@ -67,6 +67,12 @@ typedef void (*bp_hardening_cb_t)(void);
+ struct bp_hardening_data {
+       int                     hyp_vectors_slot;
+       bp_hardening_cb_t       fn;
++
++      /*
++       * template_start is only used by the BHB mitigation to identify the
++       * hyp_vectors_slot sequence.
++       */
++      const char *template_start;
+ };
+ 
+ DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
+index 3994169985efc..6a45c26da46e3 100644
+--- a/arch/arm64/include/asm/sections.h
++++ b/arch/arm64/include/asm/sections.h
+@@ -19,4 +19,9 @@ extern char __irqentry_text_start[], __irqentry_text_end[];
+ extern char __mmuoff_data_start[], __mmuoff_data_end[];
+ extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
+ 
++static inline size_t entry_tramp_text_size(void)
++{
++      return __entry_tramp_text_end - __entry_tramp_text_start;
++}
++
+ #endif /* __ASM_SECTIONS_H */
+diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h
+index fcdfbce302bdf..4b3a5f050f71f 100644
+--- a/arch/arm64/include/asm/spectre.h
++++ b/arch/arm64/include/asm/spectre.h
+@@ -29,4 +29,8 @@ bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
+ void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+ void spectre_v4_enable_task_mitigation(struct task_struct *tsk);
+ 
++enum mitigation_state arm64_get_spectre_bhb_state(void);
++bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
++u8 spectre_bhb_loop_affected(int scope);
++void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
+ #endif        /* __ASM_SPECTRE_H */
+diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
+index 801861d054268..1f2209ad2cca1 100644
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -175,6 +175,7 @@
+ 
+ #define SYS_ID_AA64ISAR0_EL1          sys_reg(3, 0, 0, 6, 0)
+ #define SYS_ID_AA64ISAR1_EL1          sys_reg(3, 0, 0, 6, 1)
++#define SYS_ID_AA64ISAR2_EL1          sys_reg(3, 0, 0, 6, 2)
+ 
+ #define SYS_ID_AA64MMFR0_EL1          sys_reg(3, 0, 0, 7, 0)
+ #define SYS_ID_AA64MMFR1_EL1          sys_reg(3, 0, 0, 7, 1)
+@@ -687,6 +688,21 @@
+ #define ID_AA64ISAR1_GPI_NI                   0x0
+ #define ID_AA64ISAR1_GPI_IMP_DEF              0x1
+ 
++/* id_aa64isar2 */
++#define ID_AA64ISAR2_CLEARBHB_SHIFT   28
++#define ID_AA64ISAR2_RPRES_SHIFT      4
++#define ID_AA64ISAR2_WFXT_SHIFT               0
++
++#define ID_AA64ISAR2_RPRES_8BIT               0x0
++#define ID_AA64ISAR2_RPRES_12BIT      0x1
++/*
++ * Value 0x1 has been removed from the architecture, and is
++ * reserved, but has not yet been removed from the ARM ARM
++ * as of ARM DDI 0487G.b.
++ */
++#define ID_AA64ISAR2_WFXT_NI          0x0
++#define ID_AA64ISAR2_WFXT_SUPPORTED   0x2
++
+ /* id_aa64pfr0 */
+ #define ID_AA64PFR0_CSV3_SHIFT                60
+ #define ID_AA64PFR0_CSV2_SHIFT                56
+@@ -786,6 +802,8 @@
+ #endif
+ 
+ /* id_aa64mmfr1 */
++#define ID_AA64MMFR1_ECBHB_SHIFT      60
++#define ID_AA64MMFR1_AFP_SHIFT                44
+ #define ID_AA64MMFR1_ETS_SHIFT                36
+ #define ID_AA64MMFR1_TWED_SHIFT               32
+ #define ID_AA64MMFR1_XNX_SHIFT                28
+diff --git a/arch/arm64/include/asm/vectors.h b/arch/arm64/include/asm/vectors.h
+new file mode 100644
+index 0000000000000..f64613a96d530
+--- /dev/null
++++ b/arch/arm64/include/asm/vectors.h
+@@ -0,0 +1,73 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (C) 2022 ARM Ltd.
++ */
++#ifndef __ASM_VECTORS_H
++#define __ASM_VECTORS_H
++
++#include <linux/bug.h>
++#include <linux/percpu.h>
++
++#include <asm/fixmap.h>
++
++extern char vectors[];
++extern char tramp_vectors[];
++extern char __bp_harden_el1_vectors[];
++
++/*
++ * Note: the order of this enum corresponds to two arrays in entry.S:
++ * tramp_vecs and __bp_harden_el1_vectors. By default the canonical
++ * 'full fat' vectors are used directly.
++ */
++enum arm64_bp_harden_el1_vectors {
++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++      /*
++       * Perform the BHB loop mitigation, before branching to the canonical
++       * vectors.
++       */
++      EL1_VECTOR_BHB_LOOP,
++
++      /*
++       * Make the SMC call for firmware mitigation, before branching to the
++       * canonical vectors.
++       */
++      EL1_VECTOR_BHB_FW,
++
++      /*
++       * Use the ClearBHB instruction, before branching to the canonical
++       * vectors.
++       */
++      EL1_VECTOR_BHB_CLEAR_INSN,
++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++
++      /*
++       * Remap the kernel before branching to the canonical vectors.
++       */
++      EL1_VECTOR_KPTI,
++};
++
++#ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++#define EL1_VECTOR_BHB_LOOP           -1
++#define EL1_VECTOR_BHB_FW             -1
++#define EL1_VECTOR_BHB_CLEAR_INSN     -1
++#endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++
++/* The vectors to use on return from EL0. e.g. to remap the kernel */
++DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector);
++
++#ifndef CONFIG_UNMAP_KERNEL_AT_EL0
++#define TRAMP_VALIAS  0
++#endif
++
++static inline const char *
++arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot)
++{
++      if (arm64_kernel_unmapped_at_el0())
++              return (char *)TRAMP_VALIAS + SZ_2K * slot;
++
++      WARN_ON_ONCE(slot == EL1_VECTOR_KPTI);
++
++      return __bp_harden_el1_vectors + SZ_2K * slot;
++}
++
++#endif /* __ASM_VECTORS_H */
+diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
+index b8f41aa234ee1..f03731847d9df 100644
+--- a/arch/arm64/include/uapi/asm/hwcap.h
++++ b/arch/arm64/include/uapi/asm/hwcap.h
+@@ -75,5 +75,8 @@
+ #define HWCAP2_RNG            (1 << 16)
+ #define HWCAP2_BTI            (1 << 17)
+ #define HWCAP2_MTE            (1 << 18)
++#define HWCAP2_ECV            (1 << 19)
++#define HWCAP2_AFP            (1 << 20)
++#define HWCAP2_RPRES          (1 << 21)
+ 
+ #endif /* _UAPI__ASM_HWCAP_H */
+diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
+index 1c17c3a24411d..531ff62e82e95 100644
+--- a/arch/arm64/include/uapi/asm/kvm.h
++++ b/arch/arm64/include/uapi/asm/kvm.h
+@@ -273,6 +273,11 @@ struct kvm_vcpu_events {
+ #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED      3
+ #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED           (1U << 4)
+ 
++#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3   KVM_REG_ARM_FW_REG(3)
++#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL         0
++#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL             1
++#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED      2
++
+ /* SVE registers */
+ #define KVM_REG_ARM64_SVE             (0x15 << KVM_REG_ARM_COPROC_SHIFT)
+ 
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index cafaf0da05b7c..533559c7d2b31 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -473,6 +473,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+               .matches = has_spectre_v4,
+               .cpu_enable = spectre_v4_enable_mitigation,
+       },
++      {
++              .desc = "Spectre-BHB",
++              .capability = ARM64_SPECTRE_BHB,
++              .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
++              .matches = is_spectre_bhb_affected,
++              .cpu_enable = spectre_bhb_enable_mitigation,
++      },
+ #ifdef CONFIG_ARM64_ERRATUM_1418040
+       {
+               .desc = "ARM erratum 1418040",
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 5001c43ea6c33..c9108ed406458 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -65,11 +65,13 @@
+ #include <linux/bsearch.h>
+ #include <linux/cpumask.h>
+ #include <linux/crash_dump.h>
++#include <linux/percpu.h>
+ #include <linux/sort.h>
+ #include <linux/stop_machine.h>
+ #include <linux/types.h>
+ #include <linux/mm.h>
+ #include <linux/cpu.h>
++
+ #include <asm/cpu.h>
+ #include <asm/cpufeature.h>
+ #include <asm/cpu_ops.h>
+@@ -79,6 +81,7 @@
+ #include <asm/processor.h>
+ #include <asm/sysreg.h>
+ #include <asm/traps.h>
++#include <asm/vectors.h>
+ #include <asm/virt.h>
+ 
+ /* Kernel representation of AT_HWCAP and AT_HWCAP2 */
+@@ -104,6 +107,8 @@ DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+ bool arm64_use_ng_mappings = false;
+ EXPORT_SYMBOL(arm64_use_ng_mappings);
+ 
++DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
++
+ /*
+  * Flag to indicate if we have computed the system wide
+  * capabilities based on the boot time active CPUs. This
+@@ -205,6 +210,12 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+       ARM64_FTR_END,
+ };
+ 
++static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
++      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0),
++      ARM64_FTR_END,
++};
++
+ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
+@@ -259,7 +270,7 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
+ };
+ 
+ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
+-      ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EXS_SHIFT, 4, 0),
+       /*
+@@ -305,6 +316,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
+ };
+ 
+ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
++      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0),
+@@ -596,6 +608,7 @@ static const struct __ftr_reg_entry {
+       /* Op1 = 0, CRn = 0, CRm = 6 */
+       ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
+       ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
++      ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),
+ 
+       /* Op1 = 0, CRn = 0, CRm = 7 */
+       ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
+@@ -830,6 +843,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
+       init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
+       init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
+       init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
++      init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2);
+       init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
+       init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
+       init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
+@@ -1058,6 +1072,8 @@ void update_cpu_features(int cpu,
+                                     info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
+       taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
+                                     info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
++      taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu,
++                                    info->reg_id_aa64isar2, boot->reg_id_aa64isar2);
+ 
+       /*
+        * Differing PARange support is fine as long as all peripherals and
+@@ -1157,6 +1173,7 @@ static u64 __read_sysreg_by_encoding(u32 sys_id)
+       read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
+       read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
+       read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
++      read_sysreg_case(SYS_ID_AA64ISAR2_EL1);
+ 
+       read_sysreg_case(SYS_CNTFRQ_EL0);
+       read_sysreg_case(SYS_CTR_EL0);
+@@ -1402,6 +1419,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+ 
+       int cpu = smp_processor_id();
+ 
++      if (__this_cpu_read(this_cpu_vector) == vectors) {
++              const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);
++
++              __this_cpu_write(this_cpu_vector, v);
++      }
++
+       /*
+        * We don't need to rewrite the page-tables if either we've done
+        * it already or we have KASLR enabled and therefore have not
+@@ -2252,6 +2275,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+ #ifdef CONFIG_ARM64_MTE
+       HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, 
ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
+ #endif /* CONFIG_ARM64_MTE */
++      HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
++      HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
++      HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_RPRES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
+       {},
+ };
+ 
+diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
+index 77605aec25fec..4c0e72781f31b 100644
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -94,6 +94,9 @@ static const char *const hwcap_str[] = {
+       [KERNEL_HWCAP_RNG]              = "rng",
+       [KERNEL_HWCAP_BTI]              = "bti",
+       [KERNEL_HWCAP_MTE]              = "mte",
++      [KERNEL_HWCAP_ECV]              = "ecv",
++      [KERNEL_HWCAP_AFP]              = "afp",
++      [KERNEL_HWCAP_RPRES]            = "rpres",
+ };
+ 
+ #ifdef CONFIG_COMPAT
+@@ -364,6 +367,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
+       info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
+       info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
+       info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
++      info->reg_id_aa64isar2 = read_cpuid(ID_AA64ISAR2_EL1);
+       info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+       info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+       info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index fe83d6d67ec3d..d5bc1dbdd2fda 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -62,18 +62,21 @@
+ 
+       .macro kernel_ventry, el, label, regsize = 64
+       .align 7
+-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++.Lventry_start\@:
+       .if     \el == 0
+-alternative_if ARM64_UNMAP_KERNEL_AT_EL0
++      /*
++       * This must be the first instruction of the EL0 vector entries. It is
++       * skipped by the trampoline vectors, to trigger the cleanup.
++       */
++      b       .Lskip_tramp_vectors_cleanup\@
+       .if     \regsize == 64
+       mrs     x30, tpidrro_el0
+       msr     tpidrro_el0, xzr
+       .else
+       mov     x30, xzr
+       .endif
+-alternative_else_nop_endif
++.Lskip_tramp_vectors_cleanup\@:
+       .endif
+-#endif
+ 
+       sub     sp, sp, #S_FRAME_SIZE
+ #ifdef CONFIG_VMAP_STACK
+@@ -120,11 +123,15 @@ alternative_else_nop_endif
+       mrs     x0, tpidrro_el0
+ #endif
+       b       el\()\el\()_\label
++.org .Lventry_start\@ + 128   // Did we overflow the ventry slot?
+       .endm
+ 
+-      .macro tramp_alias, dst, sym
++      .macro tramp_alias, dst, sym, tmp
+       mov_q   \dst, TRAMP_VALIAS
+-      add     \dst, \dst, #(\sym - .entry.tramp.text)
++      adr_l   \tmp, \sym
++      add     \dst, \dst, \tmp
++      adr_l   \tmp, .entry.tramp.text
++      sub     \dst, \dst, \tmp
+       .endm
+ 
+       /*
+@@ -141,7 +148,7 @@ alternative_cb_end
+       tbnz    \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
+       mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+       mov     w1, #\state
+-alternative_cb        spectre_v4_patch_fw_mitigation_conduit
++alternative_cb        smccc_patch_fw_mitigation_conduit
+       nop                                     // Patched to SMC/HVC #0
+ alternative_cb_end
+ .L__asm_ssbd_skip\@:
+@@ -351,21 +358,26 @@ alternative_else_nop_endif
+       ldp     x24, x25, [sp, #16 * 12]
+       ldp     x26, x27, [sp, #16 * 13]
+       ldp     x28, x29, [sp, #16 * 14]
+-      ldr     lr, [sp, #S_LR]
+-      add     sp, sp, #S_FRAME_SIZE           // restore sp
+ 
+       .if     \el == 0
+-alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
++alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
++      ldr     lr, [sp, #S_LR]
++      add     sp, sp, #S_FRAME_SIZE           // restore sp
++      eret
++alternative_else_nop_endif
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+       bne     4f
+-      msr     far_el1, x30
+-      tramp_alias     x30, tramp_exit_native
++      msr     far_el1, x29
++      tramp_alias     x30, tramp_exit_native, x29
+       br      x30
+ 4:
+-      tramp_alias     x30, tramp_exit_compat
++      tramp_alias     x30, tramp_exit_compat, x29
+       br      x30
+ #endif
+       .else
++      ldr     lr, [sp, #S_LR]
++      add     sp, sp, #S_FRAME_SIZE           // restore sp
++
+       /* Ensure any device/NC reads complete */
+       alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
+ 
+@@ -764,12 +776,6 @@ SYM_CODE_END(ret_to_user)
+ 
+       .popsection                             // .entry.text
+ 
+-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+-/*
+- * Exception vectors trampoline.
+- */
+-      .pushsection ".entry.tramp.text", "ax"
+-
+       // Move from tramp_pg_dir to swapper_pg_dir
+       .macro tramp_map_kernel, tmp
+       mrs     \tmp, ttbr1_el1
+@@ -803,12 +809,47 @@ alternative_else_nop_endif
+        */
+       .endm
+ 
+-      .macro tramp_ventry, regsize = 64
++      .macro tramp_data_page  dst
++      adr_l   \dst, .entry.tramp.text
++      sub     \dst, \dst, PAGE_SIZE
++      .endm
++
++      .macro tramp_data_read_var      dst, var
++#ifdef CONFIG_RANDOMIZE_BASE
++      tramp_data_page         \dst
++      add     \dst, \dst, #:lo12:__entry_tramp_data_\var
++      ldr     \dst, [\dst]
++#else
++      ldr     \dst, =\var
++#endif
++      .endm
++
++#define BHB_MITIGATION_NONE   0
++#define BHB_MITIGATION_LOOP   1
++#define BHB_MITIGATION_FW     2
++#define BHB_MITIGATION_INSN   3
++
++      .macro tramp_ventry, vector_start, regsize, kpti, bhb
+       .align  7
+ 1:
+       .if     \regsize == 64
+       msr     tpidrro_el0, x30        // Restored in kernel_ventry
+       .endif
++
++      .if     \bhb == BHB_MITIGATION_LOOP
++      /*
++       * This sequence must appear before the first indirect branch. i.e. the
++       * ret out of tramp_ventry. It appears here because x30 is free.
++       */
++      __mitigate_spectre_bhb_loop     x30
++      .endif // \bhb == BHB_MITIGATION_LOOP
++
++      .if     \bhb == BHB_MITIGATION_INSN
++      clearbhb
++      isb
++      .endif // \bhb == BHB_MITIGATION_INSN
++
++      .if     \kpti == 1
+       /*
+        * Defend against branch aliasing attacks by pushing a dummy
+        * entry onto the return stack and using a RET instruction to
+@@ -818,46 +859,75 @@ alternative_else_nop_endif
+       b       .
+ 2:
+       tramp_map_kernel        x30
+-#ifdef CONFIG_RANDOMIZE_BASE
+-      adr     x30, tramp_vectors + PAGE_SIZE
+ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
+-      ldr     x30, [x30]
+-#else
+-      ldr     x30, =vectors
+-#endif
++      tramp_data_read_var     x30, vectors
+ alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
+-      prfm    plil1strm, [x30, #(1b - tramp_vectors)]
++      prfm    plil1strm, [x30, #(1b - \vector_start)]
+ alternative_else_nop_endif
++
+       msr     vbar_el1, x30
+-      add     x30, x30, #(1b - tramp_vectors)
+       isb
++      .else
++      ldr     x30, =vectors
++      .endif // \kpti == 1
++
++      .if     \bhb == BHB_MITIGATION_FW
++      /*
++       * The firmware sequence must appear before the first indirect branch.
++       * i.e. the ret out of tramp_ventry. But it also needs the stack to be
++       * mapped to save/restore the registers the SMC clobbers.
++       */
++      __mitigate_spectre_bhb_fw
++      .endif // \bhb == BHB_MITIGATION_FW
++
++      add     x30, x30, #(1b - \vector_start + 4)
+       ret
++.org 1b + 128 // Did we overflow the ventry slot?
+       .endm
+ 
+       .macro tramp_exit, regsize = 64
+-      adr     x30, tramp_vectors
++      tramp_data_read_var     x30, this_cpu_vector
++      this_cpu_offset x29
++      ldr     x30, [x30, x29]
++
+       msr     vbar_el1, x30
+-      tramp_unmap_kernel      x30
++      ldr     lr, [sp, #S_LR]
++      tramp_unmap_kernel      x29
+       .if     \regsize == 64
+-      mrs     x30, far_el1
++      mrs     x29, far_el1
+       .endif
++      add     sp, sp, #S_FRAME_SIZE           // restore sp
+       eret
+       sb
+       .endm
+ 
+-      .align  11
+-SYM_CODE_START_NOALIGN(tramp_vectors)
++      .macro  generate_tramp_vector,  kpti, bhb
++.Lvector_start\@:
+       .space  0x400
+ 
+-      tramp_ventry
+-      tramp_ventry
+-      tramp_ventry
+-      tramp_ventry
++      .rept   4
++      tramp_ventry    .Lvector_start\@, 64, \kpti, \bhb
++      .endr
++      .rept   4
++      tramp_ventry    .Lvector_start\@, 32, \kpti, \bhb
++      .endr
++      .endm
+ 
+-      tramp_ventry    32
+-      tramp_ventry    32
+-      tramp_ventry    32
+-      tramp_ventry    32
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
++/*
++ * Exception vectors trampoline.
++ * The order must match __bp_harden_el1_vectors and the
++ * arm64_bp_harden_el1_vectors enum.
++ */
++      .pushsection ".entry.tramp.text", "ax"
++      .align  11
++SYM_CODE_START_NOALIGN(tramp_vectors)
++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++      generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_LOOP
++      generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_FW
++      generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_INSN
++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++      generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_NONE
+ SYM_CODE_END(tramp_vectors)
+ 
+ SYM_CODE_START(tramp_exit_native)
+@@ -874,12 +944,56 @@ SYM_CODE_END(tramp_exit_compat)
+       .pushsection ".rodata", "a"
+       .align PAGE_SHIFT
+ SYM_DATA_START(__entry_tramp_data_start)
++__entry_tramp_data_vectors:
+       .quad   vectors
++#ifdef CONFIG_ARM_SDE_INTERFACE
++__entry_tramp_data___sdei_asm_handler:
++      .quad   __sdei_asm_handler
++#endif /* CONFIG_ARM_SDE_INTERFACE */
++__entry_tramp_data_this_cpu_vector:
++      .quad   this_cpu_vector
+ SYM_DATA_END(__entry_tramp_data_start)
+       .popsection                             // .rodata
+ #endif /* CONFIG_RANDOMIZE_BASE */
+ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+ 
++/*
++ * Exception vectors for spectre mitigations on entry from EL1 when
++ * kpti is not in use.
++ */
++      .macro generate_el1_vector, bhb
++.Lvector_start\@:
++      kernel_ventry   1, sync_invalid                 // Synchronous EL1t
++      kernel_ventry   1, irq_invalid                  // IRQ EL1t
++      kernel_ventry   1, fiq_invalid                  // FIQ EL1t
++      kernel_ventry   1, error_invalid                // Error EL1t
++
++      kernel_ventry   1, sync                         // Synchronous EL1h
++      kernel_ventry   1, irq                          // IRQ EL1h
++      kernel_ventry   1, fiq_invalid                  // FIQ EL1h
++      kernel_ventry   1, error                        // Error EL1h
++
++      .rept   4
++      tramp_ventry    .Lvector_start\@, 64, 0, \bhb
++      .endr
++      .rept 4
++      tramp_ventry    .Lvector_start\@, 32, 0, \bhb
++      .endr
++      .endm
++
++/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
++      .pushsection ".entry.text", "ax"
++      .align  11
++SYM_CODE_START(__bp_harden_el1_vectors)
++#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
++      generate_el1_vector     bhb=BHB_MITIGATION_LOOP
++      generate_el1_vector     bhb=BHB_MITIGATION_FW
++      generate_el1_vector     bhb=BHB_MITIGATION_INSN
++#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
++SYM_CODE_END(__bp_harden_el1_vectors)
++      .popsection
++
++
+ /*
+  * Register switch for AArch64. The callee-saved registers need to be saved
+  * and restored. On entry:
+@@ -969,13 +1083,7 @@ SYM_CODE_START(__sdei_asm_entry_trampoline)
+        */
+ 1:    str     x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
+ 
+-#ifdef CONFIG_RANDOMIZE_BASE
+-      adr     x4, tramp_vectors + PAGE_SIZE
+-      add     x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
+-      ldr     x4, [x4]
+-#else
+-      ldr     x4, =__sdei_asm_handler
+-#endif
++      tramp_data_read_var     x4, __sdei_asm_handler
+       br      x4
+ SYM_CODE_END(__sdei_asm_entry_trampoline)
+ NOKPROBE(__sdei_asm_entry_trampoline)
+@@ -998,13 +1106,6 @@ SYM_CODE_END(__sdei_asm_exit_trampoline)
+ NOKPROBE(__sdei_asm_exit_trampoline)
+       .ltorg
+ .popsection           // .entry.tramp.text
+-#ifdef CONFIG_RANDOMIZE_BASE
+-.pushsection ".rodata", "a"
+-SYM_DATA_START(__sdei_asm_trampoline_next_handler)
+-      .quad   __sdei_asm_handler
+-SYM_DATA_END(__sdei_asm_trampoline_next_handler)
+-.popsection           // .rodata
+-#endif /* CONFIG_RANDOMIZE_BASE */
+ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+ 
+ /*
+@@ -1112,7 +1213,7 @@ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
+ alternative_else_nop_endif
+ 
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+-      tramp_alias     dst=x5, sym=__sdei_asm_exit_trampoline
++      tramp_alias     dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
+       br      x5
+ #endif
+ SYM_CODE_END(__sdei_asm_handler)
+diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
+index f6e4e3737405d..3dd489b62b29f 100644
+--- a/arch/arm64/kernel/proton-pack.c
++++ b/arch/arm64/kernel/proton-pack.c
+@@ -18,14 +18,18 @@
+  */
+ 
+ #include <linux/arm-smccc.h>
++#include <linux/bpf.h>
+ #include <linux/cpu.h>
+ #include <linux/device.h>
+ #include <linux/nospec.h>
+ #include <linux/prctl.h>
+ #include <linux/sched/task_stack.h>
+ 
++#include <asm/insn.h>
+ #include <asm/spectre.h>
+ #include <asm/traps.h>
++#include <asm/vectors.h>
++#include <asm/virt.h>
+ 
+ /*
+  * We try to ensure that the mitigation state can never change as the result of
+@@ -94,14 +98,51 @@ static bool spectre_v2_mitigations_off(void)
+       return ret;
+ }
+ 
++static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
++{
++      switch (bhb_state) {
++      case SPECTRE_UNAFFECTED:
++              return "";
++      default:
++      case SPECTRE_VULNERABLE:
++              return ", but not BHB";
++      case SPECTRE_MITIGATED:
++              return ", BHB";
++      }
++}
++
++static bool _unprivileged_ebpf_enabled(void)
++{
++#ifdef CONFIG_BPF_SYSCALL
++      return !sysctl_unprivileged_bpf_disabled;
++#else
++      return false;
++#endif
++}
++
+ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+ {
++      enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
++      const char *bhb_str = get_bhb_affected_string(bhb_state);
++      const char *v2_str = "Branch predictor hardening";
++
+       switch (spectre_v2_state) {
+       case SPECTRE_UNAFFECTED:
+-              return sprintf(buf, "Not affected\n");
++              if (bhb_state == SPECTRE_UNAFFECTED)
++                      return sprintf(buf, "Not affected\n");
++
++              /*
++               * Platforms affected by Spectre-BHB can't report
++               * "Not affected" for Spectre-v2.
++               */
++              v2_str = "CSV2";
++              fallthrough;
+       case SPECTRE_MITIGATED:
+-              return sprintf(buf, "Mitigation: Branch predictor hardening\n");
++              if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
++                      return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
++
++              return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
+       case SPECTRE_VULNERABLE:
+               fallthrough;
+       default:
+@@ -195,9 +236,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+       __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+ }
+ 
++static DEFINE_RAW_SPINLOCK(bp_lock);
+ static void install_bp_hardening_cb(bp_hardening_cb_t fn)
+ {
+-      static DEFINE_RAW_SPINLOCK(bp_lock);
+       int cpu, slot = -1;
+       const char *hyp_vecs_start = __smccc_workaround_1_smc;
+       const char *hyp_vecs_end = __smccc_workaround_1_smc +
+@@ -228,6 +269,7 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn)
+ 
+       __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+       __this_cpu_write(bp_hardening_data.fn, fn);
++      __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
+       raw_spin_unlock(&bp_lock);
+ }
+ #else
+@@ -571,9 +613,9 @@ void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
+  * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
+  * to call into firmware to adjust the mitigation state.
+  */
+-void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
+-                                                 __le32 *origptr,
+-                                                 __le32 *updptr, int nr_inst)
++void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
++                                             __le32 *origptr,
++                                             __le32 *updptr, int nr_inst)
+ {
+       u32 insn;
+ 
+@@ -787,3 +829,308 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+               return -ENODEV;
+       }
+ }
++
++/*
++ * Spectre BHB.
++ *
++ * A CPU is either:
++ * - Mitigated by a branchy loop a CPU specific number of times, and listed
++ *   in our "loop mitigated list".
++ * - Mitigated in software by the firmware Spectre v2 call.
++ * - Has the ClearBHB instruction to perform the mitigation.
++ * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
++ *   software mitigation in the vectors is needed.
++ * - Has CSV2.3, so is unaffected.
++ */
++static enum mitigation_state spectre_bhb_state;
++
++enum mitigation_state arm64_get_spectre_bhb_state(void)
++{
++      return spectre_bhb_state;
++}
++
++/*
++ * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
++ * SCOPE_SYSTEM call will give the right answer.
++ */
++u8 spectre_bhb_loop_affected(int scope)
++{
++      u8 k = 0;
++      static u8 max_bhb_k;
++
++      if (scope == SCOPE_LOCAL_CPU) {
++              static const struct midr_range spectre_bhb_k32_list[] = {
++                      MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
++                      MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
++                      MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
++                      MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
++                      MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
++                      MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
++                      MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
++                      {},
++              };
++              static const struct midr_range spectre_bhb_k24_list[] = {
++                      MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
++                      MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
++                      MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
++                      {},
++              };
++              static const struct midr_range spectre_bhb_k8_list[] = {
++                      MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
++                      MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
++                      {},
++              };
++
++              if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
++                      k = 32;
++              else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
++                      k = 24;
++              else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
++                      k =  8;
++
++              max_bhb_k = max(max_bhb_k, k);
++      } else {
++              k = max_bhb_k;
++      }
++
++      return k;
++}
++
++static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
++{
++      int ret;
++      struct arm_smccc_res res;
++
++      arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++                           ARM_SMCCC_ARCH_WORKAROUND_3, &res);
++
++      ret = res.a0;
++      switch (ret) {
++      case SMCCC_RET_SUCCESS:
++              return SPECTRE_MITIGATED;
++      case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
++              return SPECTRE_UNAFFECTED;
++      default:
++              fallthrough;
++      case SMCCC_RET_NOT_SUPPORTED:
++              return SPECTRE_VULNERABLE;
++      }
++}
++
++static bool is_spectre_bhb_fw_affected(int scope)
++{
++      static bool system_affected;
++      enum mitigation_state fw_state;
++      bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
++      static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
++              MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
++              MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
++              {},
++      };
++      bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
++                                       spectre_bhb_firmware_mitigated_list);
++
++      if (scope != SCOPE_LOCAL_CPU)
++              return system_affected;
++
++      fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
++      if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
++              system_affected = true;
++              return true;
++      }
++
++      return false;
++}
++
++static bool supports_ecbhb(int scope)
++{
++      u64 mmfr1;
++
++      if (scope == SCOPE_LOCAL_CPU)
++              mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
++      else
++              mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
++
++      return cpuid_feature_extract_unsigned_field(mmfr1,
++                                                  ID_AA64MMFR1_ECBHB_SHIFT);
++}
++
++bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
++                           int scope)
++{
++      WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++
++      if (supports_csv2p3(scope))
++              return false;
++
++      if (supports_clearbhb(scope))
++              return true;
++
++      if (spectre_bhb_loop_affected(scope))
++              return true;
++
++      if (is_spectre_bhb_fw_affected(scope))
++              return true;
++
++      return false;
++}
++
++static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
++{
++      const char *v = arm64_get_bp_hardening_vector(slot);
++
++      if (slot < 0)
++              return;
++
++      __this_cpu_write(this_cpu_vector, v);
++
++      /*
++       * When KPTI is in use, the vectors are switched when exiting to
++       * user-space.
++       */
++      if (arm64_kernel_unmapped_at_el0())
++              return;
++
++      write_sysreg(v, vbar_el1);
++      isb();
++}
++
++#ifdef CONFIG_KVM
++static int kvm_bhb_get_vecs_size(const char *start)
++{
++      if (start == __smccc_workaround_3_smc)
++              return __SMCCC_WORKAROUND_3_SMC_SZ;
++      else if (start == __spectre_bhb_loop_k8 ||
++               start == __spectre_bhb_loop_k24 ||
++               start == __spectre_bhb_loop_k32)
++              return __SPECTRE_BHB_LOOP_SZ;
++      else if (start == __spectre_bhb_clearbhb)
++              return __SPECTRE_BHB_CLEARBHB_SZ;
++
++      return 0;
++}
++
++static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
++{
++      int cpu, slot = -1, size;
++      const char *hyp_vecs_end;
++
++      if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
++              return;
++
++      size = kvm_bhb_get_vecs_size(hyp_vecs_start);
++      if (WARN_ON_ONCE(!hyp_vecs_start || !size))
++              return;
++      hyp_vecs_end = hyp_vecs_start + size;
++
++      raw_spin_lock(&bp_lock);
++      for_each_possible_cpu(cpu) {
++              if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
++                      slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
++                      break;
++              }
++      }
++
++      if (slot == -1) {
++              slot = atomic_inc_return(&arm64_el2_vector_last_slot);
++              BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
++              __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
++      }
++
++      __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
++      __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
++      raw_spin_unlock(&bp_lock);
++}
++#else
++#define __smccc_workaround_3_smc NULL
++#define __spectre_bhb_loop_k8 NULL
++#define __spectre_bhb_loop_k24 NULL
++#define __spectre_bhb_loop_k32 NULL
++#define __spectre_bhb_clearbhb NULL
++
++static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { }
++#endif /* CONFIG_KVM */
++
++void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
++{
++      enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
++
++      if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
++              return;
++
++      if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
++              /* No point mitigating Spectre-BHB alone. */
++      } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
++              pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
++      } else if (cpu_mitigations_off()) {
++              pr_info_once("spectre-bhb mitigation disabled by command line option\n");
++      } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
++              state = SPECTRE_MITIGATED;
++      } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
++              kvm_setup_bhb_slot(__spectre_bhb_clearbhb);
++              this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
++
++              state = SPECTRE_MITIGATED;
++      } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
++              switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
++              case 8:
++                      kvm_setup_bhb_slot(__spectre_bhb_loop_k8);
++                      break;
++              case 24:
++                      kvm_setup_bhb_slot(__spectre_bhb_loop_k24);
++                      break;
++              case 32:
++                      kvm_setup_bhb_slot(__spectre_bhb_loop_k32);
++                      break;
++              default:
++                      WARN_ON_ONCE(1);
++              }
++              this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
++
++              state = SPECTRE_MITIGATED;
++      } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
++              fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
++              if (fw_state == SPECTRE_MITIGATED) {
++                      kvm_setup_bhb_slot(__smccc_workaround_3_smc);
++                      this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
++
++                      state = SPECTRE_MITIGATED;
++              }
++      }
++
++      update_mitigation_state(&spectre_bhb_state, state);
++}
++
++/* Patched to correct the immediate */
++void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
++                                 __le32 *origptr, __le32 *updptr, int nr_inst)
++{
++      u8 rd;
++      u32 insn;
++      u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
++
++      BUG_ON(nr_inst != 1); /* MOV -> MOV */
++
++      if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
++              return;
++
++      insn = le32_to_cpu(*origptr);
++      rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
++      insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
++                                       AARCH64_INSN_VARIANT_64BIT,
++                                       AARCH64_INSN_MOVEWIDE_ZERO);
++      *updptr++ = cpu_to_le32(insn);
++}
++
++#ifdef CONFIG_BPF_SYSCALL
++#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
++void unpriv_ebpf_notify(int new_state)
++{
++      if (spectre_v2_state == SPECTRE_VULNERABLE ||
++          spectre_bhb_state != SPECTRE_MITIGATED)
++              return;
++
++      if (!new_state)
++              pr_err("WARNING: %s", EBPF_WARN);
++}
++#endif
+diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
+index 30c1029789427..71f4b5f24d15f 100644
+--- a/arch/arm64/kernel/vmlinux.lds.S
++++ b/arch/arm64/kernel/vmlinux.lds.S
+@@ -299,7 +299,7 @@ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
+       <= SZ_4K, "Hibernate exit text too big or misaligned")
+ #endif
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+-ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
++ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
+       "Entry trampoline text too big")
+ #endif
+ /*
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 5bc978be80434..4d63fcd7574b2 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -1337,7 +1337,8 @@ static int kvm_map_vectors(void)
+        * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
+        *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
+        */
+-      if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
++      if (cpus_have_const_cap(ARM64_SPECTRE_V2) ||
++          cpus_have_const_cap(ARM64_SPECTRE_BHB)) {
+               __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
+               __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+       }
+diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
+index bcbead3746c66..bc06243cf4225 100644
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -61,6 +61,10 @@ el1_sync:                           // Guest trapped into EL2
+       /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
+       eor     w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
+                         ARM_SMCCC_ARCH_WORKAROUND_2)
++      cbz     w1, wa_epilogue
++
++      eor     w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
++                        ARM_SMCCC_ARCH_WORKAROUND_3)
+       cbnz    w1, el1_trap
+ 
+ wa_epilogue:
+diff --git a/arch/arm64/kvm/hyp/smccc_wa.S b/arch/arm64/kvm/hyp/smccc_wa.S
+index b0441dbdf68bd..24b281912463d 100644
+--- a/arch/arm64/kvm/hyp/smccc_wa.S
++++ b/arch/arm64/kvm/hyp/smccc_wa.S
+@@ -30,3 +30,78 @@ SYM_DATA_START(__smccc_workaround_1_smc)
+ 1:    .org __smccc_workaround_1_smc + __SMCCC_WORKAROUND_1_SMC_SZ
+       .org 1b
+ SYM_DATA_END(__smccc_workaround_1_smc)
++
++      .global         __smccc_workaround_3_smc
++SYM_DATA_START(__smccc_workaround_3_smc)
++      esb
++      sub     sp, sp, #(8 * 4)
++      stp     x2, x3, [sp, #(8 * 0)]
++      stp     x0, x1, [sp, #(8 * 2)]
++      mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_3
++      smc     #0
++      ldp     x2, x3, [sp, #(8 * 0)]
++      ldp     x0, x1, [sp, #(8 * 2)]
++      add     sp, sp, #(8 * 4)
++1:    .org __smccc_workaround_3_smc + __SMCCC_WORKAROUND_3_SMC_SZ
++      .org 1b
++SYM_DATA_END(__smccc_workaround_3_smc)
++
++      .global __spectre_bhb_loop_k8
++SYM_DATA_START(__spectre_bhb_loop_k8)
++      esb
++      sub     sp, sp, #(8 * 2)
++      stp     x0, x1, [sp, #(8 * 0)]
++      mov     x0, #8
++2:    b       . + 4
++      subs    x0, x0, #1
++      b.ne    2b
++      dsb     nsh
++      isb
++      ldp     x0, x1, [sp, #(8 * 0)]
++      add     sp, sp, #(8 * 2)
++1:    .org __spectre_bhb_loop_k8 + __SPECTRE_BHB_LOOP_SZ
++      .org 1b
++SYM_DATA_END(__spectre_bhb_loop_k8)
++
++      .global __spectre_bhb_loop_k24
++SYM_DATA_START(__spectre_bhb_loop_k24)
++      esb
++      sub     sp, sp, #(8 * 2)
++      stp     x0, x1, [sp, #(8 * 0)]
++      mov     x0, #8
++2:    b       . + 4
++      subs    x0, x0, #1
++      b.ne    2b
++      dsb     nsh
++      isb
++      ldp     x0, x1, [sp, #(8 * 0)]
++      add     sp, sp, #(8 * 2)
++1:    .org __spectre_bhb_loop_k24 + __SPECTRE_BHB_LOOP_SZ
++      .org 1b
++SYM_DATA_END(__spectre_bhb_loop_k24)
++
++      .global __spectre_bhb_loop_k32
++SYM_DATA_START(__spectre_bhb_loop_k32)
++      esb
++      sub     sp, sp, #(8 * 2)
++      stp     x0, x1, [sp, #(8 * 0)]
++      mov     x0, #8
++2:    b       . + 4
++      subs    x0, x0, #1
++      b.ne    2b
++      dsb     nsh
++      isb
++      ldp     x0, x1, [sp, #(8 * 0)]
++      add     sp, sp, #(8 * 2)
++1:    .org __spectre_bhb_loop_k32 + __SPECTRE_BHB_LOOP_SZ
++      .org 1b
++SYM_DATA_END(__spectre_bhb_loop_k32)
++
++      .global __spectre_bhb_clearbhb
++SYM_DATA_START(__spectre_bhb_clearbhb)
++      esb
++      clearbhb
++      isb
++1:    .org __spectre_bhb_clearbhb + __SPECTRE_BHB_CLEARBHB_SZ
++      .org 1b
++SYM_DATA_END(__spectre_bhb_clearbhb)
+diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
+index 62546e20b2511..532e687f69366 100644
+--- a/arch/arm64/kvm/hyp/vhe/switch.c
++++ b/arch/arm64/kvm/hyp/vhe/switch.c
+@@ -10,6 +10,7 @@
+ #include <linux/kvm_host.h>
+ #include <linux/types.h>
+ #include <linux/jump_label.h>
++#include <linux/percpu.h>
+ #include <uapi/linux/psci.h>
+ 
+ #include <kvm/arm_psci.h>
+@@ -25,6 +26,7 @@
+ #include <asm/debug-monitors.h>
+ #include <asm/processor.h>
+ #include <asm/thread_info.h>
++#include <asm/vectors.h>
+ 
+ const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
+ 
+@@ -70,7 +72,7 @@ NOKPROBE_SYMBOL(__activate_traps);
+ 
+ static void __deactivate_traps(struct kvm_vcpu *vcpu)
+ {
+-      extern char vectors[];  /* kernel exception vectors */
++      const char *host_vectors = vectors;
+ 
+       ___deactivate_traps(vcpu);
+ 
+@@ -84,7 +86,10 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
+       asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
+ 
+       write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
+-      write_sysreg(vectors, vbar_el1);
++
++      if (!arm64_kernel_unmapped_at_el0())
++              host_vectors = __this_cpu_read(this_cpu_vector);
++      write_sysreg(host_vectors, vbar_el1);
+ }
+ NOKPROBE_SYMBOL(__deactivate_traps);
+ 
+diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
+index 25ea4ecb6449f..bc111a1aff032 100644
+--- a/arch/arm64/kvm/hypercalls.c
++++ b/arch/arm64/kvm/hypercalls.c
+@@ -58,6 +58,18 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
+                               break;
+                       }
+                       break;
++              case ARM_SMCCC_ARCH_WORKAROUND_3:
++                      switch (arm64_get_spectre_bhb_state()) {
++                      case SPECTRE_VULNERABLE:
++                              break;
++                      case SPECTRE_MITIGATED:
++                              val = SMCCC_RET_SUCCESS;
++                              break;
++                      case SPECTRE_UNAFFECTED:
++                              val = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
++                              break;
++                      }
++                      break;
+               case ARM_SMCCC_HV_PV_TIME_FEATURES:
+                       val = SMCCC_RET_SUCCESS;
+                       break;
+diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
+index db4056ecccfda..20ba5136ac3dd 100644
+--- a/arch/arm64/kvm/psci.c
++++ b/arch/arm64/kvm/psci.c
+@@ -397,7 +397,7 @@ int kvm_psci_call(struct kvm_vcpu *vcpu)
+ 
+ int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
+ {
+-      return 3;               /* PSCI version and two workaround registers */
++      return 4;               /* PSCI version and three workaround registers */
+ }
+ 
+ int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+@@ -411,6 +411,9 @@ int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+       if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++))
+               return -EFAULT;
+ 
++      if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3, uindices++))
++              return -EFAULT;
++
+       return 0;
+ }
+ 
+@@ -450,6 +453,17 @@ static int get_kernel_wa_level(u64 regid)
+               case SPECTRE_VULNERABLE:
+                       return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
+               }
++              break;
++      case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
++              switch (arm64_get_spectre_bhb_state()) {
++              case SPECTRE_VULNERABLE:
++                      return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
++              case SPECTRE_MITIGATED:
++                      return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL;
++              case SPECTRE_UNAFFECTED:
++                      return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED;
++              }
++              return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
+       }
+ 
+       return -EINVAL;
+@@ -466,6 +480,7 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+               break;
+       case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
+       case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
++      case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
+               val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
+               break;
+       default:
+@@ -511,6 +526,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+       }
+ 
+       case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
++      case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
+               if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
+                       return -EINVAL;
+ 
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 568f11e23830c..835fa036b2d54 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1517,7 +1517,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
+       /* CRm=6 */
+       ID_SANITISED(ID_AA64ISAR0_EL1),
+       ID_SANITISED(ID_AA64ISAR1_EL1),
+-      ID_UNALLOCATED(6,2),
++      ID_SANITISED(ID_AA64ISAR2_EL1),
+       ID_UNALLOCATED(6,3),
+       ID_UNALLOCATED(6,4),
+       ID_UNALLOCATED(6,5),
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index 2601a514d8c4a..991e599f70577 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -592,6 +592,8 @@ early_param("rodata", parse_rodata);
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ static int __init map_entry_trampoline(void)
+ {
++      int i;
++
+       pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
+       phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
+ 
+@@ -600,11 +602,15 @@ static int __init map_entry_trampoline(void)
+ 
+       /* Map only the text into the trampoline page table */
+       memset(tramp_pg_dir, 0, PGD_SIZE);
+-      __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
+-                           prot, __pgd_pgtable_alloc, 0);
++      __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
++                           entry_tramp_text_size(), prot,
++                           __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS);
+ 
+       /* Map both the text and data into the kernel page table */
+-      __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
++      for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
++              __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
++                           pa_start + i * PAGE_SIZE, prot);
++
+       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+               extern char __entry_tramp_data_start[];
+ 
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index dad350d42ecfb..3b407f46f1a0d 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -204,7 +204,7 @@
+ #define X86_FEATURE_SME                       ( 7*32+10) /* AMD Secure Memory Encryption */
+ #define X86_FEATURE_PTI                       ( 7*32+11) /* Kernel Page Table Isolation enabled */
+ #define X86_FEATURE_RETPOLINE         ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+-#define X86_FEATURE_RETPOLINE_AMD     ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
++#define X86_FEATURE_RETPOLINE_LFENCE  ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */
+ #define X86_FEATURE_INTEL_PPIN                ( 7*32+14) /* Intel Processor Inventory Number */
+ #define X86_FEATURE_CDP_L2            ( 7*32+15) /* Code and Data Prioritization L2 */
+ #define X86_FEATURE_MSR_SPEC_CTRL     ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index cb9ad6b739737..4d0f5386e637b 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -82,7 +82,7 @@
+ #ifdef CONFIG_RETPOLINE
+       ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
+                     __stringify(jmp __x86_retpoline_\reg), X86_FEATURE_RETPOLINE, \
+-                    __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_AMD
++                    __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_LFENCE
+ #else
+       jmp     *%\reg
+ #endif
+@@ -92,7 +92,7 @@
+ #ifdef CONFIG_RETPOLINE
+       ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
+                     __stringify(call __x86_retpoline_\reg), X86_FEATURE_RETPOLINE, \
+-                    __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_AMD
++                    __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_LFENCE
+ #else
+       call    *%\reg
+ #endif
+@@ -134,7 +134,7 @@
+       "lfence;\n"                                             \
+       ANNOTATE_RETPOLINE_SAFE                                 \
+       "call *%[thunk_target]\n",                              \
+-      X86_FEATURE_RETPOLINE_AMD)
++      X86_FEATURE_RETPOLINE_LFENCE)
+ 
+ # define THUNK_TARGET(addr) [thunk_target] "r" (addr)
+ 
+@@ -164,7 +164,7 @@
+       "lfence;\n"                                             \
+       ANNOTATE_RETPOLINE_SAFE                                 \
+       "call *%[thunk_target]\n",                              \
+-      X86_FEATURE_RETPOLINE_AMD)
++      X86_FEATURE_RETPOLINE_LFENCE)
+ 
+ # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+ #endif
+@@ -176,9 +176,11 @@
+ /* The Spectre V2 mitigation variants */
+ enum spectre_v2_mitigation {
+       SPECTRE_V2_NONE,
+-      SPECTRE_V2_RETPOLINE_GENERIC,
+-      SPECTRE_V2_RETPOLINE_AMD,
+-      SPECTRE_V2_IBRS_ENHANCED,
++      SPECTRE_V2_RETPOLINE,
++      SPECTRE_V2_LFENCE,
++      SPECTRE_V2_EIBRS,
++      SPECTRE_V2_EIBRS_RETPOLINE,
++      SPECTRE_V2_EIBRS_LFENCE,
+ };
+ 
+ /* The indirect branch speculation control variants */
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index d41b70fe4918e..78b9514a38440 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -16,6 +16,7 @@
+ #include <linux/prctl.h>
+ #include <linux/sched/smt.h>
+ #include <linux/pgtable.h>
++#include <linux/bpf.h>
+ 
+ #include <asm/spec-ctrl.h>
+ #include <asm/cmdline.h>
+@@ -613,6 +614,32 @@ static inline const char *spectre_v2_module_string(void)
+ static inline const char *spectre_v2_module_string(void) { return ""; }
+ #endif
+ 
++#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
++#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
++#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
++
++#ifdef CONFIG_BPF_SYSCALL
++void unpriv_ebpf_notify(int new_state)
++{
++      if (new_state)
++              return;
++
++      /* Unprivileged eBPF is enabled */
++
++      switch (spectre_v2_enabled) {
++      case SPECTRE_V2_EIBRS:
++              pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
++              break;
++      case SPECTRE_V2_EIBRS_LFENCE:
++              if (sched_smt_active())
++                      pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
++              break;
++      default:
++              break;
++      }
++}
++#endif
++
+ static inline bool match_option(const char *arg, int arglen, const char *opt)
+ {
+       int len = strlen(opt);
+@@ -627,7 +654,10 @@ enum spectre_v2_mitigation_cmd {
+       SPECTRE_V2_CMD_FORCE,
+       SPECTRE_V2_CMD_RETPOLINE,
+       SPECTRE_V2_CMD_RETPOLINE_GENERIC,
+-      SPECTRE_V2_CMD_RETPOLINE_AMD,
++      SPECTRE_V2_CMD_RETPOLINE_LFENCE,
++      SPECTRE_V2_CMD_EIBRS,
++      SPECTRE_V2_CMD_EIBRS_RETPOLINE,
++      SPECTRE_V2_CMD_EIBRS_LFENCE,
+ };
+ 
+ enum spectre_v2_user_cmd {
+@@ -700,6 +730,13 @@ spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
+       return SPECTRE_V2_USER_CMD_AUTO;
+ }
+ 
++static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
++{
++      return (mode == SPECTRE_V2_EIBRS ||
++              mode == SPECTRE_V2_EIBRS_RETPOLINE ||
++              mode == SPECTRE_V2_EIBRS_LFENCE);
++}
++
+ static void __init
+ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+ {
+@@ -767,7 +804,7 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+        */
+       if (!boot_cpu_has(X86_FEATURE_STIBP) ||
+           !smt_possible ||
+-          spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
++          spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+               return;
+ 
+       /*
+@@ -787,9 +824,11 @@ set_mode:
+ 
+ static const char * const spectre_v2_strings[] = {
+       [SPECTRE_V2_NONE]                       = "Vulnerable",
+-      [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
+-      [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
+-      [SPECTRE_V2_IBRS_ENHANCED]              = "Mitigation: Enhanced IBRS",
++      [SPECTRE_V2_RETPOLINE]                  = "Mitigation: Retpolines",
++      [SPECTRE_V2_LFENCE]                     = "Mitigation: LFENCE",
++      [SPECTRE_V2_EIBRS]                      = "Mitigation: Enhanced IBRS",
++      [SPECTRE_V2_EIBRS_LFENCE]               = "Mitigation: Enhanced IBRS + LFENCE",
++      [SPECTRE_V2_EIBRS_RETPOLINE]            = "Mitigation: Enhanced IBRS + Retpolines",
+ };
+ 
+ static const struct {
+@@ -800,8 +839,12 @@ static const struct {
+       { "off",                SPECTRE_V2_CMD_NONE,              false },
+       { "on",                 SPECTRE_V2_CMD_FORCE,             true  },
+       { "retpoline",          SPECTRE_V2_CMD_RETPOLINE,         false },
+-      { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
++      { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
++      { "retpoline,lfence",   SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
+       { "retpoline,generic",  SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
++      { "eibrs",              SPECTRE_V2_CMD_EIBRS,             false },
++      { "eibrs,lfence",       SPECTRE_V2_CMD_EIBRS_LFENCE,      false },
++      { "eibrs,retpoline",    SPECTRE_V2_CMD_EIBRS_RETPOLINE,   false },
+       { "auto",               SPECTRE_V2_CMD_AUTO,              false },
+ };
+ 
+@@ -838,17 +881,30 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+       }
+ 
+       if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
+-           cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
+-           cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
++           cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
++           cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
++           cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
++           cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
+           !IS_ENABLED(CONFIG_RETPOLINE)) {
+-              pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
++              pr_err("%s selected but not compiled in. Switching to AUTO select\n",
++                     mitigation_options[i].option);
+               return SPECTRE_V2_CMD_AUTO;
+       }
+ 
+-      if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
+-          boot_cpu_data.x86_vendor != X86_VENDOR_HYGON &&
+-          boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+-              pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
++      if ((cmd == SPECTRE_V2_CMD_EIBRS ||
++           cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
++           cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
++          !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
++              pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
++                     mitigation_options[i].option);
++              return SPECTRE_V2_CMD_AUTO;
++      }
++
++      if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
++           cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
++          !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
++              pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
++                     mitigation_options[i].option);
+               return SPECTRE_V2_CMD_AUTO;
+       }
+ 
+@@ -857,6 +913,16 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+       return cmd;
+ }
+ 
++static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
++{
++      if (!IS_ENABLED(CONFIG_RETPOLINE)) {
++              pr_err("Kernel not compiled with retpoline; no mitigation available!");
++              return SPECTRE_V2_NONE;
++      }
++
++      return SPECTRE_V2_RETPOLINE;
++}
++
+ static void __init spectre_v2_select_mitigation(void)
+ {
+       enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+@@ -877,49 +943,64 @@ static void __init spectre_v2_select_mitigation(void)
+       case SPECTRE_V2_CMD_FORCE:
+       case SPECTRE_V2_CMD_AUTO:
+               if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
+-                      mode = SPECTRE_V2_IBRS_ENHANCED;
+-                      /* Force it so VMEXIT will restore correctly */
+-                      x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+-                      wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+-                      goto specv2_set_mode;
++                      mode = SPECTRE_V2_EIBRS;
++                      break;
+               }
+-              if (IS_ENABLED(CONFIG_RETPOLINE))
+-                      goto retpoline_auto;
++
++              mode = spectre_v2_select_retpoline();
+               break;
+-      case SPECTRE_V2_CMD_RETPOLINE_AMD:
+-              if (IS_ENABLED(CONFIG_RETPOLINE))
+-                      goto retpoline_amd;
++
++      case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
++              pr_err(SPECTRE_V2_LFENCE_MSG);
++              mode = SPECTRE_V2_LFENCE;
+               break;
++
+       case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
+-              if (IS_ENABLED(CONFIG_RETPOLINE))
+-                      goto retpoline_generic;
++              mode = SPECTRE_V2_RETPOLINE;
+               break;
++
+       case SPECTRE_V2_CMD_RETPOLINE:
+-              if (IS_ENABLED(CONFIG_RETPOLINE))
+-                      goto retpoline_auto;
++              mode = spectre_v2_select_retpoline();
++              break;
++
++      case SPECTRE_V2_CMD_EIBRS:
++              mode = SPECTRE_V2_EIBRS;
++              break;
++
++      case SPECTRE_V2_CMD_EIBRS_LFENCE:
++              mode = SPECTRE_V2_EIBRS_LFENCE;
++              break;
++
++      case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
++              mode = SPECTRE_V2_EIBRS_RETPOLINE;
+               break;
+       }
+-      pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
+-      return;
+ 
+-retpoline_auto:
+-      if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+-          boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+-      retpoline_amd:
+-              if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
+-                      pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
+-                      goto retpoline_generic;
+-              }
+-              mode = SPECTRE_V2_RETPOLINE_AMD;
+-              setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
+-              setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+-      } else {
+-      retpoline_generic:
+-              mode = SPECTRE_V2_RETPOLINE_GENERIC;
++      if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
++              pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
++
++      if (spectre_v2_in_eibrs_mode(mode)) {
++              /* Force it so VMEXIT will restore correctly */
++              x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
++              wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
++      }
++
++      switch (mode) {
++      case SPECTRE_V2_NONE:
++      case SPECTRE_V2_EIBRS:
++              break;
++
++      case SPECTRE_V2_LFENCE:
++      case SPECTRE_V2_EIBRS_LFENCE:
++              setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
++              fallthrough;
++
++      case SPECTRE_V2_RETPOLINE:
++      case SPECTRE_V2_EIBRS_RETPOLINE:
+               setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
++              break;
+       }
+ 
+-specv2_set_mode:
+       spectre_v2_enabled = mode;
+       pr_info("%s\n", spectre_v2_strings[mode]);
+ 
+@@ -945,7 +1026,7 @@ specv2_set_mode:
+        * the CPU supports Enhanced IBRS, kernel might un-intentionally not
+        * enable IBRS around firmware calls.
+        */
+-      if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
++      if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
+               setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
+               pr_info("Enabling Restricted Speculation for firmware calls\n");
+       }
+@@ -1015,6 +1096,10 @@ void cpu_bugs_smt_update(void)
+ {
+       mutex_lock(&spec_ctrl_mutex);
+ 
++      if (sched_smt_active() && unprivileged_ebpf_enabled() &&
++          spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
++              pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
++
+       switch (spectre_v2_user_stibp) {
+       case SPECTRE_V2_USER_NONE:
+               break;
+@@ -1621,7 +1706,7 @@ static ssize_t tsx_async_abort_show_state(char *buf)
+ 
+ static char *stibp_state(void)
+ {
+-      if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
++      if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+               return "";
+ 
+       switch (spectre_v2_user_stibp) {
+@@ -1651,6 +1736,27 @@ static char *ibpb_state(void)
+       return "";
+ }
+ 
++static ssize_t spectre_v2_show_state(char *buf)
++{
++      if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
++              return sprintf(buf, "Vulnerable: LFENCE\n");
++
++      if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
++              return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
++
++      if (sched_smt_active() && unprivileged_ebpf_enabled() &&
++          spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
++              return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
++
++      return sprintf(buf, "%s%s%s%s%s%s\n",
++                     spectre_v2_strings[spectre_v2_enabled],
++                     ibpb_state(),
++                     boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
++                     stibp_state(),
++                     boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
++                     spectre_v2_module_string());
++}
++
+ static ssize_t srbds_show_state(char *buf)
+ {
+       return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
+@@ -1676,12 +1782,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
+               return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
+ 
+       case X86_BUG_SPECTRE_V2:
+-              return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+-                             ibpb_state(),
+-                             boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+-                             stibp_state(),
+-                             boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+-                             spectre_v2_module_string());
++              return spectre_v2_show_state(buf);
+ 
+       case X86_BUG_SPEC_STORE_BYPASS:
+               return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 8347eaee679c8..3f2e5ea9ab6b7 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -2064,16 +2064,6 @@ bool acpi_ec_dispatch_gpe(void)
+       if (acpi_any_gpe_status_set(first_ec->gpe))
+               return true;
+ 
+-      /*
+-       * Cancel the SCI wakeup and process all pending events in case there
+-       * are any wakeup ones in there.
+-       *
+-       * Note that if any non-EC GPEs are active at this point, the SCI will
+-       * retrigger after the rearming in acpi_s2idle_wake(), so no events
+-       * should be missed by canceling the wakeup here.
+-       */
+-      pm_system_cancel_wakeup();
+-
+       /*
+        * Dispatch the EC GPE in-band, but do not report wakeup in any case
+        * to allow the caller to process events properly after that.
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index e2614ea820bb8..503935b1deeb1 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -1012,15 +1012,21 @@ static bool acpi_s2idle_wake(void)
+                       return true;
+               }
+ 
+-              /*
+-               * Check non-EC GPE wakeups and if there are none, cancel the
+-               * SCI-related wakeup and dispatch the EC GPE.
+-               */
++              /* Check non-EC GPE wakeups and dispatch the EC GPE. */
+               if (acpi_ec_dispatch_gpe()) {
+                       pm_pr_dbg("ACPI non-EC GPE wakeup\n");
+                       return true;
+               }
+ 
++              /*
++               * Cancel the SCI wakeup and process all pending events in case
++               * there are any wakeup ones in there.
++               *
++               * Note that if any non-EC GPEs are active at this point, the
++               * SCI will retrigger after the rearming below, so no events
++               * should be missed by canceling the wakeup here.
++               */
++              pm_system_cancel_wakeup();
+               acpi_os_wait_events_complete();
+ 
+               /*
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 22842d2938c28..47d4bb23d6f31 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -1352,7 +1352,8 @@ free_shadow:
+                       rinfo->ring_ref[i] = GRANT_INVALID_REF;
+               }
+       }
+-      free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
++      free_pages_exact(rinfo->ring.sring,
++                       info->nr_ring_pages * XEN_PAGE_SIZE);
+       rinfo->ring.sring = NULL;
+ 
+       if (rinfo->irq)
+@@ -1436,9 +1437,15 @@ static int blkif_get_final_status(enum blk_req_status s1,
+       return BLKIF_RSP_OKAY;
+ }
+ 
+-static bool blkif_completion(unsigned long *id,
+-                           struct blkfront_ring_info *rinfo,
+-                           struct blkif_response *bret)
++/*
++ * Return values:
++ *  1 response processed.
++ *  0 missing further responses.
++ * -1 error while processing.
++ */
++static int blkif_completion(unsigned long *id,
++                          struct blkfront_ring_info *rinfo,
++                          struct blkif_response *bret)
+ {
+       int i = 0;
+       struct scatterlist *sg;
+@@ -1461,7 +1468,7 @@ static bool blkif_completion(unsigned long *id,
+ 
+               /* Wait the second response if not yet here. */
+               if (s2->status < REQ_DONE)
+-                      return false;
++                      return 0;
+ 
+               bret->status = blkif_get_final_status(s->status,
+                                                     s2->status);
+@@ -1512,42 +1519,43 @@ static bool blkif_completion(unsigned long *id,
+       }
+       /* Add the persistent grant into the list of free grants */
+       for (i = 0; i < num_grant; i++) {
+-              if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
++              if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) {
+                       /*
+                        * If the grant is still mapped by the backend (the
+                        * backend has chosen to make this grant persistent)
+                        * we add it at the head of the list, so it will be
+                        * reused first.
+                        */
+-                      if (!info->feature_persistent)
+-                              pr_alert_ratelimited("backed has not unmapped grant: %u\n",
+-                                                   s->grants_used[i]->gref);
++                      if (!info->feature_persistent) {
++                              pr_alert("backed has not unmapped grant: %u\n",
++                                       s->grants_used[i]->gref);
++                              return -1;
++                      }
+                       list_add(&s->grants_used[i]->node, &rinfo->grants);
+                       rinfo->persistent_gnts_c++;
+               } else {
+                       /*
+-                       * If the grant is not mapped by the backend we end the
+-                       * foreign access and add it to the tail of the list,
+-                       * so it will not be picked again unless we run out of
+-                       * persistent grants.
++                       * If the grant is not mapped by the backend we add it
++                       * to the tail of the list, so it will not be picked
++                       * again unless we run out of persistent grants.
+                        */
+-                      gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
+                       s->grants_used[i]->gref = GRANT_INVALID_REF;
+                       list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
+               }
+       }
+       if (s->req.operation == BLKIF_OP_INDIRECT) {
+               for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
+-                      if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
+-                              if (!info->feature_persistent)
+-                                      pr_alert_ratelimited("backed has not unmapped grant: %u\n",
+-                                                           s->indirect_grants[i]->gref);
++                      if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) {
++                              if (!info->feature_persistent) {
++                                      pr_alert("backed has not unmapped grant: %u\n",
++                                               s->indirect_grants[i]->gref);
++                                      return -1;
++                              }
+                               list_add(&s->indirect_grants[i]->node, &rinfo->grants);
+                               rinfo->persistent_gnts_c++;
+                       } else {
+                               struct page *indirect_page;
+ 
+-                              gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
+                               /*
+                                * Add the used indirect page back to the list of
+                                * available pages for indirect grefs.
+@@ -1562,7 +1570,7 @@ static bool blkif_completion(unsigned long *id,
+               }
+       }
+ 
+-      return true;
++      return 1;
+ }
+ 
+ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+@@ -1628,12 +1636,17 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+               }
+ 
+               if (bret.operation != BLKIF_OP_DISCARD) {
++                      int ret;
++
+                       /*
+                        * We may need to wait for an extra response if the
+                        * I/O request is split in 2
+                        */
+-                      if (!blkif_completion(&id, rinfo, &bret))
++                      ret = blkif_completion(&id, rinfo, &bret);
++                      if (!ret)
+                               continue;
++                      if (unlikely(ret < 0))
++                              goto err;
+               }
+ 
+               if (add_id_to_freelist(rinfo, id)) {
+@@ -1740,8 +1753,7 @@ static int setup_blkring(struct xenbus_device *dev,
+       for (i = 0; i < info->nr_ring_pages; i++)
+               rinfo->ring_ref[i] = GRANT_INVALID_REF;
+ 
+-      sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
+-                                                     get_order(ring_size));
++      sring = alloc_pages_exact(ring_size, GFP_NOIO);
+       if (!sring) {
+               xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
+               return -ENOMEM;
+@@ -1751,7 +1763,7 @@ static int setup_blkring(struct xenbus_device *dev,
+ 
+       err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
+       if (err < 0) {
+-              free_pages((unsigned long)sring, get_order(ring_size));
++              free_pages_exact(sring, ring_size);
+               rinfo->ring.sring = NULL;
+               goto fail;
+       }
+@@ -2729,11 +2741,10 @@ static void purge_persistent_grants(struct blkfront_info *info)
+               list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
+                                        node) {
+                       if (gnt_list_entry->gref == GRANT_INVALID_REF ||
+-                          gnttab_query_foreign_access(gnt_list_entry->gref))
++                          
!gnttab_try_end_foreign_access(gnt_list_entry->gref))
+                               continue;
+ 
+                       list_del(&gnt_list_entry->node);
+-                      gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
+                       rinfo->persistent_gnts_c--;
+                       gnt_list_entry->gref = GRANT_INVALID_REF;
+                       list_add_tail(&gnt_list_entry->node, &rinfo->grants);
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 7ed8872d08c60..1a69b5246133b 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -424,14 +424,12 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue)
+                       queue->tx_link[id] = TX_LINK_NONE;
+                       skb = queue->tx_skbs[id];
+                       queue->tx_skbs[id] = NULL;
+-                      if (unlikely(gnttab_query_foreign_access(
+-                              queue->grant_tx_ref[id]) != 0)) {
++                      if (unlikely(!gnttab_end_foreign_access_ref(
++                              queue->grant_tx_ref[id], GNTMAP_readonly))) {
+                               dev_alert(dev,
+                                         "Grant still in use by backend domain\n");
+                               goto err;
+                       }
+-                      gnttab_end_foreign_access_ref(
+-                              queue->grant_tx_ref[id], GNTMAP_readonly);
+                       gnttab_release_grant_reference(
+                               &queue->gref_tx_head, queue->grant_tx_ref[id]);
+                       queue->grant_tx_ref[id] = GRANT_INVALID_REF;
+@@ -992,7 +990,6 @@ static int xennet_get_responses(struct netfront_queue *queue,
+       struct device *dev = &queue->info->netdev->dev;
+       struct bpf_prog *xdp_prog;
+       struct xdp_buff xdp;
+-      unsigned long ret;
+       int slots = 1;
+       int err = 0;
+       u32 verdict;
+@@ -1034,8 +1031,13 @@ static int xennet_get_responses(struct netfront_queue *queue,
+                       goto next;
+               }
+ 
+-              ret = gnttab_end_foreign_access_ref(ref, 0);
+-              BUG_ON(!ret);
++              if (!gnttab_end_foreign_access_ref(ref, 0)) {
++                      dev_alert(dev,
++                                "Grant still in use by backend domain\n");
++                      queue->info->broken = true;
++                      dev_alert(dev, "Disabled for further use\n");
++                      return -EINVAL;
++              }
+ 
+               gnttab_release_grant_reference(&queue->gref_rx_head, ref);
+ 
+@@ -1256,6 +1258,10 @@ static int xennet_poll(struct napi_struct *napi, int budget)
+                                          &need_xdp_flush);
+ 
+               if (unlikely(err)) {
++                      if (queue->info->broken) {
++                              spin_unlock(&queue->rx_lock);
++                              return 0;
++                      }
+ err:
+                       while ((skb = __skb_dequeue(&tmpq)))
+                               __skb_queue_tail(&errq, skb);
+@@ -1920,7 +1926,7 @@ static int setup_netfront(struct xenbus_device *dev,
+                       struct netfront_queue *queue, unsigned int feature_split_evtchn)
+ {
+       struct xen_netif_tx_sring *txs;
+-      struct xen_netif_rx_sring *rxs;
++      struct xen_netif_rx_sring *rxs = NULL;
+       grant_ref_t gref;
+       int err;
+ 
+@@ -1940,21 +1946,21 @@ static int setup_netfront(struct xenbus_device *dev,
+ 
+       err = xenbus_grant_ring(dev, txs, 1, &gref);
+       if (err < 0)
+-              goto grant_tx_ring_fail;
++              goto fail;
+       queue->tx_ring_ref = gref;
+ 
+       rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
+       if (!rxs) {
+               err = -ENOMEM;
+               xenbus_dev_fatal(dev, err, "allocating rx ring page");
+-              goto alloc_rx_ring_fail;
++              goto fail;
+       }
+       SHARED_RING_INIT(rxs);
+       FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
+ 
+       err = xenbus_grant_ring(dev, rxs, 1, &gref);
+       if (err < 0)
+-              goto grant_rx_ring_fail;
++              goto fail;
+       queue->rx_ring_ref = gref;
+ 
+       if (feature_split_evtchn)
+@@ -1967,22 +1973,28 @@ static int setup_netfront(struct xenbus_device *dev,
+               err = setup_netfront_single(queue);
+ 
+       if (err)
+-              goto alloc_evtchn_fail;
++              goto fail;
+ 
+       return 0;
+ 
+       /* If we fail to setup netfront, it is safe to just revoke access to
+        * granted pages because backend is not accessing it at this point.
+        */
+-alloc_evtchn_fail:
+-      gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
+-grant_rx_ring_fail:
+-      free_page((unsigned long)rxs);
+-alloc_rx_ring_fail:
+-      gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
+-grant_tx_ring_fail:
+-      free_page((unsigned long)txs);
+-fail:
++ fail:
++      if (queue->rx_ring_ref != GRANT_INVALID_REF) {
++              gnttab_end_foreign_access(queue->rx_ring_ref, 0,
++                                        (unsigned long)rxs);
++              queue->rx_ring_ref = GRANT_INVALID_REF;
++      } else {
++              free_page((unsigned long)rxs);
++      }
++      if (queue->tx_ring_ref != GRANT_INVALID_REF) {
++              gnttab_end_foreign_access(queue->tx_ring_ref, 0,
++                                        (unsigned long)txs);
++              queue->tx_ring_ref = GRANT_INVALID_REF;
++      } else {
++              free_page((unsigned long)txs);
++      }
+       return err;
+ }
+ 
+diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
+index 259fc248d06cf..a25c9386fdf78 100644
+--- a/drivers/scsi/xen-scsifront.c
++++ b/drivers/scsi/xen-scsifront.c
+@@ -233,12 +233,11 @@ static void scsifront_gnttab_done(struct vscsifrnt_info *info,
+               return;
+ 
+       for (i = 0; i < shadow->nr_grants; i++) {
+-              if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
++              if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) {
+                       shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
+                                    "grant still in use by backend\n");
+                       BUG();
+               }
+-              gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
+       }
+ 
+       kfree(shadow->sg);
+diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
+index 3fa40c723e8e9..edb0acd0b8323 100644
+--- a/drivers/xen/gntalloc.c
++++ b/drivers/xen/gntalloc.c
+@@ -169,20 +169,14 @@ undo:
+               __del_gref(gref);
+       }
+ 
+-      /* It's possible for the target domain to map the just-allocated grant
+-       * references by blindly guessing their IDs; if this is done, then
+-       * __del_gref will leave them in the queue_gref list. They need to be
+-       * added to the global list so that we can free them when they are no
+-       * longer referenced.
+-       */
+-      if (unlikely(!list_empty(&queue_gref)))
+-              list_splice_tail(&queue_gref, &gref_list);
+       mutex_unlock(&gref_mutex);
+       return rc;
+ }
+ 
+ static void __del_gref(struct gntalloc_gref *gref)
+ {
++      unsigned long addr;
++
+       if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
+               uint8_t *tmp = kmap(gref->page);
+               tmp[gref->notify.pgoff] = 0;
+@@ -196,21 +190,16 @@ static void __del_gref(struct gntalloc_gref *gref)
+       gref->notify.flags = 0;
+ 
+       if (gref->gref_id) {
+-              if (gnttab_query_foreign_access(gref->gref_id))
+-                      return;
+-
+-              if (!gnttab_end_foreign_access_ref(gref->gref_id, 0))
+-                      return;
+-
+-              gnttab_free_grant_reference(gref->gref_id);
++              if (gref->page) {
++                      addr = (unsigned long)page_to_virt(gref->page);
++                      gnttab_end_foreign_access(gref->gref_id, 0, addr);
++              } else
++                      gnttab_free_grant_reference(gref->gref_id);
+       }
+ 
+       gref_size--;
+       list_del(&gref->next_gref);
+ 
+-      if (gref->page)
+-              __free_page(gref->page);
+-
+       kfree(gref);
+ }
+ 
+diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
+index 3729bea0c9895..5c83d41766c85 100644
+--- a/drivers/xen/grant-table.c
++++ b/drivers/xen/grant-table.c
+@@ -134,12 +134,9 @@ struct gnttab_ops {
+        */
+       unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
+       /*
+-       * Query the status of a grant entry. Ref parameter is reference of
+-       * queried grant entry, return value is the status of queried entry.
+-       * Detailed status(writing/reading) can be gotten from the return value
+-       * by bit operations.
++       * Read the frame number related to a given grant reference.
+        */
+-      int (*query_foreign_access)(grant_ref_t ref);
++      unsigned long (*read_frame)(grant_ref_t ref);
+ };
+ 
+ struct unmap_refs_callback_data {
+@@ -284,22 +281,6 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
+ }
+ EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
+ 
+-static int gnttab_query_foreign_access_v1(grant_ref_t ref)
+-{
+-      return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
+-}
+-
+-static int gnttab_query_foreign_access_v2(grant_ref_t ref)
+-{
+-      return grstatus[ref] & (GTF_reading|GTF_writing);
+-}
+-
+-int gnttab_query_foreign_access(grant_ref_t ref)
+-{
+-      return gnttab_interface->query_foreign_access(ref);
+-}
+-EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
+-
+ static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
+ {
+       u16 flags, nflags;
+@@ -353,6 +334,16 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+ }
+ EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
+ 
++static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
++{
++      return gnttab_shared.v1[ref].frame;
++}
++
++static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
++{
++      return gnttab_shared.v2[ref].full_page.frame;
++}
++
+ struct deferred_entry {
+       struct list_head list;
+       grant_ref_t ref;
+@@ -382,12 +373,9 @@ static void gnttab_handle_deferred(struct timer_list *unused)
+               spin_unlock_irqrestore(&gnttab_list_lock, flags);
+               if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
+                       put_free_entry(entry->ref);
+-                      if (entry->page) {
+-                              pr_debug("freeing g.e. %#x (pfn %#lx)\n",
+-                                       entry->ref, page_to_pfn(entry->page));
+-                              put_page(entry->page);
+-                      } else
+-                              pr_info("freeing g.e. %#x\n", entry->ref);
++                      pr_debug("freeing g.e. %#x (pfn %#lx)\n",
++                               entry->ref, page_to_pfn(entry->page));
++                      put_page(entry->page);
+                       kfree(entry);
+                       entry = NULL;
+               } else {
+@@ -412,9 +400,18 @@ static void gnttab_handle_deferred(struct timer_list *unused)
+ static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
+                               struct page *page)
+ {
+-      struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
++      struct deferred_entry *entry;
++      gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
+       const char *what = KERN_WARNING "leaking";
+ 
++      entry = kmalloc(sizeof(*entry), gfp);
++      if (!page) {
++              unsigned long gfn = gnttab_interface->read_frame(ref);
++
++              page = pfn_to_page(gfn_to_pfn(gfn));
++              get_page(page);
++      }
++
+       if (entry) {
+               unsigned long flags;
+ 
+@@ -435,11 +432,21 @@ static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
+              what, ref, page ? page_to_pfn(page) : -1);
+ }
+ 
++int gnttab_try_end_foreign_access(grant_ref_t ref)
++{
++      int ret = _gnttab_end_foreign_access_ref(ref, 0);
++
++      if (ret)
++              put_free_entry(ref);
++
++      return ret;
++}
++EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);
++
+ void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
+                              unsigned long page)
+ {
+-      if (gnttab_end_foreign_access_ref(ref, readonly)) {
+-              put_free_entry(ref);
++      if (gnttab_try_end_foreign_access(ref)) {
+               if (page != 0)
+                       put_page(virt_to_page(page));
+       } else
+@@ -1417,7 +1424,7 @@ static const struct gnttab_ops gnttab_v1_ops = {
+       .update_entry                   = gnttab_update_entry_v1,
+       .end_foreign_access_ref         = gnttab_end_foreign_access_ref_v1,
+       .end_foreign_transfer_ref       = gnttab_end_foreign_transfer_ref_v1,
+-      .query_foreign_access           = gnttab_query_foreign_access_v1,
++      .read_frame                     = gnttab_read_frame_v1,
+ };
+ 
+ static const struct gnttab_ops gnttab_v2_ops = {
+@@ -1429,7 +1436,7 @@ static const struct gnttab_ops gnttab_v2_ops = {
+       .update_entry                   = gnttab_update_entry_v2,
+       .end_foreign_access_ref         = gnttab_end_foreign_access_ref_v2,
+       .end_foreign_transfer_ref       = gnttab_end_foreign_transfer_ref_v2,
+-      .query_foreign_access           = gnttab_query_foreign_access_v2,
++      .read_frame                     = gnttab_read_frame_v2,
+ };
+ 
+ static bool gnttab_need_v2(void)
+diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
+index 7984645b59563..bbe337dc296e3 100644
+--- a/drivers/xen/pvcalls-front.c
++++ b/drivers/xen/pvcalls-front.c
+@@ -337,8 +337,8 @@ static void free_active_ring(struct sock_mapping *map)
+       if (!map->active.ring)
+               return;
+ 
+-      free_pages((unsigned long)map->active.data.in,
+-                      map->active.ring->ring_order);
++      free_pages_exact(map->active.data.in,
++                       PAGE_SIZE << map->active.ring->ring_order);
+       free_page((unsigned long)map->active.ring);
+ }
+ 
+@@ -352,8 +352,8 @@ static int alloc_active_ring(struct sock_mapping *map)
+               goto out;
+ 
+       map->active.ring->ring_order = PVCALLS_RING_ORDER;
+-      bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+-                                      PVCALLS_RING_ORDER);
++      bytes = alloc_pages_exact(PAGE_SIZE << PVCALLS_RING_ORDER,
++                                GFP_KERNEL | __GFP_ZERO);
+       if (!bytes)
+               goto out;
+ 
+diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
+index 0cd728961fce9..16cfef0993295 100644
+--- a/drivers/xen/xenbus/xenbus_client.c
++++ b/drivers/xen/xenbus/xenbus_client.c
+@@ -379,7 +379,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
+                     unsigned int nr_pages, grant_ref_t *grefs)
+ {
+       int err;
+-      int i, j;
++      unsigned int i;
++      grant_ref_t gref_head;
++
++      err = gnttab_alloc_grant_references(nr_pages, &gref_head);
++      if (err) {
++              xenbus_dev_fatal(dev, err, "granting access to ring page");
++              return err;
++      }
+ 
+       for (i = 0; i < nr_pages; i++) {
+               unsigned long gfn;
+@@ -389,23 +396,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
+               else
+                       gfn = virt_to_gfn(vaddr);
+ 
+-              err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
+-              if (err < 0) {
+-                      xenbus_dev_fatal(dev, err,
+-                                       "granting access to ring page");
+-                      goto fail;
+-              }
+-              grefs[i] = err;
++              grefs[i] = gnttab_claim_grant_reference(&gref_head);
++              gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
++                                              gfn, 0);
+ 
+               vaddr = vaddr + XEN_PAGE_SIZE;
+       }
+ 
+       return 0;
+-
+-fail:
+-      for (j = 0; j < i; j++)
+-              gnttab_end_foreign_access_ref(grefs[j], 0);
+-      return err;
+ }
+ EXPORT_SYMBOL_GPL(xenbus_grant_ring);
+ 
+diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
+index f860645f65128..ff38737475ecb 100644
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -87,6 +87,11 @@
+                          ARM_SMCCC_SMC_32,                            \
+                          0, 0x7fff)
+ 
++#define ARM_SMCCC_ARCH_WORKAROUND_3                                   \
++      ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
++                         ARM_SMCCC_SMC_32,                            \
++                         0, 0x3fff)
++
+ #define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED  1
+ 
+ /* Paravirtualised time calls (defined by ARM DEN0057A) */
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index e6ddf5a3beaf8..ea3ff499e94a3 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1485,6 +1485,12 @@ struct bpf_prog *bpf_prog_by_id(u32 id);
+ struct bpf_link *bpf_link_by_id(u32 id);
+ 
+ const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
++
++static inline bool unprivileged_ebpf_enabled(void)
++{
++      return !sysctl_unprivileged_bpf_disabled;
++}
++
+ #else /* !CONFIG_BPF_SYSCALL */
+ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
+ {
+@@ -1679,6 +1685,12 @@ bpf_base_func_proto(enum bpf_func_id func_id)
+ {
+       return NULL;
+ }
++
++static inline bool unprivileged_ebpf_enabled(void)
++{
++      return false;
++}
++
+ #endif /* CONFIG_BPF_SYSCALL */
+ 
+ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
+diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
+index 0b1182a3cf412..57b4ae6a4a186 100644
+--- a/include/xen/grant_table.h
++++ b/include/xen/grant_table.h
+@@ -97,17 +97,32 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
+  * access has been ended, free the given page too.  Access will be ended
+  * immediately iff the grant entry is not in use, otherwise it will happen
+  * some time later.  page may be 0, in which case no freeing will occur.
++ * Note that the granted page might still be accessed (read or write) by the
++ * other side after gnttab_end_foreign_access() returns, so even if page was
++ * specified as 0 it is not allowed to just reuse the page for other
++ * purposes immediately. gnttab_end_foreign_access() will take an additional
++ * reference to the granted page in this case, which is dropped only after
++ * the grant is no longer in use.
++ * This requires that multi page allocations for areas subject to
++ * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing
++ * via free_pages_exact()) in order to avoid high order pages.
+  */
+ void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
+                              unsigned long page);
+ 
++/*
++ * End access through the given grant reference, iff the grant entry is
++ * no longer in use.  In case of success ending foreign access, the
++ * grant reference is deallocated.
++ * Return 1 if the grant entry was freed, 0 if it is still in use.
++ */
++int gnttab_try_end_foreign_access(grant_ref_t ref);
++
+ int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
+ 
+ unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
+ unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
+ 
+-int gnttab_query_foreign_access(grant_ref_t ref);
+-
+ /*
+  * operations on reserved batches of grant references
+  */
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 72ceb19574d0c..8832440a4938e 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -234,6 +234,10 @@ static int bpf_stats_handler(struct ctl_table *table, int write,
+       return ret;
+ }
+ 
++void __weak unpriv_ebpf_notify(int new_state)
++{
++}
++
+ static int bpf_unpriv_handler(struct ctl_table *table, int write,
+                             void *buffer, size_t *lenp, loff_t *ppos)
+ {
+@@ -251,6 +255,9 @@ static int bpf_unpriv_handler(struct ctl_table *table, int write,
+                       return -EPERM;
+               *(int *)table->data = unpriv_enable;
+       }
++
++      unpriv_ebpf_notify(unpriv_enable);
++
+       return ret;
+ }
+ #endif /* CONFIG_BPF_SYSCALL && CONFIG_SYSCTL */
+diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
+index 3ec1a51a6944e..432ac5a16f2e0 100644
+--- a/net/9p/trans_xen.c
++++ b/net/9p/trans_xen.c
+@@ -304,9 +304,9 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
+                               ref = priv->rings[i].intf->ref[j];
+                               gnttab_end_foreign_access(ref, 0, 0);
+                       }
+-                      free_pages((unsigned long)priv->rings[i].data.in,
+-                                 priv->rings[i].intf->ring_order -
+-                                 (PAGE_SHIFT - XEN_PAGE_SHIFT));
++                      free_pages_exact(priv->rings[i].data.in,
++                                 1UL << (priv->rings[i].intf->ring_order +
++                                         XEN_PAGE_SHIFT));
+               }
+               gnttab_end_foreign_access(priv->rings[i].ref, 0, 0);
+               free_page((unsigned long)priv->rings[i].intf);
+@@ -345,8 +345,8 @@ static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
+       if (ret < 0)
+               goto out;
+       ring->ref = ret;
+-      bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+-                      order - (PAGE_SHIFT - XEN_PAGE_SHIFT));
++      bytes = alloc_pages_exact(1UL << (order + XEN_PAGE_SHIFT),
++                                GFP_KERNEL | __GFP_ZERO);
+       if (!bytes) {
+               ret = -ENOMEM;
+               goto out;
+@@ -377,9 +377,7 @@ out:
+       if (bytes) {
+               for (i--; i >= 0; i--)
+                       gnttab_end_foreign_access(ring->intf->ref[i], 0, 0);
+-              free_pages((unsigned long)bytes,
+-                         ring->intf->ring_order -
+-                         (PAGE_SHIFT - XEN_PAGE_SHIFT));
++              free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT));
+       }
+       gnttab_end_foreign_access(ring->ref, 0, 0);
+       free_page((unsigned long)ring->intf);
+diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
+index dad350d42ecfb..b58730cc12e83 100644
+--- a/tools/arch/x86/include/asm/cpufeatures.h
++++ b/tools/arch/x86/include/asm/cpufeatures.h
+@@ -204,7 +204,7 @@
+ #define X86_FEATURE_SME                       ( 7*32+10) /* AMD Secure Memory Encryption */
+ #define X86_FEATURE_PTI                       ( 7*32+11) /* Kernel Page Table Isolation enabled */
+ #define X86_FEATURE_RETPOLINE         ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+-#define X86_FEATURE_RETPOLINE_AMD     ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
++#define X86_FEATURE_RETPOLINE_LFENCE  ( 7*32+13) /* "" Use LFENCEs for Spectre variant 2 */
+ #define X86_FEATURE_INTEL_PPIN                ( 7*32+14) /* Intel Processor Inventory Number */
+ #define X86_FEATURE_CDP_L2            ( 7*32+15) /* Code and Data Prioritization L2 */
+ #define X86_FEATURE_MSR_SPEC_CTRL     ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
