commit:     e09404c093aba34d8645108f68fe19e3f97b1dfa
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar 11 10:57:33 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar 11 10:57:33 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e09404c0

Linux patch 4.9.306

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1305_linux-4.9.306.patch | 3007 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3011 insertions(+)

diff --git a/0000_README b/0000_README
index b59dbb1b..158b818d 100644
--- a/0000_README
+++ b/0000_README
@@ -1263,6 +1263,10 @@ Patch:  1304_linux-4.9.305.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.305
 
+Patch:  1305_linux-4.9.306.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.306
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1305_linux-4.9.306.patch b/1305_linux-4.9.306.patch
new file mode 100644
index 00000000..315a06d2
--- /dev/null
+++ b/1305_linux-4.9.306.patch
@@ -0,0 +1,3007 @@
+diff --git a/Documentation/hw-vuln/index.rst b/Documentation/hw-vuln/index.rst
+index b5fbc6ae9d5fd..74466ba801678 100644
+--- a/Documentation/hw-vuln/index.rst
++++ b/Documentation/hw-vuln/index.rst
+@@ -9,6 +9,7 @@ are configurable at compile, boot or run time.
+ .. toctree::
+    :maxdepth: 1
+ 
++   spectre
+    l1tf
+    mds
+    tsx_async_abort
+diff --git a/Documentation/hw-vuln/spectre.rst b/Documentation/hw-vuln/spectre.rst
+new file mode 100644
+index 0000000000000..c6c43ac2ba43d
+--- /dev/null
++++ b/Documentation/hw-vuln/spectre.rst
+@@ -0,0 +1,785 @@
++.. SPDX-License-Identifier: GPL-2.0
++
++Spectre Side Channels
++=====================
++
++Spectre is a class of side channel attacks that exploit branch prediction
++and speculative execution on modern CPUs to read memory, possibly
++bypassing access controls. Speculative execution side channel exploits
++do not modify memory but attempt to infer privileged data in the memory.
++
++This document covers Spectre variant 1 and Spectre variant 2.
++
++Affected processors
++-------------------
++
++Speculative execution side channel methods affect a wide range of modern
++high performance processors, since most modern high speed processors
++use branch prediction and speculative execution.
++
++The following CPUs are vulnerable:
++
++    - Intel Core, Atom, Pentium, and Xeon processors
++
++    - AMD Phenom, EPYC, and Zen processors
++
++    - IBM POWER and zSeries processors
++
++    - Higher end ARM processors
++
++    - Apple CPUs
++
++    - Higher end MIPS CPUs
++
++    - Likely most other high performance CPUs. Contact your CPU vendor for details.
++
++Whether a processor is affected or not can be read out from the Spectre
++vulnerability files in sysfs. See :ref:`spectre_sys_info`.
++
++Related CVEs
++------------
++
++The following CVE entries describe Spectre variants:
++
++   =============   =======================  ==========================
++   CVE-2017-5753   Bounds check bypass      Spectre variant 1
++   CVE-2017-5715   Branch target injection  Spectre variant 2
++   CVE-2019-1125   Spectre v1 swapgs        Spectre variant 1 (swapgs)
++   =============   =======================  ==========================
++
++Problem
++-------
++
++CPUs use speculative operations to improve performance. That may leave
++traces of memory accesses or computations in the processor's caches,
++buffers, and branch predictors. Malicious software may be able to
++influence the speculative execution paths, and then use the side effects
++of the speculative execution in the CPUs' caches and buffers to infer
++privileged data touched during the speculative execution.
++
++Spectre variant 1 attacks take advantage of speculative execution of
++conditional branches, while Spectre variant 2 attacks use speculative
++execution of indirect branches to leak privileged memory.
++See :ref:`[1] <spec_ref1>` :ref:`[5] <spec_ref5>` :ref:`[6] <spec_ref6>`
++:ref:`[7] <spec_ref7>` :ref:`[10] <spec_ref10>` :ref:`[11] <spec_ref11>`.
++
++Spectre variant 1 (Bounds Check Bypass)
++---------------------------------------
++
++The bounds check bypass attack :ref:`[2] <spec_ref2>` takes advantage
++of speculative execution that bypasses conditional branch instructions
++used for memory access bounds check (e.g. checking if the index of an
++array results in memory access within a valid range). This results in
++memory accesses to invalid memory (with out-of-bound index) that are
++done speculatively before validation checks resolve. Such speculative
++memory accesses can leave side effects, creating side channels which
++leak information to the attacker.
++
++There are some extensions of Spectre variant 1 attacks for reading data
++over the network, see :ref:`[12] <spec_ref12>`. However such attacks
++are difficult, low bandwidth, fragile, and are considered low risk.
++
++Note that, despite "Bounds Check Bypass" name, Spectre variant 1 is not
++only about user-controlled array bounds checks.  It can affect any
++conditional checks.  The kernel entry code interrupt, exception, and NMI
++handlers all have conditional swapgs checks.  Those may be problematic
++in the context of Spectre v1, as kernel code can speculatively run with
++a user GS.
++
++Spectre variant 2 (Branch Target Injection)
++-------------------------------------------
++
++The branch target injection attack takes advantage of speculative
++execution of indirect branches :ref:`[3] <spec_ref3>`.  The indirect
++branch predictors inside the processor used to guess the target of
++indirect branches can be influenced by an attacker, causing gadget code
++to be speculatively executed, thus exposing sensitive data touched by
++the victim. The side effects left in the CPU's caches during speculative
++execution can be measured to infer data values.
++
++.. _poison_btb:
++
++In Spectre variant 2 attacks, the attacker can steer speculative indirect
++branches in the victim to gadget code by poisoning the branch target
++buffer of a CPU used for predicting indirect branch addresses. Such
++poisoning could be done by indirect branching into existing code,
++with the address offset of the indirect branch under the attacker's
++control. Since the branch prediction on impacted hardware does not
++fully disambiguate branch address and uses the offset for prediction,
++this could cause privileged code's indirect branch to jump to a gadget
++code with the same offset.
++
++The most useful gadgets take an attacker-controlled input parameter (such
++as a register value) so that the memory read can be controlled. Gadgets
++without input parameters might be possible, but the attacker would have
++very little control over what memory can be read, reducing the risk of
++the attack revealing useful data.
++
++One other variant 2 attack vector is for the attacker to poison the
++return stack buffer (RSB) :ref:`[13] <spec_ref13>` to cause speculative
++subroutine return instruction execution to go to a gadget.  An attacker's
++imbalanced subroutine call instructions might "poison" entries in the
++return stack buffer which are later consumed by a victim's subroutine
++return instructions.  This attack can be mitigated by flushing the return
++stack buffer on context switch, or virtual machine (VM) exit.
++
++On systems with simultaneous multi-threading (SMT), attacks are possible
++from the sibling thread, as level 1 cache and branch target buffer
++(BTB) may be shared between hardware threads in a CPU core.  A malicious
++program running on the sibling thread may influence its peer's BTB to
++steer its indirect branch speculations to gadget code, and measure the
++speculative execution's side effects left in level 1 cache to infer the
++victim's data.
++
++Yet another variant 2 attack vector is for the attacker to poison the
++Branch History Buffer (BHB) to speculatively steer an indirect branch
++to a specific Branch Target Buffer (BTB) entry, even if the entry isn't
++associated with the source address of the indirect branch. Specifically,
++the BHB might be shared across privilege levels even in the presence of
++Enhanced IBRS.
++
++Currently the only known real-world BHB attack vector is via
++unprivileged eBPF. Therefore, it's highly recommended to not enable
++unprivileged eBPF, especially when eIBRS is used (without retpolines).
++For a full mitigation against BHB attacks, it's recommended to use
++retpolines (or eIBRS combined with retpolines).
++
++Attack scenarios
++----------------
++
++The following list of attack scenarios have been anticipated, but may
++not cover all possible attack vectors.
++
++1. A user process attacking the kernel
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++Spectre variant 1
++~~~~~~~~~~~~~~~~~
++
++   The attacker passes a parameter to the kernel via a register or
++   via a known address in memory during a syscall. Such parameter may
++   be used later by the kernel as an index to an array or to derive
++   a pointer for a Spectre variant 1 attack.  The index or pointer
++   is invalid, but bound checks are bypassed in the code branch taken
++   for speculative execution. This could cause privileged memory to be
++   accessed and leaked.
++
++   For kernel code that has been identified where data pointers could
++   potentially be influenced for Spectre attacks, new "nospec" accessor
++   macros are used to prevent speculative loading of data.
++
++Spectre variant 1 (swapgs)
++~~~~~~~~~~~~~~~~~~~~~~~~~~
++
++   An attacker can train the branch predictor to speculatively skip the
++   swapgs path for an interrupt or exception.  If they initialize
++   the GS register to a user-space value, if the swapgs is speculatively
++   skipped, subsequent GS-related percpu accesses in the speculation
++   window will be done with the attacker-controlled GS value.  This
++   could cause privileged memory to be accessed and leaked.
++
++   For example:
++
++   ::
++
++     if (coming from user space)
++         swapgs
++     mov %gs:<percpu_offset>, %reg
++     mov (%reg), %reg1
++
++   When coming from user space, the CPU can speculatively skip the
++   swapgs, and then do a speculative percpu load using the user GS
++   value.  So the user can speculatively force a read of any kernel
++   value.  If a gadget exists which uses the percpu value as an address
++   in another load/store, then the contents of the kernel value may
++   become visible via an L1 side channel attack.
++
++   A similar attack exists when coming from kernel space.  The CPU can
++   speculatively do the swapgs, causing the user GS to get used for the
++   rest of the speculative window.
++
++Spectre variant 2
++~~~~~~~~~~~~~~~~~
++
++   A spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch
++   target buffer (BTB) before issuing syscall to launch an attack.
++   After entering the kernel, the kernel could use the poisoned branch
++   target buffer on indirect jump and jump to gadget code in speculative
++   execution.
++
++   If an attacker tries to control the memory addresses leaked during
++   speculative execution, he would also need to pass a parameter to the
++   gadget, either through a register or a known address in memory. After
++   the gadget has executed, he can measure the side effect.
++
++   The kernel can protect itself against consuming poisoned branch
++   target buffer entries by using return trampolines (also known as
++   "retpoline") :ref:`[3] <spec_ref3>` :ref:`[9] <spec_ref9>` for all
++   indirect branches. Return trampolines trap speculative execution paths
++   to prevent jumping to gadget code during speculative execution.
++   x86 CPUs with Enhanced Indirect Branch Restricted Speculation
++   (Enhanced IBRS) available in hardware should use the feature to
++   mitigate Spectre variant 2 instead of retpoline. Enhanced IBRS is
++   more efficient than retpoline.
++
++   There may be gadget code in firmware which could be exploited with
++   Spectre variant 2 attack by a rogue user process. To mitigate such
++   attacks on x86, Indirect Branch Restricted Speculation (IBRS) feature
++   is turned on before the kernel invokes any firmware code.
++
++2. A user process attacking another user process
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++   A malicious user process can try to attack another user process,
++   either via a context switch on the same hardware thread, or from the
++   sibling hyperthread sharing a physical processor core on simultaneous
++   multi-threading (SMT) system.
++
++   Spectre variant 1 attacks generally require passing parameters
++   between the processes, which needs a data passing relationship, such
++   as remote procedure calls (RPC).  Those parameters are used in gadget
++   code to derive invalid data pointers accessing privileged memory in
++   the attacked process.
++
++   Spectre variant 2 attacks can be launched from a rogue process by
++   :ref:`poisoning <poison_btb>` the branch target buffer.  This can
++   influence the indirect branch targets for a victim process that either
++   runs later on the same hardware thread, or running concurrently on
++   a sibling hardware thread sharing the same physical core.
++
++   A user process can protect itself against Spectre variant 2 attacks
++   by using the prctl() syscall to disable indirect branch speculation
++   for itself.  An administrator can also cordon off an unsafe process
++   from polluting the branch target buffer by disabling the process's
++   indirect branch speculation. This comes with a performance cost
++   from not using indirect branch speculation and clearing the branch
++   target buffer.  When SMT is enabled on x86, for a process that has
++   indirect branch speculation disabled, Single Threaded Indirect Branch
++   Predictors (STIBP) :ref:`[4] <spec_ref4>` are turned on to prevent the
++   sibling thread from controlling branch target buffer.  In addition,
++   the Indirect Branch Prediction Barrier (IBPB) is issued to clear the
++   branch target buffer when context switching to and from such process.
++
++   On x86, the return stack buffer is stuffed on context switch.
++   This prevents the branch target buffer from being used for branch
++   prediction when the return stack buffer underflows while switching to
++   a deeper call stack. Any poisoned entries in the return stack buffer
++   left by the previous process will also be cleared.
++
++   User programs should use address space randomization to make attacks
++   more difficult (Set /proc/sys/kernel/randomize_va_space = 1 or 2).
++
++3. A virtualized guest attacking the host
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++   The attack mechanism is similar to how user processes attack the
++   kernel.  The kernel is entered via hyper-calls or other virtualization
++   exit paths.
++
++   For Spectre variant 1 attacks, rogue guests can pass parameters
++   (e.g. in registers) via hyper-calls to derive invalid pointers to
++   speculate into privileged memory after entering the kernel.  For places
++   where such kernel code has been identified, nospec accessor macros
++   are used to stop speculative memory access.
++
++   For Spectre variant 2 attacks, rogue guests can :ref:`poison
++   <poison_btb>` the branch target buffer or return stack buffer, causing
++   the kernel to jump to gadget code in the speculative execution paths.
++
++   To mitigate variant 2, the host kernel can use return trampolines
++   for indirect branches to bypass the poisoned branch target buffer,
++   and flushing the return stack buffer on VM exit.  This prevents rogue
++   guests from affecting indirect branching in the host kernel.
++
++   To protect host processes from rogue guests, host processes can have
++   indirect branch speculation disabled via prctl().  The branch target
++   buffer is cleared before context switching to such processes.
++
++4. A virtualized guest attacking other guest
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++   A rogue guest may attack another guest to get data accessible by the
++   other guest.
++
++   Spectre variant 1 attacks are possible if parameters can be passed
++   between guests.  This may be done via mechanisms such as shared memory
++   or message passing.  Such parameters could be used to derive data
++   pointers to privileged data in guest.  The privileged data could be
++   accessed by gadget code in the victim's speculation paths.
++
++   Spectre variant 2 attacks can be launched from a rogue guest by
++   :ref:`poisoning <poison_btb>` the branch target buffer or the return
++   stack buffer. Such poisoned entries could be used to influence
++   speculation execution paths in the victim guest.
++
++   Linux kernel mitigates attacks to other guests running in the same
++   CPU hardware thread by flushing the return stack buffer on VM exit,
++   and clearing the branch target buffer before switching to a new guest.
++
++   If SMT is used, Spectre variant 2 attacks from an untrusted guest
++   in the sibling hyperthread can be mitigated by the administrator,
++   by turning off the unsafe guest's indirect branch speculation via
++   prctl().  A guest can also protect itself by turning on microcode
++   based mitigations (such as IBPB or STIBP on x86) within the guest.
++
++.. _spectre_sys_info:
++
++Spectre system information
++--------------------------
++
++The Linux kernel provides a sysfs interface to enumerate the current
++mitigation status of the system for Spectre: whether the system is
++vulnerable, and which mitigations are active.
++
++The sysfs file showing Spectre variant 1 mitigation status is:
++
++   /sys/devices/system/cpu/vulnerabilities/spectre_v1
++
++The possible values in this file are:
++
++  .. list-table::
++
++     * - 'Not affected'
++       - The processor is not vulnerable.
++     * - 'Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers'
++       - The swapgs protections are disabled; otherwise it has
++         protection in the kernel on a case by case base with explicit
++         pointer sanitation and usercopy LFENCE barriers.
++     * - 'Mitigation: usercopy/swapgs barriers and __user pointer sanitization'
++       - Protection in the kernel on a case by case base with explicit
++         pointer sanitation, usercopy LFENCE barriers, and swapgs LFENCE
++         barriers.
++
++However, the protections are put in place on a case by case basis,
++and there is no guarantee that all possible attack vectors for Spectre
++variant 1 are covered.
++
++The spectre_v2 kernel file reports if the kernel has been compiled with
++retpoline mitigation or if the CPU has hardware mitigation, and if the
++CPU has support for additional process-specific mitigation.
++
++This file also reports CPU features enabled by microcode to mitigate
++attack between user processes:
++
++1. Indirect Branch Prediction Barrier (IBPB) to add additional
++   isolation between processes of different users.
++2. Single Thread Indirect Branch Predictors (STIBP) to add additional
++   isolation between CPU threads running on the same core.
++
++These CPU features may impact performance when used and can be enabled
++per process on a case-by-case base.
++
++The sysfs file showing Spectre variant 2 mitigation status is:
++
++   /sys/devices/system/cpu/vulnerabilities/spectre_v2
++
++The possible values in this file are:
++
++  - Kernel status:
++
++  ========================================  =================================
++  'Not affected'                            The processor is not vulnerable
++  'Mitigation: None'                        Vulnerable, no mitigation
++  'Mitigation: Retpolines'                  Use Retpoline thunks
++  'Mitigation: LFENCE'                      Use LFENCE instructions
++  'Mitigation: Enhanced IBRS'               Hardware-focused mitigation
++  'Mitigation: Enhanced IBRS + Retpolines'  Hardware-focused + Retpolines
++  'Mitigation: Enhanced IBRS + LFENCE'      Hardware-focused + LFENCE
++  ========================================  =================================
++
++  - Firmware status: Show if Indirect Branch Restricted Speculation (IBRS) is
++    used to protect against Spectre variant 2 attacks when calling firmware (x86 only).
++
++  ========== =============================================================
++  'IBRS_FW'  Protection against user program attacks when calling firmware
++  ========== =============================================================
++
++  - Indirect branch prediction barrier (IBPB) status for protection between
++    processes of different users. This feature can be controlled through
++    prctl() per process, or through kernel command line options. This is
++    an x86 only feature. For more details see below.
++
++  ===================   ========================================================
++  'IBPB: disabled'      IBPB unused
++  'IBPB: always-on'     Use IBPB on all tasks
++  'IBPB: conditional'   Use IBPB on SECCOMP or indirect branch restricted tasks
++  ===================   ========================================================
++
++  - Single threaded indirect branch prediction (STIBP) status for protection
++    between different hyper threads. This feature can be controlled through
++    prctl() per process, or through kernel command line options. This is an
++    x86 only feature. For more details see below.
++
++  ====================  ========================================================
++  'STIBP: disabled'     STIBP unused
++  'STIBP: forced'       Use STIBP on all tasks
++  'STIBP: conditional'  Use STIBP on SECCOMP or indirect branch restricted tasks
++  ====================  ========================================================
++
++  - Return stack buffer (RSB) protection status:
++
++  =============   ===========================================
++  'RSB filling'   Protection of RSB on context switch enabled
++  =============   ===========================================
++
++Full mitigation might require a microcode update from the CPU
++vendor. When the necessary microcode is not available, the kernel will
++report vulnerability.
++
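[Editor's aside, not part of the patch] A minimal user-space sketch of querying the two sysfs files described above; the file paths come from this document, everything else is ordinary C file I/O::

   /* Read and print the Spectre mitigation status files. */
   #include <stdio.h>

   static void show(const char *path)
   {
           char line[256];
           FILE *f = fopen(path, "r");

           if (!f) {
                   perror(path);
                   return;
           }
           if (fgets(line, sizeof(line), f))
                   printf("%s: %s", path, line);
           fclose(f);
   }

   int main(void)
   {
           show("/sys/devices/system/cpu/vulnerabilities/spectre_v1");
           show("/sys/devices/system/cpu/vulnerabilities/spectre_v2");
           return 0;
   }
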
++Turning on mitigation for Spectre variant 1 and Spectre variant 2
++-----------------------------------------------------------------
++
++1. Kernel mitigation
++^^^^^^^^^^^^^^^^^^^^
++
++Spectre variant 1
++~~~~~~~~~~~~~~~~~
++
++   For the Spectre variant 1, vulnerable kernel code (as determined
++   by code audit or scanning tools) is annotated on a case by case
++   basis to use nospec accessor macros for bounds clipping :ref:`[2]
++   <spec_ref2>` to avoid any usable disclosure gadgets. However, it may
++   not cover all attack vectors for Spectre variant 1.
++
++   Copy-from-user code has an LFENCE barrier to prevent the access_ok()
++   check from being mis-speculated.  The barrier is done by the
++   barrier_nospec() macro.
++
++   For the swapgs variant of Spectre variant 1, LFENCE barriers are
++   added to interrupt, exception and NMI entry where needed.  These
++   barriers are done by the FENCE_SWAPGS_KERNEL_ENTRY and
++   FENCE_SWAPGS_USER_ENTRY macros.
++
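[Editor's aside, not part of the patch] A minimal sketch of the "bounds clipping" pattern the nospec accessor macros provide; array_index_nospec() is the in-tree helper from <linux/nospec.h>, while the array and function names here are purely illustrative::

   #include <linux/errno.h>
   #include <linux/kernel.h>
   #include <linux/nospec.h>

   static int table[16];   /* illustrative kernel-internal array */

   static int read_entry(unsigned long idx)
   {
           if (idx >= ARRAY_SIZE(table))
                   return -EINVAL;

           /* Clamp idx under speculation so a mis-predicted bounds
            * check cannot be used to load out-of-bounds data. */
           idx = array_index_nospec(idx, ARRAY_SIZE(table));
           return table[idx];
   }
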
++Spectre variant 2
++~~~~~~~~~~~~~~~~~
++
++   For Spectre variant 2 mitigation, the compiler turns indirect calls or
++   jumps in the kernel into equivalent return trampolines (retpolines)
++   :ref:`[3] <spec_ref3>` :ref:`[9] <spec_ref9>` to go to the target
++   addresses.  Speculative execution paths under retpolines are trapped
++   in an infinite loop to prevent any speculative execution jumping to
++   a gadget.
++
++   To turn on retpoline mitigation on a vulnerable CPU, the kernel
++   needs to be compiled with a gcc compiler that supports the
++   -mindirect-branch=thunk-extern -mindirect-branch-register options.
++   If the kernel is compiled with a Clang compiler, the compiler needs
++   to support -mretpoline-external-thunk option.  The kernel config
++   CONFIG_RETPOLINE needs to be turned on, and the CPU needs to run with
++   the latest updated microcode.
++
++   On Intel Skylake-era systems the mitigation covers most, but not all,
++   cases. See :ref:`[3] <spec_ref3>` for more details.
++
++   On CPUs with hardware mitigation for Spectre variant 2 (e.g. Enhanced
++   IBRS on x86), retpoline is automatically disabled at run time.
++
++   The retpoline mitigation is turned on by default on vulnerable
++   CPUs. It can be forced on or off by the administrator
++   via the kernel command line and sysfs control files. See
++   :ref:`spectre_mitigation_control_command_line`.
++
++   On x86, indirect branch restricted speculation is turned on by default
++   before invoking any firmware code to prevent Spectre variant 2 exploits
++   using the firmware.
++
++   Using kernel address space randomization (CONFIG_RANDOMIZE_BASE=y
++   and CONFIG_SLAB_FREELIST_RANDOM=y in the kernel configuration) makes
++   attacks on the kernel generally more difficult.
++
++2. User program mitigation
++^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++   User programs can mitigate Spectre variant 1 using LFENCE or "bounds
++   clipping". For more details see :ref:`[2] <spec_ref2>`.
++
++   For Spectre variant 2 mitigation, individual user programs
++   can be compiled with return trampolines for indirect branches.
++   This protects them from consuming poisoned entries in the branch
++   target buffer left by malicious software.  Alternatively, the
++   programs can disable their indirect branch speculation via prctl()
++   (See Documentation/spec_ctrl.txt).
++   On x86, this will turn on STIBP to guard against attacks from the
++   sibling thread when the user program is running, and use IBPB to
++   flush the branch target buffer when switching to/from the program.
++
++   Restricting indirect branch speculation on a user program will
++   also prevent the program from launching a variant 2 attack
++   on x86.  All sand-boxed SECCOMP programs have indirect branch
++   speculation restricted by default.  Administrators can change
++   that behavior via the kernel command line and sysfs control files.
++   See :ref:`spectre_mitigation_control_command_line`.
++
++   Programs that disable their indirect branch speculation will have
++   more overhead and run slower.
++
++   User programs should use address space randomization
++   (/proc/sys/kernel/randomize_va_space = 1 or 2) to make attacks more
++   difficult.
++
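[Editor's aside, not part of the patch] A minimal sketch of the prctl() opt-out mentioned above, using the speculation-control interface from Documentation/spec_ctrl.txt and assuming kernel headers new enough to provide the PR_SPEC_* constants; error handling is trimmed for brevity::

   #include <stdio.h>
   #include <sys/prctl.h>
   #include <linux/prctl.h>

   int main(void)
   {
           /* Disable indirect branch speculation for this task (and,
            * after fork(), its children). On x86 this enables STIBP
            * while the task runs and IBPB on context switch. */
           if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
                     PR_SPEC_DISABLE, 0, 0) != 0)
                   perror("PR_SET_SPECULATION_CTRL");

           /* Query the state; the return value is a PR_SPEC_* bitmask. */
           printf("state: 0x%x\n",
                  (unsigned int)prctl(PR_GET_SPECULATION_CTRL,
                                      PR_SPEC_INDIRECT_BRANCH, 0, 0, 0));
           return 0;
   }
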
++3. VM mitigation
++^^^^^^^^^^^^^^^^
++
++   Within the kernel, Spectre variant 1 attacks from rogue guests are
++   mitigated on a case by case basis in VM exit paths. Vulnerable code
++   uses nospec accessor macros for "bounds clipping", to avoid any
++   usable disclosure gadgets.  However, this may not cover all variant
++   1 attack vectors.
++
++   For Spectre variant 2 attacks from rogue guests to the kernel, the
++   Linux kernel uses retpoline or Enhanced IBRS to prevent consumption of
++   poisoned entries in branch target buffer left by rogue guests.  It also
++   flushes the return stack buffer on every VM exit to prevent a return
++   stack buffer underflow so poisoned branch target buffer could be used,
++   or attacker guests leaving poisoned entries in the return stack buffer.
++
++   To mitigate guest-to-guest attacks in the same CPU hardware thread,
++   the branch target buffer is sanitized by flushing before switching
++   to a new guest on a CPU.
++
++   The above mitigations are turned on by default on vulnerable CPUs.
++
++   To mitigate guest-to-guest attacks from sibling thread when SMT is
++   in use, an untrusted guest running in the sibling thread can have
++   its indirect branch speculation disabled by administrator via prctl().
++
++   The kernel also allows guests to use any microcode based mitigation
++   they choose to use (such as IBPB or STIBP on x86) to protect themselves.
++
++.. _spectre_mitigation_control_command_line:
++
++Mitigation control on the kernel command line
++---------------------------------------------
++
++Spectre variant 2 mitigation can be disabled or force enabled at the
++kernel command line.
++
++      nospectre_v1
++
++              [X86,PPC] Disable mitigations for Spectre Variant 1
++              (bounds check bypass). With this option data leaks are
++              possible in the system.
++
++      nospectre_v2
++
++              [X86] Disable all mitigations for the Spectre variant 2
++              (indirect branch prediction) vulnerability. System may
++              allow data leaks with this option, which is equivalent
++              to spectre_v2=off.
++
++
++        spectre_v2=
++
++              [X86] Control mitigation of Spectre variant 2
++              (indirect branch speculation) vulnerability.
++              The default operation protects the kernel from
++              user space attacks.
++
++              on
++                      unconditionally enable, implies
++                      spectre_v2_user=on
++              off
++                      unconditionally disable, implies
++                      spectre_v2_user=off
++              auto
++                      kernel detects whether your CPU model is
++                      vulnerable
++
++              Selecting 'on' will, and 'auto' may, choose a
++              mitigation method at run time according to the
++              CPU, the available microcode, the setting of the
++              CONFIG_RETPOLINE configuration option, and the
++              compiler with which the kernel was built.
++
++              Selecting 'on' will also enable the mitigation
++              against user space to user space task attacks.
++
++              Selecting 'off' will disable both the kernel and
++              the user space protections.
++
++              Specific mitigations can also be selected manually:
++
++                retpoline               auto pick between generic,lfence
++                retpoline,generic       Retpolines
++                retpoline,lfence        LFENCE; indirect branch
++                retpoline,amd           alias for retpoline,lfence
++                eibrs                   enhanced IBRS
++                eibrs,retpoline         enhanced IBRS + Retpolines
++                eibrs,lfence            enhanced IBRS + LFENCE
++
++              Not specifying this option is equivalent to
++              spectre_v2=auto.
++
++For user space mitigation:
++
++        spectre_v2_user=
++
++              [X86] Control mitigation of Spectre variant 2
++              (indirect branch speculation) vulnerability between
++              user space tasks
++
++              on
++                      Unconditionally enable mitigations. Is
++                      enforced by spectre_v2=on
++
++              off
++                      Unconditionally disable mitigations. Is
++                      enforced by spectre_v2=off
++
++              prctl
++                      Indirect branch speculation is enabled,
++                      but mitigation can be enabled via prctl
++                      per thread. The mitigation control state
++                      is inherited on fork.
++
++              prctl,ibpb
++                      Like "prctl" above, but only STIBP is
++                      controlled per thread. IBPB is issued
++                      always when switching between different user
++                      space processes.
++
++              seccomp
++                      Same as "prctl" above, but all seccomp
++                      threads will enable the mitigation unless
++                      they explicitly opt out.
++
++              seccomp,ibpb
++                      Like "seccomp" above, but only STIBP is
++                      controlled per thread. IBPB is issued
++                      always when switching between different
++                      user space processes.
++
++              auto
++                      Kernel selects the mitigation depending on
++                      the available CPU features and vulnerability.
++
++              Default mitigation:
++              If CONFIG_SECCOMP=y then "seccomp", otherwise "prctl"
++
++              Not specifying this option is equivalent to
++              spectre_v2_user=auto.
++
++              In general the kernel by default selects
++              reasonable mitigations for the current CPU. To
++              disable Spectre variant 2 mitigations, boot with
++              spectre_v2=off. Spectre variant 1 mitigations
++              cannot be disabled.
++
++Mitigation selection guide
++--------------------------
++
++1. Trusted userspace
++^^^^^^^^^^^^^^^^^^^^
++
++   If all userspace applications are from trusted sources and do not
++   execute externally supplied untrusted code, then the mitigations can
++   be disabled.
++
++2. Protect sensitive programs
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++   For security-sensitive programs that have secrets (e.g. crypto
++   keys), protection against Spectre variant 2 can be put in place by
++   disabling indirect branch speculation when the program is running
++   (See Documentation/spec_ctrl.txt).
++
++3. Sandbox untrusted programs
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++   Untrusted programs that could be a source of attacks can be cordoned
++   off by disabling their indirect branch speculation when they are run
++   (See Documentation/spec_ctrl.txt).
++   This prevents untrusted programs from polluting the branch target
++   buffer.  All programs running in SECCOMP sandboxes have indirect
++   branch speculation restricted by default. This behavior can be
++   changed via the kernel command line and sysfs control files. See
++   :ref:`spectre_mitigation_control_command_line`.
++
++3. High security mode
++^^^^^^^^^^^^^^^^^^^^^
++
++   All Spectre variant 2 mitigations can be forced on
++   at boot time for all programs (See the "on" option in
++   :ref:`spectre_mitigation_control_command_line`).  This will add
++   overhead as indirect branch speculations for all programs will be
++   restricted.
++
++   On x86, branch target buffer will be flushed with IBPB when switching
++   to a new program. STIBP is left on all the time to protect programs
++   against variant 2 attacks originating from programs running on
++   sibling threads.
++
++   Alternatively, STIBP can be used only when running programs
++   whose indirect branch speculation is explicitly disabled,
++   while IBPB is still used all the time when switching to a new
++   program to clear the branch target buffer (See "ibpb" option in
++   :ref:`spectre_mitigation_control_command_line`).  This "ibpb" option
++   has less performance cost than the "on" option, which leaves STIBP
++   on all the time.
++
++References on Spectre
++---------------------
++
++Intel white papers:
++
++.. _spec_ref1:
++
++[1] `Intel analysis of speculative execution side channels <https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/Intel-Analysis-of-Speculative-Execution-Side-Channels.pdf>`_.
++
++.. _spec_ref2:
++
++[2] `Bounds check bypass <https://software.intel.com/security-software-guidance/software-guidance/bounds-check-bypass>`_.
++
++.. _spec_ref3:
++
++[3] `Deep dive: Retpoline: A branch target injection mitigation <https://software.intel.com/security-software-guidance/insights/deep-dive-retpoline-branch-target-injection-mitigation>`_.
++
++.. _spec_ref4:
++
++[4] `Deep Dive: Single Thread Indirect Branch Predictors <https://software.intel.com/security-software-guidance/insights/deep-dive-single-thread-indirect-branch-predictors>`_.
++
++AMD white papers:
++
++.. _spec_ref5:
++
++[5] `AMD64 technology indirect branch control extension <https://developer.amd.com/wp-content/resources/Architecture_Guidelines_Update_Indirect_Branch_Control.pdf>`_.
++
++.. _spec_ref6:
++
++[6] `Software techniques for managing speculation on AMD processors <https://developer.amd.com/wp-content/resources/Managing-Speculation-on-AMD-Processors.pdf>`_.
++
++ARM white papers:
++
++.. _spec_ref7:
++
++[7] `Cache speculation side-channels <https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability/download-the-whitepaper>`_.
++
++.. _spec_ref8:
++
++[8] `Cache speculation issues update <https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability/latest-updates/cache-speculation-issues-update>`_.
++
++Google white paper:
++
++.. _spec_ref9:
++
++[9] `Retpoline: a software construct for preventing branch-target-injection <https://support.google.com/faqs/answer/7625886>`_.
++
++MIPS white paper:
++
++.. _spec_ref10:
++
++[10] `MIPS: response on speculative execution and side channel vulnerabilities <https://www.mips.com/blog/mips-response-on-speculative-execution-and-side-channel-vulnerabilities/>`_.
++
++Academic papers:
++
++.. _spec_ref11:
++
++[11] `Spectre Attacks: Exploiting Speculative Execution <https://spectreattack.com/spectre.pdf>`_.
++
++.. _spec_ref12:
++
++[12] `NetSpectre: Read Arbitrary Memory over Network <https://arxiv.org/abs/1807.10535>`_.
++
++.. _spec_ref13:
++
++[13] `Spectre Returns! Speculation Attacks using the Return Stack Buffer <https://www.usenix.org/system/files/conference/woot18/woot18-paper-koruyeh.pdf>`_.
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 713765521c451..6c0957c67d207 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -4174,8 +4174,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+                       Specific mitigations can also be selected manually:
+ 
+                       retpoline         - replace indirect branches
+-                      retpoline,generic - google's original retpoline
+-                      retpoline,amd     - AMD-specific minimal thunk
++                      retpoline,generic - Retpolines
++                      retpoline,lfence  - LFENCE; indirect branch
++                      retpoline,amd     - alias for retpoline,lfence
++                      eibrs             - enhanced IBRS
++                      eibrs,retpoline   - enhanced IBRS + Retpolines
++                      eibrs,lfence      - enhanced IBRS + LFENCE
+ 
+                       Not specifying this option is equivalent to
+                       spectre_v2=auto.
+diff --git a/Makefile b/Makefile
+index 308c848b01dc2..482b841188572 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 305
++SUBLEVEL = 306
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
+index 7d727506096f6..2fa3fd30a9d61 100644
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -108,6 +108,16 @@
+       .endm
+ #endif
+ 
++#if __LINUX_ARM_ARCH__ < 7
++      .macro  dsb, args
++      mcr     p15, 0, r0, c7, c10, 4
++      .endm
++
++      .macro  isb, args
++      mcr     p15, 0, r0, c7, c5, 4
++      .endm
++#endif
++
+       .macro asm_trace_hardirqs_off, save=1
+ #if defined(CONFIG_TRACE_IRQFLAGS)
+       .if \save
+diff --git a/arch/arm/include/asm/spectre.h b/arch/arm/include/asm/spectre.h
+new file mode 100644
+index 0000000000000..d1fa5607d3aa3
+--- /dev/null
++++ b/arch/arm/include/asm/spectre.h
+@@ -0,0 +1,32 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++#ifndef __ASM_SPECTRE_H
++#define __ASM_SPECTRE_H
++
++enum {
++      SPECTRE_UNAFFECTED,
++      SPECTRE_MITIGATED,
++      SPECTRE_VULNERABLE,
++};
++
++enum {
++      __SPECTRE_V2_METHOD_BPIALL,
++      __SPECTRE_V2_METHOD_ICIALLU,
++      __SPECTRE_V2_METHOD_SMC,
++      __SPECTRE_V2_METHOD_HVC,
++      __SPECTRE_V2_METHOD_LOOP8,
++};
++
++enum {
++      SPECTRE_V2_METHOD_BPIALL = BIT(__SPECTRE_V2_METHOD_BPIALL),
++      SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU),
++      SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC),
++      SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC),
++      SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8),
++};
++
++void spectre_v2_update_state(unsigned int state, unsigned int methods);
++
++int spectre_bhb_update_vectors(unsigned int method);
++
++#endif
+diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
+index 9bddd762880cf..1738d5b61eaa1 100644
+--- a/arch/arm/kernel/Makefile
++++ b/arch/arm/kernel/Makefile
+@@ -100,4 +100,6 @@ endif
+ 
+ obj-$(CONFIG_HAVE_ARM_SMCCC)  += smccc-call.o
+ 
++obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += spectre.o
++
+ extra-y := $(head-y) vmlinux.lds
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index 2cac25a69a85d..1040efcb98db6 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -1036,12 +1036,11 @@ vector_\name:
+       sub     lr, lr, #\correction
+       .endif
+ 
+-      @
+-      @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
+-      @ (parent CPSR)
+-      @
++      @ Save r0, lr_<exception> (parent PC)
+       stmia   sp, {r0, lr}            @ save r0, lr
+-      mrs     lr, spsr
++
++      @ Save spsr_<exception> (parent CPSR)
++2:    mrs     lr, spsr
+       str     lr, [sp, #8]            @ save spsr
+ 
+       @
+@@ -1062,6 +1061,44 @@ vector_\name:
+       movs    pc, lr                  @ branch to handler in SVC mode
+ ENDPROC(vector_\name)
+ 
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++      .subsection 1
++      .align 5
++vector_bhb_loop8_\name:
++      .if \correction
++      sub     lr, lr, #\correction
++      .endif
++
++      @ Save r0, lr_<exception> (parent PC)
++      stmia   sp, {r0, lr}
++
++      @ bhb workaround
++      mov     r0, #8
++1:    b       . + 4
++      subs    r0, r0, #1
++      bne     1b
++      dsb
++      isb
++      b       2b
++ENDPROC(vector_bhb_loop8_\name)
++
++vector_bhb_bpiall_\name:
++      .if \correction
++      sub     lr, lr, #\correction
++      .endif
++
++      @ Save r0, lr_<exception> (parent PC)
++      stmia   sp, {r0, lr}
++
++      @ bhb workaround
++      mcr     p15, 0, r0, c7, c5, 6   @ BPIALL
++      @ isb not needed due to "movs pc, lr" in the vector stub
++      @ which gives a "context synchronisation".
++      b       2b
++ENDPROC(vector_bhb_bpiall_\name)
++      .previous
++#endif
++
+       .align  2
+       @ handler addresses follow this label
+ 1:
+@@ -1070,6 +1107,10 @@ ENDPROC(vector_\name)
+       .section .stubs, "ax", %progbits
+       @ This must be the first word
+       .word   vector_swi
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++      .word   vector_bhb_loop8_swi
++      .word   vector_bhb_bpiall_swi
++#endif
+ 
+ vector_rst:
+  ARM( swi     SYS_ERROR0      )
+@@ -1184,8 +1225,10 @@ vector_addrexcptn:
+  * FIQ "NMI" handler
+  *-----------------------------------------------------------------------------
+  * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
+- * systems.
++ * systems. This must be the last vector stub, so lets place it in its own
++ * subsection.
+  */
++      .subsection 2
+       vector_stub     fiq, FIQ_MODE, 4
+ 
+       .long   __fiq_usr                       @  0  (USR_26 / USR_32)
+@@ -1218,6 +1261,30 @@ vector_addrexcptn:
+       W(b)    vector_irq
+       W(b)    vector_fiq
+ 
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++      .section .vectors.bhb.loop8, "ax", %progbits
++.L__vectors_bhb_loop8_start:
++      W(b)    vector_rst
++      W(b)    vector_bhb_loop8_und
++      W(ldr)  pc, .L__vectors_bhb_loop8_start + 0x1004
++      W(b)    vector_bhb_loop8_pabt
++      W(b)    vector_bhb_loop8_dabt
++      W(b)    vector_addrexcptn
++      W(b)    vector_bhb_loop8_irq
++      W(b)    vector_bhb_loop8_fiq
++
++      .section .vectors.bhb.bpiall, "ax", %progbits
++.L__vectors_bhb_bpiall_start:
++      W(b)    vector_rst
++      W(b)    vector_bhb_bpiall_und
++      W(ldr)  pc, .L__vectors_bhb_bpiall_start + 0x1008
++      W(b)    vector_bhb_bpiall_pabt
++      W(b)    vector_bhb_bpiall_dabt
++      W(b)    vector_addrexcptn
++      W(b)    vector_bhb_bpiall_irq
++      W(b)    vector_bhb_bpiall_fiq
++#endif
++
+       .data
+ 
+       .globl  cr_alignment
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index 178a2a9606595..fb0f505c9924f 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -142,6 +142,29 @@ ENDPROC(ret_from_fork)
+  *-----------------------------------------------------------------------------
+  */
+ 
++      .align  5
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++ENTRY(vector_bhb_loop8_swi)
++      sub     sp, sp, #PT_REGS_SIZE
++      stmia   sp, {r0 - r12}
++      mov     r8, #8
++1:    b       2f
++2:    subs    r8, r8, #1
++      bne     1b
++      dsb
++      isb
++      b       3f
++ENDPROC(vector_bhb_loop8_swi)
++
++      .align  5
++ENTRY(vector_bhb_bpiall_swi)
++      sub     sp, sp, #PT_REGS_SIZE
++      stmia   sp, {r0 - r12}
++      mcr     p15, 0, r8, c7, c5, 6   @ BPIALL
++      isb
++      b       3f
++ENDPROC(vector_bhb_bpiall_swi)
++#endif
+       .align  5
+ ENTRY(vector_swi)
+ #ifdef CONFIG_CPU_V7M
+@@ -149,6 +172,7 @@ ENTRY(vector_swi)
+ #else
+       sub     sp, sp, #PT_REGS_SIZE
+       stmia   sp, {r0 - r12}                  @ Calling r0 - r12
++3:
+  ARM( add     r8, sp, #S_PC           )
+  ARM( stmdb   r8, {sp, lr}^           )       @ Calling sp, lr
+  THUMB(       mov     r8, sp                  )
+diff --git a/arch/arm/kernel/spectre.c b/arch/arm/kernel/spectre.c
+new file mode 100644
+index 0000000000000..0dcefc36fb7a0
+--- /dev/null
++++ b/arch/arm/kernel/spectre.c
+@@ -0,0 +1,71 @@
++// SPDX-License-Identifier: GPL-2.0-only
++#include <linux/bpf.h>
++#include <linux/cpu.h>
++#include <linux/device.h>
++
++#include <asm/spectre.h>
++
++static bool _unprivileged_ebpf_enabled(void)
++{
++#ifdef CONFIG_BPF_SYSCALL
++      return !sysctl_unprivileged_bpf_disabled;
++#else
++      return false;
++#endif
++}
++
++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
++                          char *buf)
++{
++      return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++}
++
++static unsigned int spectre_v2_state;
++static unsigned int spectre_v2_methods;
++
++void spectre_v2_update_state(unsigned int state, unsigned int method)
++{
++      if (state > spectre_v2_state)
++              spectre_v2_state = state;
++      spectre_v2_methods |= method;
++}
++
++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
++                          char *buf)
++{
++      const char *method;
++
++      if (spectre_v2_state == SPECTRE_UNAFFECTED)
++              return sprintf(buf, "%s\n", "Not affected");
++
++      if (spectre_v2_state != SPECTRE_MITIGATED)
++              return sprintf(buf, "%s\n", "Vulnerable");
++
++      if (_unprivileged_ebpf_enabled())
++              return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
++
++      switch (spectre_v2_methods) {
++      case SPECTRE_V2_METHOD_BPIALL:
++              method = "Branch predictor hardening";
++              break;
++
++      case SPECTRE_V2_METHOD_ICIALLU:
++              method = "I-cache invalidation";
++              break;
++
++      case SPECTRE_V2_METHOD_SMC:
++      case SPECTRE_V2_METHOD_HVC:
++              method = "Firmware call";
++              break;
++
++      case SPECTRE_V2_METHOD_LOOP8:
++              method = "History overwrite";
++              break;
++
++      default:
++              method = "Multiple mitigations";
++              break;
++      }
++
++      return sprintf(buf, "Mitigation: %s\n", method);
++}
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index aa316a7562b1f..7fca7ece8f979 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -31,6 +31,7 @@
+ #include <linux/atomic.h>
+ #include <asm/cacheflush.h>
+ #include <asm/exception.h>
++#include <asm/spectre.h>
+ #include <asm/unistd.h>
+ #include <asm/traps.h>
+ #include <asm/ptrace.h>
+@@ -819,10 +820,59 @@ static inline void __init kuser_init(void *vectors)
+ }
+ #endif
+ 
++#ifndef CONFIG_CPU_V7M
++static void copy_from_lma(void *vma, void *lma_start, void *lma_end)
++{
++      memcpy(vma, lma_start, lma_end - lma_start);
++}
++
++static void flush_vectors(void *vma, size_t offset, size_t size)
++{
++      unsigned long start = (unsigned long)vma + offset;
++      unsigned long end = start + size;
++
++      flush_icache_range(start, end);
++}
++
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++int spectre_bhb_update_vectors(unsigned int method)
++{
++      extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
++      extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
++      void *vec_start, *vec_end;
++
++      if (system_state >= SYSTEM_RUNNING) {
++              pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
++                     smp_processor_id());
++              return SPECTRE_VULNERABLE;
++      }
++
++      switch (method) {
++      case SPECTRE_V2_METHOD_LOOP8:
++              vec_start = __vectors_bhb_loop8_start;
++              vec_end = __vectors_bhb_loop8_end;
++              break;
++
++      case SPECTRE_V2_METHOD_BPIALL:
++              vec_start = __vectors_bhb_bpiall_start;
++              vec_end = __vectors_bhb_bpiall_end;
++              break;
++
++      default:
++              pr_err("CPU%u: unknown Spectre BHB state %d\n",
++                     smp_processor_id(), method);
++              return SPECTRE_VULNERABLE;
++      }
++
++      copy_from_lma(vectors_page, vec_start, vec_end);
++      flush_vectors(vectors_page, 0, vec_end - vec_start);
++
++      return SPECTRE_MITIGATED;
++}
++#endif
++
+ void __init early_trap_init(void *vectors_base)
+ {
+-#ifndef CONFIG_CPU_V7M
+-      unsigned long vectors = (unsigned long)vectors_base;
+       extern char __stubs_start[], __stubs_end[];
+       extern char __vectors_start[], __vectors_end[];
+       unsigned i;
+@@ -843,17 +893,20 @@ void __init early_trap_init(void *vectors_base)
+        * into the vector page, mapped at 0xffff0000, and ensure these
+        * are visible to the instruction stream.
+        */
+-      memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
+-      memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
++      copy_from_lma(vectors_base, __vectors_start, __vectors_end);
++      copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end);
+ 
+       kuser_init(vectors_base);
+ 
+-      flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
++      flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
++}
+ #else /* ifndef CONFIG_CPU_V7M */
++void __init early_trap_init(void *vectors_base)
++{
+       /*
+        * on V7-M there is no need to copy the vector table to a dedicated
+        * memory area. The address is configurable and so a table in the kernel
+        * image can be used.
+        */
+-#endif
+ }
++#endif
+diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
+index 37b2a11af3459..d80ef8c2bb461 100644
+--- a/arch/arm/kernel/vmlinux-xip.lds.S
++++ b/arch/arm/kernel/vmlinux-xip.lds.S
+@@ -12,6 +12,19 @@
+ #include <asm/memory.h>
+ #include <asm/page.h>
+ 
++/*
++ * ld.lld does not support NOCROSSREFS:
++ * https://github.com/ClangBuiltLinux/linux/issues/1609
++ */
++#ifdef CONFIG_LD_IS_LLD
++#define NOCROSSREFS
++#endif
++
++/* Set start/end symbol names to the LMA for the section */
++#define ARM_LMA(sym, section)                                         \
++      sym##_start = LOADADDR(section);                                \
++      sym##_end = LOADADDR(section) + SIZEOF(section)
++
+ #define PROC_INFO                                                     \
+       . = ALIGN(4);                                                   \
+       VMLINUX_SYMBOL(__proc_info_begin) = .;                          \
+@@ -148,19 +161,31 @@ SECTIONS
+        * The vectors and stubs are relocatable code, and the
+        * only thing that matters is their relative offsets
+        */
+-      __vectors_start = .;
+-      .vectors 0xffff0000 : AT(__vectors_start) {
+-              *(.vectors)
++      __vectors_lma = .;
++      OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) {
++              .vectors {
++                      *(.vectors)
++              }
++              .vectors.bhb.loop8 {
++                      *(.vectors.bhb.loop8)
++              }
++              .vectors.bhb.bpiall {
++                      *(.vectors.bhb.bpiall)
++              }
+       }
+-      . = __vectors_start + SIZEOF(.vectors);
+-      __vectors_end = .;
+-
+-      __stubs_start = .;
+-      .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
++      ARM_LMA(__vectors, .vectors);
++      ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8);
++      ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall);
++      . = __vectors_lma + SIZEOF(.vectors) +
++              SIZEOF(.vectors.bhb.loop8) +
++              SIZEOF(.vectors.bhb.bpiall);
++
++      __stubs_lma = .;
++      .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) {
+               *(.stubs)
+       }
+-      . = __stubs_start + SIZEOF(.stubs);
+-      __stubs_end = .;
++      ARM_LMA(__stubs, .stubs);
++      . = __stubs_lma + SIZEOF(.stubs);
+ 
+       PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
+ 
+diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
+index f7f55df0bf7b3..0d560a24408f0 100644
+--- a/arch/arm/kernel/vmlinux.lds.S
++++ b/arch/arm/kernel/vmlinux.lds.S
+@@ -14,6 +14,19 @@
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ 
++/*
++ * ld.lld does not support NOCROSSREFS:
++ * https://github.com/ClangBuiltLinux/linux/issues/1609
++ */
++#ifdef CONFIG_LD_IS_LLD
++#define NOCROSSREFS
++#endif
++
++/* Set start/end symbol names to the LMA for the section */
++#define ARM_LMA(sym, section)                                         \
++      sym##_start = LOADADDR(section);                                \
++      sym##_end = LOADADDR(section) + SIZEOF(section)
++
+ #define PROC_INFO                                                     \
+       . = ALIGN(4);                                                   \
+       VMLINUX_SYMBOL(__proc_info_begin) = .;                          \
+@@ -169,19 +182,31 @@ SECTIONS
+        * The vectors and stubs are relocatable code, and the
+        * only thing that matters is their relative offsets
+        */
+-      __vectors_start = .;
+-      .vectors 0xffff0000 : AT(__vectors_start) {
+-              *(.vectors)
++      __vectors_lma = .;
++      OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) {
++              .vectors {
++                      *(.vectors)
++              }
++              .vectors.bhb.loop8 {
++                      *(.vectors.bhb.loop8)
++              }
++              .vectors.bhb.bpiall {
++                      *(.vectors.bhb.bpiall)
++              }
+       }
+-      . = __vectors_start + SIZEOF(.vectors);
+-      __vectors_end = .;
+-
+-      __stubs_start = .;
+-      .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
++      ARM_LMA(__vectors, .vectors);
++      ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8);
++      ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall);
++      . = __vectors_lma + SIZEOF(.vectors) +
++              SIZEOF(.vectors.bhb.loop8) +
++              SIZEOF(.vectors.bhb.bpiall);
++
++      __stubs_lma = .;
++      .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) {
+               *(.stubs)
+       }
+-      . = __stubs_start + SIZEOF(.stubs);
+-      __stubs_end = .;
++      ARM_LMA(__stubs, .stubs);
++      . = __stubs_lma + SIZEOF(.stubs);
+ 
+       PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
+ 
+diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
+index 93623627a0b68..5c98074010d25 100644
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -803,6 +803,7 @@ config CPU_BPREDICT_DISABLE
+ 
+ config CPU_SPECTRE
+       bool
++      select GENERIC_CPU_VULNERABILITIES
+ 
+ config HARDEN_BRANCH_PREDICTOR
+       bool "Harden the branch predictor against aliasing attacks" if EXPERT
+@@ -823,6 +824,16 @@ config HARDEN_BRANCH_PREDICTOR
+ 
+          If unsure, say Y.
+ 
++config HARDEN_BRANCH_HISTORY
++      bool "Harden Spectre style attacks against branch history" if EXPERT
++      depends on CPU_SPECTRE
++      default y
++      help
++        Speculation attacks against some high-performance processors can
++        make use of branch history to influence future speculation. When
++        taking an exception, a sequence of branches overwrites the branch
++        history, or branch history is invalidated.
++
+ config TLS_REG_EMUL
+       bool
+       select NEED_KUSER_HELPERS
+diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
+index 9a07916af8dd2..1b6e770bc1cd3 100644
+--- a/arch/arm/mm/proc-v7-bugs.c
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -7,8 +7,36 @@
+ #include <asm/cp15.h>
+ #include <asm/cputype.h>
+ #include <asm/proc-fns.h>
++#include <asm/spectre.h>
+ #include <asm/system_misc.h>
+ 
++#ifdef CONFIG_ARM_PSCI
++#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED  1
++static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
++{
++      struct arm_smccc_res res;
++
++      arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++                           ARM_SMCCC_ARCH_WORKAROUND_1, &res);
++
++      switch ((int)res.a0) {
++      case SMCCC_RET_SUCCESS:
++              return SPECTRE_MITIGATED;
++
++      case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
++              return SPECTRE_UNAFFECTED;
++
++      default:
++              return SPECTRE_VULNERABLE;
++      }
++}
++#else
++static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
++{
++      return SPECTRE_VULNERABLE;
++}
++#endif
++
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
+ 
+@@ -37,13 +65,61 @@ static void __maybe_unused call_hvc_arch_workaround_1(void)
+       arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+ }
+ 
+-static void cpu_v7_spectre_init(void)
++static unsigned int spectre_v2_install_workaround(unsigned int method)
+ {
+       const char *spectre_v2_method = NULL;
+       int cpu = smp_processor_id();
+ 
+       if (per_cpu(harden_branch_predictor_fn, cpu))
+-              return;
++              return SPECTRE_MITIGATED;
++
++      switch (method) {
++      case SPECTRE_V2_METHOD_BPIALL:
++              per_cpu(harden_branch_predictor_fn, cpu) =
++                      harden_branch_predictor_bpiall;
++              spectre_v2_method = "BPIALL";
++              break;
++
++      case SPECTRE_V2_METHOD_ICIALLU:
++              per_cpu(harden_branch_predictor_fn, cpu) =
++                      harden_branch_predictor_iciallu;
++              spectre_v2_method = "ICIALLU";
++              break;
++
++      case SPECTRE_V2_METHOD_HVC:
++              per_cpu(harden_branch_predictor_fn, cpu) =
++                      call_hvc_arch_workaround_1;
++              cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
++              spectre_v2_method = "hypervisor";
++              break;
++
++      case SPECTRE_V2_METHOD_SMC:
++              per_cpu(harden_branch_predictor_fn, cpu) =
++                      call_smc_arch_workaround_1;
++              cpu_do_switch_mm = cpu_v7_smc_switch_mm;
++              spectre_v2_method = "firmware";
++              break;
++      }
++
++      if (spectre_v2_method)
++              pr_info("CPU%u: Spectre v2: using %s workaround\n",
++                      smp_processor_id(), spectre_v2_method);
++
++      return SPECTRE_MITIGATED;
++}
++#else
++static unsigned int spectre_v2_install_workaround(unsigned int method)
++{
++      pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n",
++              smp_processor_id());
++
++      return SPECTRE_VULNERABLE;
++}
++#endif
++
++static void cpu_v7_spectre_v2_init(void)
++{
++      unsigned int state, method = 0;
+ 
+       switch (read_cpuid_part()) {
+       case ARM_CPU_PART_CORTEX_A8:
+@@ -52,29 +128,32 @@ static void cpu_v7_spectre_init(void)
+       case ARM_CPU_PART_CORTEX_A17:
+       case ARM_CPU_PART_CORTEX_A73:
+       case ARM_CPU_PART_CORTEX_A75:
+-              per_cpu(harden_branch_predictor_fn, cpu) =
+-                      harden_branch_predictor_bpiall;
+-              spectre_v2_method = "BPIALL";
++              state = SPECTRE_MITIGATED;
++              method = SPECTRE_V2_METHOD_BPIALL;
+               break;
+ 
+       case ARM_CPU_PART_CORTEX_A15:
+       case ARM_CPU_PART_BRAHMA_B15:
+-              per_cpu(harden_branch_predictor_fn, cpu) =
+-                      harden_branch_predictor_iciallu;
+-              spectre_v2_method = "ICIALLU";
++              state = SPECTRE_MITIGATED;
++              method = SPECTRE_V2_METHOD_ICIALLU;
+               break;
+ 
+-#ifdef CONFIG_ARM_PSCI
+       default:
+               /* Other ARM CPUs require no workaround */
+-              if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
++              if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) {
++                      state = SPECTRE_UNAFFECTED;
+                       break;
++              }
+               /* fallthrough */
+-              /* Cortex A57/A72 require firmware workaround */
++      /* Cortex A57/A72 require firmware workaround */
+       case ARM_CPU_PART_CORTEX_A57:
+       case ARM_CPU_PART_CORTEX_A72: {
+               struct arm_smccc_res res;
+ 
++              state = spectre_v2_get_cpu_fw_mitigation_state();
++              if (state != SPECTRE_MITIGATED)
++                      break;
++
+               if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+                       break;
+ 
+@@ -84,10 +163,7 @@ static void cpu_v7_spectre_init(void)
+                                         ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+                       if ((int)res.a0 != 0)
+                               break;
+-                      per_cpu(harden_branch_predictor_fn, cpu) =
+-                              call_hvc_arch_workaround_1;
+-                      cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
+-                      spectre_v2_method = "hypervisor";
++                      method = SPECTRE_V2_METHOD_HVC;
+                       break;
+ 
+               case PSCI_CONDUIT_SMC:
+@@ -95,29 +171,97 @@ static void cpu_v7_spectre_init(void)
+                                         ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+                       if ((int)res.a0 != 0)
+                               break;
+-                      per_cpu(harden_branch_predictor_fn, cpu) =
+-                              call_smc_arch_workaround_1;
+-                      cpu_do_switch_mm = cpu_v7_smc_switch_mm;
+-                      spectre_v2_method = "firmware";
++                      method = SPECTRE_V2_METHOD_SMC;
+                       break;
+ 
+               default:
++                      state = SPECTRE_VULNERABLE;
+                       break;
+               }
+       }
+-#endif
+       }
+ 
+-      if (spectre_v2_method)
+-              pr_info("CPU%u: Spectre v2: using %s workaround\n",
+-                      smp_processor_id(), spectre_v2_method);
++      if (state == SPECTRE_MITIGATED)
++              state = spectre_v2_install_workaround(method);
++
++      spectre_v2_update_state(state, method);
++}
++
++#ifdef CONFIG_HARDEN_BRANCH_HISTORY
++static int spectre_bhb_method;
++
++static const char *spectre_bhb_method_name(int method)
++{
++      switch (method) {
++      case SPECTRE_V2_METHOD_LOOP8:
++              return "loop";
++
++      case SPECTRE_V2_METHOD_BPIALL:
++              return "BPIALL";
++
++      default:
++              return "unknown";
++      }
++}
++
++static int spectre_bhb_install_workaround(int method)
++{
++      if (spectre_bhb_method != method) {
++              if (spectre_bhb_method) {
++			pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
++                             smp_processor_id());
++
++                      return SPECTRE_VULNERABLE;
++              }
++
++              if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
++                      return SPECTRE_VULNERABLE;
++
++              spectre_bhb_method = method;
++      }
++
++      pr_info("CPU%u: Spectre BHB: using %s workaround\n",
++              smp_processor_id(), spectre_bhb_method_name(method));
++
++      return SPECTRE_MITIGATED;
+ }
+ #else
+-static void cpu_v7_spectre_init(void)
++static int spectre_bhb_install_workaround(int method)
+ {
++      return SPECTRE_VULNERABLE;
+ }
+ #endif
+ 
++static void cpu_v7_spectre_bhb_init(void)
++{
++      unsigned int state, method = 0;
++
++      switch (read_cpuid_part()) {
++      case ARM_CPU_PART_CORTEX_A15:
++      case ARM_CPU_PART_BRAHMA_B15:
++      case ARM_CPU_PART_CORTEX_A57:
++      case ARM_CPU_PART_CORTEX_A72:
++              state = SPECTRE_MITIGATED;
++              method = SPECTRE_V2_METHOD_LOOP8;
++              break;
++
++      case ARM_CPU_PART_CORTEX_A73:
++      case ARM_CPU_PART_CORTEX_A75:
++              state = SPECTRE_MITIGATED;
++              method = SPECTRE_V2_METHOD_BPIALL;
++              break;
++
++      default:
++              state = SPECTRE_UNAFFECTED;
++              break;
++      }
++
++      if (state == SPECTRE_MITIGATED)
++              state = spectre_bhb_install_workaround(method);
++
++      spectre_v2_update_state(state, method);
++}
++
+ static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
+                                                 u32 mask, const char *msg)
+ {
+@@ -146,16 +290,17 @@ static bool check_spectre_auxcr(bool *warned, u32 bit)
+ void cpu_v7_ca8_ibe(void)
+ {
+       if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
+-              cpu_v7_spectre_init();
++              cpu_v7_spectre_v2_init();
+ }
+ 
+ void cpu_v7_ca15_ibe(void)
+ {
+       if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
+-              cpu_v7_spectre_init();
++              cpu_v7_spectre_v2_init();
+ }
+ 
+ void cpu_v7_bugs_init(void)
+ {
+-      cpu_v7_spectre_init();
++      cpu_v7_spectre_v2_init();
++      cpu_v7_spectre_bhb_init();
+ }
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 3ce5b5bd1dc45..fa202cd53b619 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -418,10 +418,6 @@ config RETPOLINE
+         branches. Requires a compiler with -mindirect-branch=thunk-extern
+         support for full protection. The kernel may run slower.
+ 
+-        Without compiler support, at least indirect branches in assembler
+-        code are eliminated. Since this includes the syscall entry path,
+-        it is not entirely pointless.
+-
+ if X86_32
+ config X86_EXTENDED_PLATFORM
+       bool "Support for extended (non-PC) x86 platforms"
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 0bc35e3e6c5cd..a77737a979c8c 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -221,9 +221,7 @@ ifdef CONFIG_RETPOLINE
+     RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
+ 
+    RETPOLINE_CFLAGS += $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
+-    ifneq ($(RETPOLINE_CFLAGS),)
+-        KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
+-    endif
++    KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
+ endif
+ 
+ archscripts: scripts_basic
+@@ -239,6 +237,13 @@ archprepare:
+ ifeq ($(CONFIG_KEXEC_FILE),y)
+	$(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
+ endif
++ifdef CONFIG_RETPOLINE
++ifeq ($(RETPOLINE_CFLAGS),)
++      @echo "You are building kernel with non-retpoline compiler." >&2
++      @echo "Please update your compiler." >&2
++      @false
++endif
++endif
+ 
+ ###
+ # Kernel objects
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 8ceb7a8a249c7..5b197248d5465 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -195,7 +195,7 @@
+ #define X86_FEATURE_FENCE_SWAPGS_USER ( 7*32+10) /* "" LFENCE in user entry SWAPGS path */
+ #define X86_FEATURE_FENCE_SWAPGS_KERNEL       ( 7*32+11) /* "" LFENCE in kernel entry SWAPGS path */
+ #define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+-#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
++#define X86_FEATURE_RETPOLINE_LFENCE  ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */
+ 
+ #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
+ #define X86_FEATURE_SSBD      ( 7*32+17) /* Speculative Store Bypass Disable */
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 204a5ce65afda..19829b00e4fed 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -119,7 +119,7 @@
+       ANNOTATE_NOSPEC_ALTERNATIVE
+       ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg),  \
+               __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
+-              __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
++              __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_LFENCE
+ #else
+       jmp     *\reg
+ #endif
+@@ -130,7 +130,7 @@
+       ANNOTATE_NOSPEC_ALTERNATIVE
+       ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg), \
+               __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
+-              __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
++              __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_LFENCE
+ #else
+       call    *\reg
+ #endif
+@@ -164,29 +164,35 @@
+       _ASM_PTR " 999b\n\t"                                    \
+       ".popsection\n\t"
+ 
+-#if defined(CONFIG_X86_64) && defined(RETPOLINE)
++#ifdef CONFIG_RETPOLINE
++#ifdef CONFIG_X86_64
+ 
+ /*
+- * Since the inline asm uses the %V modifier which is only in newer GCC,
+- * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE.
++ * Inline asm uses the %V modifier which is only in newer GCC
++ * which is ensured when CONFIG_RETPOLINE is defined.
+  */
+ # define CALL_NOSPEC                                          \
+       ANNOTATE_NOSPEC_ALTERNATIVE                             \
+-      ALTERNATIVE(                                            \
++      ALTERNATIVE_2(                                          \
+       ANNOTATE_RETPOLINE_SAFE                                 \
+       "call *%[thunk_target]\n",                              \
+       "call __x86_indirect_thunk_%V[thunk_target]\n",         \
+-      X86_FEATURE_RETPOLINE)
++      X86_FEATURE_RETPOLINE,                                  \
++      "lfence;\n"                                             \
++      ANNOTATE_RETPOLINE_SAFE                                 \
++      "call *%[thunk_target]\n",                              \
++      X86_FEATURE_RETPOLINE_LFENCE)
+ # define THUNK_TARGET(addr) [thunk_target] "r" (addr)
+ 
+-#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
++#else /* CONFIG_X86_32 */
+ /*
+  * For i386 we use the original ret-equivalent retpoline, because
+  * otherwise we'll run out of registers. We don't care about CET
+  * here, anyway.
+  */
+ # define CALL_NOSPEC                                          \
+-      ALTERNATIVE(                                            \
++      ANNOTATE_NOSPEC_ALTERNATIVE                             \
++      ALTERNATIVE_2(                                          \
+       ANNOTATE_RETPOLINE_SAFE                                 \
+       "call *%[thunk_target]\n",                              \
+       "       jmp    904f;\n"                                 \
+@@ -201,9 +207,14 @@
+       "       ret;\n"                                         \
+       "       .align 16\n"                                    \
+       "904:   call   901b;\n",                                \
+-      X86_FEATURE_RETPOLINE)
++      X86_FEATURE_RETPOLINE,                                  \
++      "lfence;\n"                                             \
++      ANNOTATE_RETPOLINE_SAFE                                 \
++      "call *%[thunk_target]\n",                              \
++      X86_FEATURE_RETPOLINE_LFENCE)
+ 
+ # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
++#endif
+ #else /* No retpoline for C / inline asm */
+ # define CALL_NOSPEC "call *%[thunk_target]\n"
+ # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+@@ -212,11 +223,11 @@
+ /* The Spectre V2 mitigation variants */
+ enum spectre_v2_mitigation {
+       SPECTRE_V2_NONE,
+-      SPECTRE_V2_RETPOLINE_MINIMAL,
+-      SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
+-      SPECTRE_V2_RETPOLINE_GENERIC,
+-      SPECTRE_V2_RETPOLINE_AMD,
+-      SPECTRE_V2_IBRS_ENHANCED,
++      SPECTRE_V2_RETPOLINE,
++      SPECTRE_V2_LFENCE,
++      SPECTRE_V2_EIBRS,
++      SPECTRE_V2_EIBRS_RETPOLINE,
++      SPECTRE_V2_EIBRS_LFENCE,
+ };
+ 
+ /* The indirect branch speculation control variants */
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index a884bb7e7b01d..94aa0206b1f98 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -30,6 +30,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/intel-family.h>
+ #include <asm/e820.h>
++#include <linux/bpf.h>
+ 
+ #include "cpu.h"
+ 
+@@ -585,7 +586,7 @@ static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
+ static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
+       SPECTRE_V2_USER_NONE;
+ 
+-#ifdef RETPOLINE
++#ifdef CONFIG_RETPOLINE
+ static bool spectre_v2_bad_module;
+ 
+ bool retpoline_module_ok(bool has_retpoline)
+@@ -606,6 +607,32 @@ static inline const char *spectre_v2_module_string(void)
+ static inline const char *spectre_v2_module_string(void) { return ""; }
+ #endif
+ 
++#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
++#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
++#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
++
++#ifdef CONFIG_BPF_SYSCALL
++void unpriv_ebpf_notify(int new_state)
++{
++      if (new_state)
++              return;
++
++      /* Unprivileged eBPF is enabled */
++
++      switch (spectre_v2_enabled) {
++      case SPECTRE_V2_EIBRS:
++              pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
++              break;
++      case SPECTRE_V2_EIBRS_LFENCE:
++              if (sched_smt_active())
++                      pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
++              break;
++      default:
++              break;
++      }
++}
++#endif
++
+ static inline bool match_option(const char *arg, int arglen, const char *opt)
+ {
+       int len = strlen(opt);
+@@ -620,7 +647,10 @@ enum spectre_v2_mitigation_cmd {
+       SPECTRE_V2_CMD_FORCE,
+       SPECTRE_V2_CMD_RETPOLINE,
+       SPECTRE_V2_CMD_RETPOLINE_GENERIC,
+-      SPECTRE_V2_CMD_RETPOLINE_AMD,
++      SPECTRE_V2_CMD_RETPOLINE_LFENCE,
++      SPECTRE_V2_CMD_EIBRS,
++      SPECTRE_V2_CMD_EIBRS_RETPOLINE,
++      SPECTRE_V2_CMD_EIBRS_LFENCE,
+ };
+ 
+ enum spectre_v2_user_cmd {
+@@ -693,6 +723,13 @@ spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
+       return SPECTRE_V2_USER_CMD_AUTO;
+ }
+ 
++static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
++{
++      return (mode == SPECTRE_V2_EIBRS ||
++              mode == SPECTRE_V2_EIBRS_RETPOLINE ||
++              mode == SPECTRE_V2_EIBRS_LFENCE);
++}
++
+ static void __init
+ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+ {
+@@ -755,10 +792,12 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+       }
+ 
+       /*
+-       * If enhanced IBRS is enabled or SMT impossible, STIBP is not
++       * If no STIBP, enhanced IBRS is enabled or SMT impossible, STIBP is not
+        * required.
+        */
+-      if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
++      if (!boot_cpu_has(X86_FEATURE_STIBP) ||
++          !smt_possible ||
++          spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+               return;
+ 
+       /*
+@@ -770,12 +809,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+           boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
+               mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+ 
+-      /*
+-       * If STIBP is not available, clear the STIBP mode.
+-       */
+-      if (!boot_cpu_has(X86_FEATURE_STIBP))
+-              mode = SPECTRE_V2_USER_NONE;
+-
+       spectre_v2_user_stibp = mode;
+ 
+ set_mode:
+@@ -784,11 +817,11 @@ set_mode:
+ 
+ static const char * const spectre_v2_strings[] = {
+       [SPECTRE_V2_NONE]                       = "Vulnerable",
+-      [SPECTRE_V2_RETPOLINE_MINIMAL]          = "Vulnerable: Minimal generic ASM retpoline",
+-      [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]      = "Vulnerable: Minimal AMD ASM retpoline",
+-      [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
+-      [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
+-      [SPECTRE_V2_IBRS_ENHANCED]              = "Mitigation: Enhanced IBRS",
++      [SPECTRE_V2_RETPOLINE]                  = "Mitigation: Retpolines",
++      [SPECTRE_V2_LFENCE]                     = "Mitigation: LFENCE",
++      [SPECTRE_V2_EIBRS]                      = "Mitigation: Enhanced IBRS",
++      [SPECTRE_V2_EIBRS_LFENCE]               = "Mitigation: Enhanced IBRS + LFENCE",
++      [SPECTRE_V2_EIBRS_RETPOLINE]            = "Mitigation: Enhanced IBRS + Retpolines",
+ };
+ 
+ static const struct {
+@@ -799,8 +832,12 @@ static const struct {
+       { "off",                SPECTRE_V2_CMD_NONE,              false },
+       { "on",                 SPECTRE_V2_CMD_FORCE,             true  },
+       { "retpoline",          SPECTRE_V2_CMD_RETPOLINE,         false },
+-      { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
++      { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
++      { "retpoline,lfence",   SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
+       { "retpoline,generic",  SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
++      { "eibrs",              SPECTRE_V2_CMD_EIBRS,             false },
++      { "eibrs,lfence",       SPECTRE_V2_CMD_EIBRS_LFENCE,      false },
++      { "eibrs,retpoline",    SPECTRE_V2_CMD_EIBRS_RETPOLINE,   false },
+       { "auto",               SPECTRE_V2_CMD_AUTO,              false },
+ };
+ 
+@@ -810,11 +847,6 @@ static void __init spec_v2_print_cond(const char *reason, bool secure)
+               pr_info("%s selected on command line.\n", reason);
+ }
+ 
+-static inline bool retp_compiler(void)
+-{
+-      return __is_defined(RETPOLINE);
+-}
+-
+ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+ {
+       enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
+@@ -842,16 +874,30 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+       }
+ 
+       if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
+-           cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
+-           cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
++           cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
++           cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
++           cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
++           cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
+           !IS_ENABLED(CONFIG_RETPOLINE)) {
+-              pr_err("%s selected but not compiled in. Switching to AUTO 
select\n", mitigation_options[i].option);
++              pr_err("%s selected but not compiled in. Switching to AUTO 
select\n",
++                     mitigation_options[i].option);
++              return SPECTRE_V2_CMD_AUTO;
++      }
++
++      if ((cmd == SPECTRE_V2_CMD_EIBRS ||
++           cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
++           cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
++          !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
++              pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
++                     mitigation_options[i].option);
+               return SPECTRE_V2_CMD_AUTO;
+       }
+ 
+-      if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
+-          boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+-              pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
++      if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
++           cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
++          !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
++              pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
++                     mitigation_options[i].option);
+               return SPECTRE_V2_CMD_AUTO;
+       }
+ 
+@@ -860,6 +906,16 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+       return cmd;
+ }
+ 
++static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
++{
++      if (!IS_ENABLED(CONFIG_RETPOLINE)) {
++              pr_err("Kernel not compiled with retpoline; no mitigation available!");
++              return SPECTRE_V2_NONE;
++      }
++
++      return SPECTRE_V2_RETPOLINE;
++}
++
+ static void __init spectre_v2_select_mitigation(void)
+ {
+       enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+@@ -880,50 +936,64 @@ static void __init spectre_v2_select_mitigation(void)
+       case SPECTRE_V2_CMD_FORCE:
+       case SPECTRE_V2_CMD_AUTO:
+               if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
+-                      mode = SPECTRE_V2_IBRS_ENHANCED;
+-                      /* Force it so VMEXIT will restore correctly */
+-                      x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+-                      wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+-                      goto specv2_set_mode;
++                      mode = SPECTRE_V2_EIBRS;
++                      break;
+               }
+-              if (IS_ENABLED(CONFIG_RETPOLINE))
+-                      goto retpoline_auto;
++
++              mode = spectre_v2_select_retpoline();
+               break;
+-      case SPECTRE_V2_CMD_RETPOLINE_AMD:
+-              if (IS_ENABLED(CONFIG_RETPOLINE))
+-                      goto retpoline_amd;
++
++      case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
++              pr_err(SPECTRE_V2_LFENCE_MSG);
++              mode = SPECTRE_V2_LFENCE;
+               break;
++
+       case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
+-              if (IS_ENABLED(CONFIG_RETPOLINE))
+-                      goto retpoline_generic;
++              mode = SPECTRE_V2_RETPOLINE;
+               break;
++
+       case SPECTRE_V2_CMD_RETPOLINE:
+-              if (IS_ENABLED(CONFIG_RETPOLINE))
+-                      goto retpoline_auto;
++              mode = spectre_v2_select_retpoline();
++              break;
++
++      case SPECTRE_V2_CMD_EIBRS:
++              mode = SPECTRE_V2_EIBRS;
++              break;
++
++      case SPECTRE_V2_CMD_EIBRS_LFENCE:
++              mode = SPECTRE_V2_EIBRS_LFENCE;
++              break;
++
++      case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
++              mode = SPECTRE_V2_EIBRS_RETPOLINE;
+               break;
+       }
+-      pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
+-      return;
+ 
+-retpoline_auto:
+-      if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+-      retpoline_amd:
+-              if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
+-                      pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
+-                      goto retpoline_generic;
+-              }
+-              mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
+-                                       SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
+-              setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
+-              setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+-      } else {
+-      retpoline_generic:
+-              mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
+-                                       SPECTRE_V2_RETPOLINE_MINIMAL;
++      if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
++              pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
++
++      if (spectre_v2_in_eibrs_mode(mode)) {
++              /* Force it so VMEXIT will restore correctly */
++              x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
++              wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
++      }
++
++      switch (mode) {
++      case SPECTRE_V2_NONE:
++      case SPECTRE_V2_EIBRS:
++              break;
++
++      case SPECTRE_V2_LFENCE:
++      case SPECTRE_V2_EIBRS_LFENCE:
++              setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
++              /* fallthrough */
++
++      case SPECTRE_V2_RETPOLINE:
++      case SPECTRE_V2_EIBRS_RETPOLINE:
+               setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
++              break;
+       }
+ 
+-specv2_set_mode:
+       spectre_v2_enabled = mode;
+       pr_info("%s\n", spectre_v2_strings[mode]);
+ 
+@@ -949,7 +1019,7 @@ specv2_set_mode:
+        * the CPU supports Enhanced IBRS, kernel might un-intentionally not
+        * enable IBRS around firmware calls.
+        */
+-      if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
++      if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
+               setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
+               pr_info("Enabling Restricted Speculation for firmware calls\n");
+       }
+@@ -1019,6 +1089,10 @@ void arch_smt_update(void)
+ {
+       mutex_lock(&spec_ctrl_mutex);
+ 
++      if (sched_smt_active() && unprivileged_ebpf_enabled() &&
++          spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
++              pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
++
+       switch (spectre_v2_user_stibp) {
+       case SPECTRE_V2_USER_NONE:
+               break;
+@@ -1263,7 +1337,6 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+               if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+                   spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
+                       return 0;
+-
+               /*
+                * With strict mode for both IBPB and STIBP, the instruction
+                * code paths avoid checking this task flag and instead,
+@@ -1610,7 +1683,7 @@ static ssize_t tsx_async_abort_show_state(char *buf)
+ 
+ static char *stibp_state(void)
+ {
+-      if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
++      if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+               return "";
+ 
+       switch (spectre_v2_user_stibp) {
+@@ -1640,6 +1713,27 @@ static char *ibpb_state(void)
+       return "";
+ }
+ 
++static ssize_t spectre_v2_show_state(char *buf)
++{
++      if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
++              return sprintf(buf, "Vulnerable: LFENCE\n");
++
++      if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
++              return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
++
++      if (sched_smt_active() && unprivileged_ebpf_enabled() &&
++          spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
++              return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
++
++      return sprintf(buf, "%s%s%s%s%s%s\n",
++                     spectre_v2_strings[spectre_v2_enabled],
++                     ibpb_state(),
++                     boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
++                     stibp_state(),
++                     boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
++                     spectre_v2_module_string());
++}
++
+ static ssize_t srbds_show_state(char *buf)
+ {
+       return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
+@@ -1662,12 +1756,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
+               return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
+ 
+       case X86_BUG_SPECTRE_V2:
+-              return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+-                             ibpb_state(),
+-                             boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+-                             stibp_state(),
+-                             boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+-                             spectre_v2_module_string());
++              return spectre_v2_show_state(buf);
+ 
+       case X86_BUG_SPEC_STORE_BYPASS:
+               return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index d420597b0d2b4..17ea0ba50278d 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -1266,17 +1266,16 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
+               list_for_each_entry_safe(persistent_gnt, n,
+                                        &rinfo->grants, node) {
+                       list_del(&persistent_gnt->node);
+-                      if (persistent_gnt->gref != GRANT_INVALID_REF) {
+-                              gnttab_end_foreign_access(persistent_gnt->gref,
+-                                                        0, 0UL);
+-                              rinfo->persistent_gnts_c--;
+-                      }
++                      if (persistent_gnt->gref == GRANT_INVALID_REF ||
++                          !gnttab_try_end_foreign_access(persistent_gnt->gref))
++                              continue;
++
++                      rinfo->persistent_gnts_c--;
+                       if (info->feature_persistent)
+                               __free_page(persistent_gnt->page);
+                       kfree(persistent_gnt);
+               }
+       }
+-      BUG_ON(rinfo->persistent_gnts_c != 0);
+ 
+       for (i = 0; i < BLK_RING_SIZE(info); i++) {
+               /*
+@@ -1333,7 +1332,8 @@ free_shadow:
+                       rinfo->ring_ref[i] = GRANT_INVALID_REF;
+               }
+       }
+-      free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
++      free_pages_exact(rinfo->ring.sring,
++                       info->nr_ring_pages * XEN_PAGE_SIZE);
+       rinfo->ring.sring = NULL;
+ 
+       if (rinfo->irq)
+@@ -1417,9 +1417,15 @@ static int blkif_get_final_status(enum blk_req_status s1,
+       return BLKIF_RSP_OKAY;
+ }
+ 
+-static bool blkif_completion(unsigned long *id,
+-                           struct blkfront_ring_info *rinfo,
+-                           struct blkif_response *bret)
++/*
++ * Return values:
++ *  1 response processed.
++ *  0 missing further responses.
++ * -1 error while processing.
++ */
++static int blkif_completion(unsigned long *id,
++                          struct blkfront_ring_info *rinfo,
++                          struct blkif_response *bret)
+ {
+       int i = 0;
+       struct scatterlist *sg;
+@@ -1493,42 +1499,43 @@ static bool blkif_completion(unsigned long *id,
+       }
+       /* Add the persistent grant into the list of free grants */
+       for (i = 0; i < num_grant; i++) {
+-              if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
++              if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) {
+                       /*
+                        * If the grant is still mapped by the backend (the
+                        * backend has chosen to make this grant persistent)
+                        * we add it at the head of the list, so it will be
+                        * reused first.
+                        */
+-                      if (!info->feature_persistent)
+-                              pr_alert_ratelimited("backed has not unmapped grant: %u\n",
+-                                                   s->grants_used[i]->gref);
++                      if (!info->feature_persistent) {
++                              pr_alert("backed has not unmapped grant: %u\n",
++                                       s->grants_used[i]->gref);
++                              return -1;
++                      }
+                       list_add(&s->grants_used[i]->node, &rinfo->grants);
+                       rinfo->persistent_gnts_c++;
+               } else {
+                       /*
+-                       * If the grant is not mapped by the backend we end the
+-                       * foreign access and add it to the tail of the list,
+-                       * so it will not be picked again unless we run out of
+-                       * persistent grants.
++                       * If the grant is not mapped by the backend we add it
++                       * to the tail of the list, so it will not be picked
++                       * again unless we run out of persistent grants.
+                        */
+-                      gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
+                       s->grants_used[i]->gref = GRANT_INVALID_REF;
+                       list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
+               }
+       }
+       if (s->req.operation == BLKIF_OP_INDIRECT) {
+               for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
+-                      if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
+-                              if (!info->feature_persistent)
+-                                      pr_alert_ratelimited("backed has not unmapped grant: %u\n",
+-                                                           s->indirect_grants[i]->gref);
++                      if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) {
++                              if (!info->feature_persistent) {
++                                      pr_alert("backed has not unmapped grant: %u\n",
++                                               s->indirect_grants[i]->gref);
++                                      return -1;
++                              }
+                               list_add(&s->indirect_grants[i]->node, &rinfo->grants);
+                               rinfo->persistent_gnts_c++;
+                       } else {
+                               struct page *indirect_page;
+ 
+-                              gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
+                               /*
+                                * Add the used indirect page back to the list of
+                                * available pages for indirect grefs.
+@@ -1610,12 +1617,17 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+               }
+ 
+               if (bret.operation != BLKIF_OP_DISCARD) {
++                      int ret;
++
+                       /*
+                        * We may need to wait for an extra response if the
+                        * I/O request is split in 2
+                        */
+-                      if (!blkif_completion(&id, rinfo, &bret))
++                      ret = blkif_completion(&id, rinfo, &bret);
++                      if (!ret)
+                               continue;
++                      if (unlikely(ret < 0))
++                              goto err;
+               }
+ 
+               if (add_id_to_freelist(rinfo, id)) {
+@@ -1717,8 +1729,7 @@ static int setup_blkring(struct xenbus_device *dev,
+       for (i = 0; i < info->nr_ring_pages; i++)
+               rinfo->ring_ref[i] = GRANT_INVALID_REF;
+ 
+-      sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
+-                                                     get_order(ring_size));
++      sring = alloc_pages_exact(ring_size, GFP_NOIO);
+       if (!sring) {
+               xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
+               return -ENOMEM;
+@@ -1728,7 +1739,7 @@ static int setup_blkring(struct xenbus_device *dev,
+ 
+       err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
+       if (err < 0) {
+-              free_pages((unsigned long)sring, get_order(ring_size));
++              free_pages_exact(sring, ring_size);
+               rinfo->ring.sring = NULL;
+               goto fail;
+       }
+diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
+index 79a48c37fb35b..2a6d9572d6397 100644
+--- a/drivers/firmware/psci.c
++++ b/drivers/firmware/psci.c
+@@ -64,6 +64,21 @@ struct psci_operations psci_ops = {
+       .smccc_version = SMCCC_VERSION_1_0,
+ };
+ 
++enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
++{
++      if (psci_ops.smccc_version < SMCCC_VERSION_1_1)
++              return SMCCC_CONDUIT_NONE;
++
++      switch (psci_ops.conduit) {
++      case PSCI_CONDUIT_SMC:
++              return SMCCC_CONDUIT_SMC;
++      case PSCI_CONDUIT_HVC:
++              return SMCCC_CONDUIT_HVC;
++      default:
++              return SMCCC_CONDUIT_NONE;
++      }
++}
++
+ typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+                               unsigned long, unsigned long);
+ static psci_fn *invoke_psci_fn;
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 65a50bc5661d2..82dcd44b3e5e2 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -413,14 +413,12 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue)
+                       queue->tx_link[id] = TX_LINK_NONE;
+                       skb = queue->tx_skbs[id];
+                       queue->tx_skbs[id] = NULL;
+-                      if (unlikely(gnttab_query_foreign_access(
+-                              queue->grant_tx_ref[id]) != 0)) {
++                      if (unlikely(!gnttab_end_foreign_access_ref(
++                              queue->grant_tx_ref[id], GNTMAP_readonly))) {
+                               dev_alert(dev,
+                                         "Grant still in use by backend domain\n");
+                               goto err;
+                       }
+-                      gnttab_end_foreign_access_ref(
+-                              queue->grant_tx_ref[id], GNTMAP_readonly);
+                       gnttab_release_grant_reference(
+                               &queue->gref_tx_head, queue->grant_tx_ref[id]);
+                       queue->grant_tx_ref[id] = GRANT_INVALID_REF;
+@@ -840,7 +838,6 @@ static int xennet_get_responses(struct netfront_queue *queue,
+       int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
+       int slots = 1;
+       int err = 0;
+-      unsigned long ret;
+ 
+       if (rx->flags & XEN_NETRXF_extra_info) {
+               err = xennet_get_extras(queue, extras, rp);
+@@ -871,8 +868,13 @@ static int xennet_get_responses(struct netfront_queue *queue,
+                       goto next;
+               }
+ 
+-              ret = gnttab_end_foreign_access_ref(ref, 0);
+-              BUG_ON(!ret);
++              if (!gnttab_end_foreign_access_ref(ref, 0)) {
++                      dev_alert(dev,
++                                "Grant still in use by backend domain\n");
++                      queue->info->broken = true;
++                      dev_alert(dev, "Disabled for further use\n");
++                      return -EINVAL;
++              }
+ 
+               gnttab_release_grant_reference(&queue->gref_rx_head, ref);
+ 
+@@ -1076,6 +1078,10 @@ static int xennet_poll(struct napi_struct *napi, int budget)
+               err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
+ 
+               if (unlikely(err)) {
++                      if (queue->info->broken) {
++                              spin_unlock(&queue->rx_lock);
++                              return 0;
++                      }
+ err:
+                       while ((skb = __skb_dequeue(&tmpq)))
+                               __skb_queue_tail(&errq, skb);
+@@ -1673,7 +1679,7 @@ static int setup_netfront(struct xenbus_device *dev,
+                       struct netfront_queue *queue, unsigned int feature_split_evtchn)
+ {
+       struct xen_netif_tx_sring *txs;
+-      struct xen_netif_rx_sring *rxs;
++      struct xen_netif_rx_sring *rxs = NULL;
+       grant_ref_t gref;
+       int err;
+ 
+@@ -1693,21 +1699,21 @@ static int setup_netfront(struct xenbus_device *dev,
+ 
+       err = xenbus_grant_ring(dev, txs, 1, &gref);
+       if (err < 0)
+-              goto grant_tx_ring_fail;
++              goto fail;
+       queue->tx_ring_ref = gref;
+ 
+       rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
+       if (!rxs) {
+               err = -ENOMEM;
+               xenbus_dev_fatal(dev, err, "allocating rx ring page");
+-              goto alloc_rx_ring_fail;
++              goto fail;
+       }
+       SHARED_RING_INIT(rxs);
+       FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
+ 
+       err = xenbus_grant_ring(dev, rxs, 1, &gref);
+       if (err < 0)
+-              goto grant_rx_ring_fail;
++              goto fail;
+       queue->rx_ring_ref = gref;
+ 
+       if (feature_split_evtchn)
+@@ -1720,22 +1726,28 @@ static int setup_netfront(struct xenbus_device *dev,
+               err = setup_netfront_single(queue);
+ 
+       if (err)
+-              goto alloc_evtchn_fail;
++              goto fail;
+ 
+       return 0;
+ 
+       /* If we fail to setup netfront, it is safe to just revoke access to
+        * granted pages because backend is not accessing it at this point.
+        */
+-alloc_evtchn_fail:
+-      gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
+-grant_rx_ring_fail:
+-      free_page((unsigned long)rxs);
+-alloc_rx_ring_fail:
+-      gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
+-grant_tx_ring_fail:
+-      free_page((unsigned long)txs);
+-fail:
++ fail:
++      if (queue->rx_ring_ref != GRANT_INVALID_REF) {
++              gnttab_end_foreign_access(queue->rx_ring_ref, 0,
++                                        (unsigned long)rxs);
++              queue->rx_ring_ref = GRANT_INVALID_REF;
++      } else {
++              free_page((unsigned long)rxs);
++      }
++      if (queue->tx_ring_ref != GRANT_INVALID_REF) {
++              gnttab_end_foreign_access(queue->tx_ring_ref, 0,
++                                        (unsigned long)txs);
++              queue->tx_ring_ref = GRANT_INVALID_REF;
++      } else {
++              free_page((unsigned long)txs);
++      }
+       return err;
+ }
+ 
+diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
+index e1b32ed0aa205..bdfe94c023dcd 100644
+--- a/drivers/scsi/xen-scsifront.c
++++ b/drivers/scsi/xen-scsifront.c
+@@ -210,12 +210,11 @@ static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
+               return;
+ 
+       for (i = 0; i < s->nr_grants; i++) {
+-              if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
++              if (unlikely(!gnttab_try_end_foreign_access(s->gref[i]))) {
+                       shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
+                                    "grant still in use by backend\n");
+                       BUG();
+               }
+-              gnttab_end_foreign_access(s->gref[i], 0, 0UL);
+       }
+ 
+       kfree(s->sg);
+diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
+index 7a47c4c9fb1bb..24f8900eccadd 100644
+--- a/drivers/xen/gntalloc.c
++++ b/drivers/xen/gntalloc.c
+@@ -166,20 +166,14 @@ undo:
+               __del_gref(gref);
+       }
+ 
+-      /* It's possible for the target domain to map the just-allocated grant
+-       * references by blindly guessing their IDs; if this is done, then
+-       * __del_gref will leave them in the queue_gref list. They need to be
+-       * added to the global list so that we can free them when they are no
+-       * longer referenced.
+-       */
+-      if (unlikely(!list_empty(&queue_gref)))
+-              list_splice_tail(&queue_gref, &gref_list);
+       mutex_unlock(&gref_mutex);
+       return rc;
+ }
+ 
+ static void __del_gref(struct gntalloc_gref *gref)
+ {
++      unsigned long addr;
++
+       if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
+               uint8_t *tmp = kmap(gref->page);
+               tmp[gref->notify.pgoff] = 0;
+@@ -193,21 +187,16 @@ static void __del_gref(struct gntalloc_gref *gref)
+       gref->notify.flags = 0;
+ 
+       if (gref->gref_id) {
+-              if (gnttab_query_foreign_access(gref->gref_id))
+-                      return;
+-
+-              if (!gnttab_end_foreign_access_ref(gref->gref_id, 0))
+-                      return;
+-
+-              gnttab_free_grant_reference(gref->gref_id);
++              if (gref->page) {
++                      addr = (unsigned long)page_to_virt(gref->page);
++                      gnttab_end_foreign_access(gref->gref_id, 0, addr);
++              } else
++                      gnttab_free_grant_reference(gref->gref_id);
+       }
+ 
+       gref_size--;
+       list_del(&gref->next_gref);
+ 
+-      if (gref->page)
+-              __free_page(gref->page);
+-
+       kfree(gref);
+ }
+ 
+diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
+index 775d4195966c4..02754b4923e96 100644
+--- a/drivers/xen/grant-table.c
++++ b/drivers/xen/grant-table.c
+@@ -114,12 +114,9 @@ struct gnttab_ops {
+        */
+       unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
+       /*
+-       * Query the status of a grant entry. Ref parameter is reference of
+-       * queried grant entry, return value is the status of queried entry.
+-       * Detailed status(writing/reading) can be gotten from the return value
+-       * by bit operations.
++       * Read the frame number related to a given grant reference.
+        */
+-      int (*query_foreign_access)(grant_ref_t ref);
++      unsigned long (*read_frame)(grant_ref_t ref);
+ };
+ 
+ struct unmap_refs_callback_data {
+@@ -254,17 +251,6 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
+ }
+ EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
+ 
+-static int gnttab_query_foreign_access_v1(grant_ref_t ref)
+-{
+-      return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
+-}
+-
+-int gnttab_query_foreign_access(grant_ref_t ref)
+-{
+-      return gnttab_interface->query_foreign_access(ref);
+-}
+-EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
+-
+ static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
+ {
+       u16 flags, nflags;
+@@ -295,6 +281,11 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+ }
+ EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
+ 
++static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
++{
++      return gnttab_shared.v1[ref].frame;
++}
++
+ struct deferred_entry {
+       struct list_head list;
+       grant_ref_t ref;
+@@ -324,12 +315,9 @@ static void gnttab_handle_deferred(unsigned long unused)
+               spin_unlock_irqrestore(&gnttab_list_lock, flags);
+               if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
+                       put_free_entry(entry->ref);
+-                      if (entry->page) {
+-                              pr_debug("freeing g.e. %#x (pfn %#lx)\n",
+-                                       entry->ref, page_to_pfn(entry->page));
+-                              put_page(entry->page);
+-                      } else
+-                              pr_info("freeing g.e. %#x\n", entry->ref);
++                      pr_debug("freeing g.e. %#x (pfn %#lx)\n",
++                               entry->ref, page_to_pfn(entry->page));
++                      put_page(entry->page);
+                       kfree(entry);
+                       entry = NULL;
+               } else {
+@@ -354,9 +342,18 @@ static void gnttab_handle_deferred(unsigned long unused)
+ static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
+                               struct page *page)
+ {
+-      struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
++      struct deferred_entry *entry;
++      gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
+       const char *what = KERN_WARNING "leaking";
+ 
++      entry = kmalloc(sizeof(*entry), gfp);
++      if (!page) {
++              unsigned long gfn = gnttab_interface->read_frame(ref);
++
++              page = pfn_to_page(gfn_to_pfn(gfn));
++              get_page(page);
++      }
++
+       if (entry) {
+               unsigned long flags;
+ 
+@@ -377,11 +374,21 @@ static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
+              what, ref, page ? page_to_pfn(page) : -1);
+ }
+ 
++int gnttab_try_end_foreign_access(grant_ref_t ref)
++{
++      int ret = _gnttab_end_foreign_access_ref(ref, 0);
++
++      if (ret)
++              put_free_entry(ref);
++
++      return ret;
++}
++EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);
++
+ void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
+                              unsigned long page)
+ {
+-      if (gnttab_end_foreign_access_ref(ref, readonly)) {
+-              put_free_entry(ref);
++      if (gnttab_try_end_foreign_access(ref)) {
+               if (page != 0)
+                       put_page(virt_to_page(page));
+       } else
+@@ -1018,7 +1025,7 @@ static const struct gnttab_ops gnttab_v1_ops = {
+       .update_entry                   = gnttab_update_entry_v1,
+       .end_foreign_access_ref         = gnttab_end_foreign_access_ref_v1,
+       .end_foreign_transfer_ref       = gnttab_end_foreign_transfer_ref_v1,
+-      .query_foreign_access           = gnttab_query_foreign_access_v1,
++      .read_frame                     = gnttab_read_frame_v1,
+ };
+ 
+ static void gnttab_request_version(void)
+diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
+index 8bbd887ca422b..5ee38e939165c 100644
+--- a/drivers/xen/xenbus/xenbus_client.c
++++ b/drivers/xen/xenbus/xenbus_client.c
+@@ -387,7 +387,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
+                     unsigned int nr_pages, grant_ref_t *grefs)
+ {
+       int err;
+-      int i, j;
++      unsigned int i;
++      grant_ref_t gref_head;
++
++      err = gnttab_alloc_grant_references(nr_pages, &gref_head);
++      if (err) {
++              xenbus_dev_fatal(dev, err, "granting access to ring page");
++              return err;
++      }
+ 
+       for (i = 0; i < nr_pages; i++) {
+               unsigned long gfn;
+@@ -397,23 +404,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
+               else
+                       gfn = virt_to_gfn(vaddr);
+ 
+-              err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
+-              if (err < 0) {
+-                      xenbus_dev_fatal(dev, err,
+-                                       "granting access to ring page");
+-                      goto fail;
+-              }
+-              grefs[i] = err;
++              grefs[i] = gnttab_claim_grant_reference(&gref_head);
++              gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
++                                              gfn, 0);
+ 
+               vaddr = vaddr + XEN_PAGE_SIZE;
+       }
+ 
+       return 0;
+-
+-fail:
+-      for (j = 0; j < i; j++)
+-              gnttab_end_foreign_access_ref(grefs[j], 0);
+-      return err;
+ }
+ EXPORT_SYMBOL_GPL(xenbus_grant_ring);
+ 
+diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
+index 18863d56273cc..6366b04c7d5f4 100644
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -89,6 +89,22 @@
+ 
+ #include <linux/linkage.h>
+ #include <linux/types.h>
++
++enum arm_smccc_conduit {
++      SMCCC_CONDUIT_NONE,
++      SMCCC_CONDUIT_SMC,
++      SMCCC_CONDUIT_HVC,
++};
++
++/**
++ * arm_smccc_1_1_get_conduit()
++ *
++ * Returns the conduit to be used for SMCCCv1.1 or later.
++ *
++ * When SMCCCv1.1 is not present, returns SMCCC_CONDUIT_NONE.
++ */
++enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void);
++
+ /**
+  * struct arm_smccc_res - Result from SMC/HVC call
+  * @a0-a3 result values from registers 0 to 3
+@@ -311,5 +327,63 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
+ #define SMCCC_RET_NOT_SUPPORTED                       -1
+ #define SMCCC_RET_NOT_REQUIRED                        -2
+ 
++/*
++ * Like arm_smccc_1_1* but always returns SMCCC_RET_NOT_SUPPORTED.
++ * Used when the SMCCC conduit is not defined. The empty asm statement
++ * avoids compiler warnings about unused variables.
++ */
++#define __fail_smccc_1_1(...)                                         \
++      do {                                                            \
++              __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
++              asm ("" __constraints(__count_args(__VA_ARGS__)));      \
++              if (___res)                                             \
++                      ___res->a0 = SMCCC_RET_NOT_SUPPORTED;           \
++      } while (0)
++
++/*
++ * arm_smccc_1_1_invoke() - make an SMCCC v1.1 compliant call
++ *
++ * This is a variadic macro taking one to eight source arguments, and
++ * an optional return structure.
++ *
++ * @a0-a7: arguments passed in registers 0 to 7
++ * @res: result values from registers 0 to 3
++ *
++ * This macro will make either an HVC call or an SMC call depending on the
++ * current SMCCC conduit. If no valid conduit is available then -1
++ * (SMCCC_RET_NOT_SUPPORTED) is returned in @res.a0 (if supplied).
++ *
++ * The return value also provides the conduit that was used.
++ */
++#define arm_smccc_1_1_invoke(...) ({                                  \
++              int method = arm_smccc_1_1_get_conduit();               \
++              switch (method) {                                       \
++              case SMCCC_CONDUIT_HVC:                                 \
++                      arm_smccc_1_1_hvc(__VA_ARGS__);                 \
++                      break;                                          \
++              case SMCCC_CONDUIT_SMC:                                 \
++                      arm_smccc_1_1_smc(__VA_ARGS__);                 \
++                      break;                                          \
++              default:                                                \
++                      __fail_smccc_1_1(__VA_ARGS__);                  \
++                      method = SMCCC_CONDUIT_NONE;                    \
++                      break;                                          \
++              }                                                       \
++              method;                                                 \
++      })
++
++/* Paravirtualised time calls (defined by ARM DEN0057A) */
++#define ARM_SMCCC_HV_PV_TIME_FEATURES                         \
++      ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                 \
++                         ARM_SMCCC_SMC_64,                    \
++                         ARM_SMCCC_OWNER_STANDARD_HYP,        \
++                         0x20)
++
++#define ARM_SMCCC_HV_PV_TIME_ST                                       \
++      ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                 \
++                         ARM_SMCCC_SMC_64,                    \
++                         ARM_SMCCC_OWNER_STANDARD_HYP,        \
++                         0x21)
++
+ #endif /*__ASSEMBLY__*/
+ #endif /*__LINUX_ARM_SMCCC_H*/
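For illustration, a caller of the arm_smccc_1_1_invoke() macro added
above might look like the sketch below; probe_pv_time() is a
hypothetical name and the error handling is only an example of the
conduit-aware dispatch described in the macro's comment:

	static int probe_pv_time(void)
	{
		struct arm_smccc_res res;

		/* Dispatches via HVC or SMC; with no conduit registered,
		 * res.a0 is set to SMCCC_RET_NOT_SUPPORTED. */
		arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_TIME_FEATURES,
				     ARM_SMCCC_HV_PV_TIME_ST, &res);
		if ((long)res.a0 == SMCCC_RET_NOT_SUPPORTED)
			return -EOPNOTSUPP;
		return 0;
	}
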
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 7995940d41877..fe520d40597ff 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -295,6 +295,11 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+ 
+ /* verify correctness of eBPF program */
+ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
++
++static inline bool unprivileged_ebpf_enabled(void)
++{
++      return !sysctl_unprivileged_bpf_disabled;
++}
+ #else
+ static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl)
+ {
+@@ -322,6 +327,12 @@ static inline struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+ {
+       return ERR_PTR(-EOPNOTSUPP);
+ }
++
++static inline bool unprivileged_ebpf_enabled(void)
++{
++      return false;
++}
++
+ #endif /* CONFIG_BPF_SYSCALL */
+ 
+ /* verifier prototypes for helper functions called from eBPF programs */
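The new unprivileged_ebpf_enabled() helper (stubbed to false when
CONFIG_BPF_SYSCALL is off) gives mitigation code a single place to
check whether unprivileged BPF is currently allowed. A hedged sketch
of a consumer; the function name and message are illustrative:

	static void warn_if_unpriv_ebpf(void)
	{
		/* Unprivileged BPF makes Spectre v2 style gadgets easier
		 * to construct, so mitigation reporting may want to know. */
		if (unprivileged_ebpf_enabled())
			pr_warn_once("unprivileged eBPF is enabled\n");
	}
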
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index d830eddacdc60..1c1ca41685162 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -107,7 +107,7 @@
+ #define __weak                __attribute__((weak))
+ #define __alias(symbol)       __attribute__((alias(#symbol)))
+ 
+-#ifdef RETPOLINE
++#ifdef CONFIG_RETPOLINE
+ #define __noretpoline __attribute__((indirect_branch("keep")))
+ #endif
+ 
+diff --git a/include/linux/module.h b/include/linux/module.h
+index 99f330ae13da5..be4a3a9fd89ca 100644
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -791,7 +791,7 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
+ static inline void module_bug_cleanup(struct module *mod) {}
+ #endif        /* CONFIG_GENERIC_BUG */
+ 
+-#ifdef RETPOLINE
++#ifdef CONFIG_RETPOLINE
+ extern bool retpoline_module_ok(bool has_retpoline);
+ #else
+ static inline bool retpoline_module_ok(bool has_retpoline)
+diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
+index f9d8aac170fbc..c51ae64b6dcb8 100644
+--- a/include/xen/grant_table.h
++++ b/include/xen/grant_table.h
+@@ -97,17 +97,32 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
+  * access has been ended, free the given page too.  Access will be ended
+  * immediately iff the grant entry is not in use, otherwise it will happen
+  * some time later.  page may be 0, in which case no freeing will occur.
++ * Note that the granted page might still be accessed (read or write) by the
++ * other side after gnttab_end_foreign_access() returns, so even if page was
++ * specified as 0 it is not allowed to just reuse the page for other
++ * purposes immediately. gnttab_end_foreign_access() will take an additional
++ * reference to the granted page in this case, which is dropped only after
++ * the grant is no longer in use.
++ * This requires that multi page allocations for areas subject to
++ * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing
++ * via free_pages_exact()) in order to avoid high order pages.
+  */
+ void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
+                              unsigned long page);
+ 
++/*
++ * End access through the given grant reference, iff the grant entry is
++ * no longer in use.  In case of success ending foreign access, the
++ * grant reference is deallocated.
++ * Return 1 if the grant entry was freed, 0 if it is still in use.
++ */
++int gnttab_try_end_foreign_access(grant_ref_t ref);
++
+ int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
+ 
+ unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
+ unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
+ 
+-int gnttab_query_foreign_access(grant_ref_t ref);
+-
+ /*
+  * operations on reserved batches of grant references
+  */
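gnttab_try_end_foreign_access() replaces the removed
gnttab_query_foreign_access() polling pattern: rather than asking
whether the peer still maps the grant and then ending access, the
caller tries to end access and only reclaims its page once that
succeeds. A sketch under that assumption; defer_free() stands in for
a driver-private deferral mechanism and is hypothetical:

	static void my_release_ref(grant_ref_t ref, struct page *page)
	{
		if (gnttab_try_end_foreign_access(ref)) {
			/* Grant entry was unused and has been freed. */
			__free_page(page);
			return;
		}
		/* Still in use by the other end: the page must not be
		 * reused yet, so hand it to a deferred-free list. */
		defer_free(ref, page);
	}
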
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 78b445562b81e..184d462339e65 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -222,6 +222,11 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
+ #endif
+ 
+ #ifdef CONFIG_BPF_SYSCALL
++
++void __weak unpriv_ebpf_notify(int new_state)
++{
++}
++
+ static int bpf_unpriv_handler(struct ctl_table *table, int write,
+                              void *buffer, size_t *lenp, loff_t *ppos)
+ {
+@@ -239,6 +244,9 @@ static int bpf_unpriv_handler(struct ctl_table *table, int write,
+                       return -EPERM;
+               *(int *)table->data = unpriv_enable;
+       }
++
++      unpriv_ebpf_notify(unpriv_enable);
++
+       return ret;
+ }
+ #endif
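unpriv_ebpf_notify() is declared __weak so that architecture
mitigation code can override it and react when the
kernel.unprivileged_bpf_disabled sysctl is changed at runtime. A
minimal sketch of such an override; the message is illustrative and
new_state is assumed to be the freshly written sysctl value (non-zero
means disabled):

	void unpriv_ebpf_notify(int new_state)
	{
		if (new_state == 0)
			pr_warn("unprivileged eBPF was just enabled\n");
	}
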
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index 9abcdf2e8dfe8..62b0552b7b718 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -2147,7 +2147,7 @@ static void add_intree_flag(struct buffer *b, int is_intree)
+ /* Cannot check for assembler */
+ static void add_retpoline(struct buffer *b)
+ {
+-      buf_printf(b, "\n#ifdef RETPOLINE\n");
++      buf_printf(b, "\n#ifdef CONFIG_RETPOLINE\n");
+       buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n");
+       buf_printf(b, "#endif\n");
+ }
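With CONFIG_RETPOLINE as the guard, the block that add_retpoline()
emits into each generated <module>.mod.c reads as below; this is the
tag that retpoline_module_ok(), declared under the same guard in
linux/module.h above, inspects at module load time:

	#ifdef CONFIG_RETPOLINE
	MODULE_INFO(retpoline, "Y");
	#endif
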
+diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
+index f6d1bc93589c7..f032dfed00a93 100644
+--- a/tools/arch/x86/include/asm/cpufeatures.h
++++ b/tools/arch/x86/include/asm/cpufeatures.h
+@@ -194,7 +194,7 @@
+ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+ 
+ #define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+-#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
++#define X86_FEATURE_RETPOLINE_LFENCE  ( 7*32+13) /* "" Use LFENCEs for Spectre variant 2 */
+ 
+ #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
+ #define X86_FEATURE_SSBD      ( 7*32+17) /* Speculative Store Bypass Disable */
