commit:     e85cd562f9a414d55c8a7f0d2827e7398791f057
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jan 23 21:17:59 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jan 23 21:17:59 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e85cd562

Linux patch 4.14.15

 0000_README              |    4 +
 1014_linux-4.14.15.patch | 4300 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4304 insertions(+)

diff --git a/0000_README b/0000_README
index 045c7bd..c7ad7f2 100644
--- a/0000_README
+++ b/0000_README
@@ -99,6 +99,10 @@ Patch:  1013_linux-4.14.14.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.14.14
 
+Patch:  1014_linux-4.14.15.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.14.15
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1014_linux-4.14.15.patch b/1014_linux-4.14.15.patch
new file mode 100644
index 0000000..a81c84e
--- /dev/null
+++ b/1014_linux-4.14.15.patch
@@ -0,0 +1,4300 @@
+diff --git a/Documentation/x86/pti.txt b/Documentation/x86/pti.txt
+index d11eff61fc9a..5cd58439ad2d 100644
+--- a/Documentation/x86/pti.txt
++++ b/Documentation/x86/pti.txt
+@@ -78,7 +78,7 @@ this protection comes at a cost:
+      non-PTI SYSCALL entry code, so requires mapping fewer
+      things into the userspace page tables.  The downside is
+      that stacks must be switched at entry time.
+-  d. Global pages are disabled for all kernel structures not
++  c. Global pages are disabled for all kernel structures not
+      mapped into both kernel and userspace page tables.  This
+      feature of the MMU allows different processes to share TLB
+      entries mapping the kernel.  Losing the feature means more
+diff --git a/Makefile b/Makefile
+index 4951305eb867..bf1a277a67a4 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
+index 37bd6d9b8eb9..a6bdc1da47ad 100644
+--- a/arch/alpha/kernel/sys_sio.c
++++ b/arch/alpha/kernel/sys_sio.c
+@@ -102,6 +102,15 @@ sio_pci_route(void)
+                                  alpha_mv.sys.sio.route_tab);
+ }
+ 
++static bool sio_pci_dev_irq_needs_level(const struct pci_dev *dev)
++{
++      if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
++          (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
++              return false;
++
++      return true;
++}
++
+ static unsigned int __init
+ sio_collect_irq_levels(void)
+ {
+@@ -110,8 +119,7 @@ sio_collect_irq_levels(void)
+ 
+       /* Iterate through the devices, collecting IRQ levels.  */
+       for_each_pci_dev(dev) {
+-              if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
+-                  (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
++              if (!sio_pci_dev_irq_needs_level(dev))
+                       continue;
+ 
+               if (dev->irq)
+@@ -120,8 +128,7 @@ sio_collect_irq_levels(void)
+       return level_bits;
+ }
+ 
+-static void __init
+-sio_fixup_irq_levels(unsigned int level_bits)
++static void __sio_fixup_irq_levels(unsigned int level_bits, bool reset)
+ {
+       unsigned int old_level_bits;
+ 
+@@ -139,12 +146,21 @@ sio_fixup_irq_levels(unsigned int level_bits)
+        */
+       old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8);
+ 
+-      level_bits |= (old_level_bits & 0x71ff);
++      if (reset)
++              old_level_bits &= 0x71ff;
++
++      level_bits |= old_level_bits;
+ 
+       outb((level_bits >> 0) & 0xff, 0x4d0);
+       outb((level_bits >> 8) & 0xff, 0x4d1);
+ }
+ 
++static inline void
++sio_fixup_irq_levels(unsigned int level_bits)
++{
++      __sio_fixup_irq_levels(level_bits, true);
++}
++
+ static inline int
+ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ {
+@@ -181,7 +197,14 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+       const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5;
+       int irq = COMMON_TABLE_LOOKUP, tmp;
+       tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);
+-      return irq >= 0 ? tmp : -1;
++
++      irq = irq >= 0 ? tmp : -1;
++
++      /* Fixup IRQ level if an actual IRQ mapping is detected */
++      if (sio_pci_dev_irq_needs_level(dev) && irq >= 0)
++              __sio_fixup_irq_levels(1 << irq, false);
++
++      return irq;
+ }
+ 
+ static inline int
+diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
+index cf2f5240e176..27cc913ca0f5 100644
+--- a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
++++ b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
+@@ -53,7 +53,8 @@
+               };
+ 
+               pinctrl: pin-controller@10000 {
+-                      pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header>;
++                      pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header
++                                   &pmx_gpio_header_gpo>;
+                       pinctrl-names = "default";
+ 
+                       pmx_uart0: pmx-uart0 {
+@@ -85,11 +86,16 @@
+                        * ground.
+                        */
+                       pmx_gpio_header: pmx-gpio-header {
+-                              marvell,pins = "mpp17", "mpp7", "mpp29", "mpp28",
++                              marvell,pins = "mpp17", "mpp29", "mpp28",
+                                              "mpp35", "mpp34", "mpp40";
+                               marvell,function = "gpio";
+                       };
+ 
++                      pmx_gpio_header_gpo: pxm-gpio-header-gpo {
++                              marvell,pins = "mpp7";
++                              marvell,function = "gpo";
++                      };
++
+                       pmx_gpio_init: pmx-init {
+                               marvell,pins = "mpp38";
+                               marvell,function = "gpio";
+diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
+index 5caaf971fb50..df433abfcb02 100644
+--- a/arch/arm/configs/sunxi_defconfig
++++ b/arch/arm/configs/sunxi_defconfig
+@@ -10,6 +10,7 @@ CONFIG_SMP=y
+ CONFIG_NR_CPUS=8
+ CONFIG_AEABI=y
+ CONFIG_HIGHMEM=y
++CONFIG_CMA=y
+ CONFIG_ARM_APPENDED_DTB=y
+ CONFIG_ARM_ATAG_DTB_COMPAT=y
+ CONFIG_CPU_FREQ=y
+@@ -33,6 +34,7 @@ CONFIG_CAN_SUN4I=y
+ # CONFIG_WIRELESS is not set
+ CONFIG_DEVTMPFS=y
+ CONFIG_DEVTMPFS_MOUNT=y
++CONFIG_DMA_CMA=y
+ CONFIG_BLK_DEV_SD=y
+ CONFIG_ATA=y
+ CONFIG_AHCI_SUNXI=y
+diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+index c3276436b0ae..c12e7b572a41 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+@@ -1656,6 +1656,7 @@ static struct omap_hwmod omap3xxx_mmc3_hwmod = {
+       .main_clk       = "mmchs3_fck",
+       .prcm           = {
+               .omap2 = {
++                      .module_offs = CORE_MOD,
+                       .prcm_reg_id = 1,
+                       .module_bit = OMAP3430_EN_MMC3_SHIFT,
+                       .idlest_reg_id = 1,
+diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+index f2aa2a81de4d..32690107c1cc 100644
+--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+@@ -63,8 +63,10 @@
+                       cpm_ethernet: ethernet@0 {
+                               compatible = "marvell,armada-7k-pp22";
+                               reg = <0x0 0x100000>, <0x129000 0xb000>;
+-                              clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>, <&cpm_clk 1 5>;
+-                              clock-names = "pp_clk", "gop_clk", "mg_clk";
++                              clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>,
++                                       <&cpm_clk 1 5>, <&cpm_clk 1 18>;
++                              clock-names = "pp_clk", "gop_clk",
++                                            "mg_clk","axi_clk";
+                               marvell,system-controller = <&cpm_syscon0>;
+                               status = "disabled";
+                               dma-coherent;
+@@ -114,7 +116,8 @@
+                               #size-cells = <0>;
+                               compatible = "marvell,orion-mdio";
+                               reg = <0x12a200 0x10>;
+-                              clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>;
++                              clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>,
++                                       <&cpm_clk 1 6>, <&cpm_clk 1 18>;
+                               status = "disabled";
+                       };
+ 
+@@ -295,8 +298,8 @@
+                               compatible = "marvell,armada-cp110-sdhci";
+                               reg = <0x780000 0x300>;
+                              interrupts = <ICU_GRP_NSR 27 IRQ_TYPE_LEVEL_HIGH>;
+-                              clock-names = "core";
+-                              clocks = <&cpm_clk 1 4>;
++                              clock-names = "core","axi";
++                              clocks = <&cpm_clk 1 4>, <&cpm_clk 1 18>;
+                               dma-coherent;
+                               status = "disabled";
+                       };
+diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+index 4fe70323abb3..14e47c5c3816 100644
+--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+@@ -63,8 +63,10 @@
+                       cps_ethernet: ethernet@0 {
+                               compatible = "marvell,armada-7k-pp22";
+                               reg = <0x0 0x100000>, <0x129000 0xb000>;
+-                              clocks = <&cps_clk 1 3>, <&cps_clk 1 9>, <&cps_clk 1 5>;
+-                              clock-names = "pp_clk", "gop_clk", "mg_clk";
++                              clocks = <&cps_clk 1 3>, <&cps_clk 1 9>,
++                                       <&cps_clk 1 5>, <&cps_clk 1 18>;
++                              clock-names = "pp_clk", "gop_clk",
++                                            "mg_clk", "axi_clk";
+                               marvell,system-controller = <&cps_syscon0>;
+                               status = "disabled";
+                               dma-coherent;
+@@ -114,7 +116,8 @@
+                               #size-cells = <0>;
+                               compatible = "marvell,orion-mdio";
+                               reg = <0x12a200 0x10>;
+-                              clocks = <&cps_clk 1 9>, <&cps_clk 1 5>;
++                              clocks = <&cps_clk 1 9>, <&cps_clk 1 5>,
++                                       <&cps_clk 1 6>, <&cps_clk 1 18>;
+                               status = "disabled";
+                       };
+ 
+diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
+index 7debb74843a0..380261e258ef 100644
+--- a/arch/arm64/kvm/handle_exit.c
++++ b/arch/arm64/kvm/handle_exit.c
+@@ -44,7 +44,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 
+       ret = kvm_psci_call(vcpu);
+       if (ret < 0) {
+-              kvm_inject_undefined(vcpu);
++              vcpu_set_reg(vcpu, 0, ~0UL);
+               return 1;
+       }
+ 
+@@ -53,7 +53,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 
+ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+-      kvm_inject_undefined(vcpu);
++      vcpu_set_reg(vcpu, 0, ~0UL);
+       return 1;
+ }
+ 
+diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
+index 4674f1efbe7a..e1675c25d5d4 100644
+--- a/arch/mips/ar7/platform.c
++++ b/arch/mips/ar7/platform.c
+@@ -575,7 +575,7 @@ static int __init ar7_register_uarts(void)
+       uart_port.type          = PORT_AR7;
+       uart_port.uartclk       = clk_get_rate(bus_clk) / 2;
+       uart_port.iotype        = UPIO_MEM32;
+-      uart_port.flags         = UPF_FIXED_TYPE;
++      uart_port.flags         = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF;
+       uart_port.regshift      = 2;
+ 
+       uart_port.line          = 0;
+diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
+index dd5567b1e305..8f5bd04f320a 100644
+--- a/arch/mips/kernel/mips-cm.c
++++ b/arch/mips/kernel/mips-cm.c
+@@ -292,7 +292,6 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core,
+                                 *this_cpu_ptr(&cm_core_lock_flags));
+       } else {
+               WARN_ON(cluster != 0);
+-              WARN_ON(vp != 0);
+               WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ 
+               /*
+diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
+index a703452d67b6..555e22d5e07f 100644
+--- a/arch/powerpc/include/asm/exception-64e.h
++++ b/arch/powerpc/include/asm/exception-64e.h
+@@ -209,5 +209,11 @@ exc_##label##_book3e:
+       ori     r3,r3,vector_offset@l;          \
+       mtspr   SPRN_IVOR##vector_number,r3;
+ 
++#define RFI_TO_KERNEL                                                 \
++      rfi
++
++#define RFI_TO_USER                                                   \
++      rfi
++
+ #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
+ 
+diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
+index 9a318973af05..ccf10c2f8899 100644
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -69,6 +69,59 @@
+  */
+ #define EX_R3         EX_DAR
+ 
++/*
++ * Macros for annotating the expected destination of (h)rfid
++ *
++ * The nop instructions allow us to insert one or more instructions to flush the
++ * L1-D cache when returning to userspace or a guest.
++ */
++#define RFI_FLUSH_SLOT                                                        \
++      RFI_FLUSH_FIXUP_SECTION;                                        \
++      nop;                                                            \
++      nop;                                                            \
++      nop
++
++#define RFI_TO_KERNEL                                                 \
++      rfid
++
++#define RFI_TO_USER                                                   \
++      RFI_FLUSH_SLOT;                                                 \
++      rfid;                                                           \
++      b       rfi_flush_fallback
++
++#define RFI_TO_USER_OR_KERNEL                                         \
++      RFI_FLUSH_SLOT;                                                 \
++      rfid;                                                           \
++      b       rfi_flush_fallback
++
++#define RFI_TO_GUEST                                                  \
++      RFI_FLUSH_SLOT;                                                 \
++      rfid;                                                           \
++      b       rfi_flush_fallback
++
++#define HRFI_TO_KERNEL                                                        \
++      hrfid
++
++#define HRFI_TO_USER                                                  \
++      RFI_FLUSH_SLOT;                                                 \
++      hrfid;                                                          \
++      b       hrfi_flush_fallback
++
++#define HRFI_TO_USER_OR_KERNEL                                                \
++      RFI_FLUSH_SLOT;                                                 \
++      hrfid;                                                          \
++      b       hrfi_flush_fallback
++
++#define HRFI_TO_GUEST                                                 \
++      RFI_FLUSH_SLOT;                                                 \
++      hrfid;                                                          \
++      b       hrfi_flush_fallback
++
++#define HRFI_TO_UNKNOWN                                                       \
++      RFI_FLUSH_SLOT;                                                 \
++      hrfid;                                                          \
++      b       hrfi_flush_fallback
++
+ #ifdef CONFIG_RELOCATABLE
+ #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)                  \
+       mfspr   r11,SPRN_##h##SRR0;     /* save SRR0 */                 \
+@@ -213,7 +266,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
+       mtspr   SPRN_##h##SRR0,r12;                                     \
+       mfspr   r12,SPRN_##h##SRR1;     /* and SRR1 */                  \
+       mtspr   SPRN_##h##SRR1,r10;                                     \
+-      h##rfid;                                                        \
++      h##RFI_TO_KERNEL;                                               \
+       b       .       /* prevent speculative execution */
+ #define EXCEPTION_PROLOG_PSERIES_1(label, h)                          \
+       __EXCEPTION_PROLOG_PSERIES_1(label, h)
+@@ -227,7 +280,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
+       mtspr   SPRN_##h##SRR0,r12;                                     \
+       mfspr   r12,SPRN_##h##SRR1;     /* and SRR1 */                  \
+       mtspr   SPRN_##h##SRR1,r10;                                     \
+-      h##rfid;                                                        \
++      h##RFI_TO_KERNEL;                                               \
+       b       .       /* prevent speculative execution */
+ 
+ #define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h)                     \
+diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
+index 8f88f771cc55..1e82eb3caabd 100644
+--- a/arch/powerpc/include/asm/feature-fixups.h
++++ b/arch/powerpc/include/asm/feature-fixups.h
+@@ -187,7 +187,20 @@ label##3:                                         \
+       FTR_ENTRY_OFFSET label##1b-label##3b;           \
+       .popsection;
+ 
++#define RFI_FLUSH_FIXUP_SECTION                               \
++951:                                                  \
++      .pushsection __rfi_flush_fixup,"a";             \
++      .align 2;                                       \
++952:                                                  \
++      FTR_ENTRY_OFFSET 951b-952b;                     \
++      .popsection;
++
++
+ #ifndef __ASSEMBLY__
++#include <linux/types.h>
++
++extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
++
+ void apply_feature_fixups(void);
+ void setup_feature_keys(void);
+ #endif
+diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
+index a409177be8bd..f0461618bf7b 100644
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -241,6 +241,7 @@
+ #define H_GET_HCA_INFO          0x1B8
+ #define H_GET_PERF_COUNT        0x1BC
+ #define H_MANAGE_TRACE          0x1C0
++#define H_GET_CPU_CHARACTERISTICS 0x1C8
+ #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
+ #define H_QUERY_INT_STATE       0x1E4
+ #define H_POLL_PENDING                0x1D8
+@@ -330,6 +331,17 @@
+ #define H_SIGNAL_SYS_RESET_ALL_OTHERS         -2
+ /* >= 0 values are CPU number */
+ 
++/* H_GET_CPU_CHARACTERISTICS return values */
++#define H_CPU_CHAR_SPEC_BAR_ORI31     (1ull << 63) // IBM bit 0
++#define H_CPU_CHAR_BCCTRL_SERIALISED  (1ull << 62) // IBM bit 1
++#define H_CPU_CHAR_L1D_FLUSH_ORI30    (1ull << 61) // IBM bit 2
++#define H_CPU_CHAR_L1D_FLUSH_TRIG2    (1ull << 60) // IBM bit 3
++#define H_CPU_CHAR_L1D_THREAD_PRIV    (1ull << 59) // IBM bit 4
++
++#define H_CPU_BEHAV_FAVOUR_SECURITY   (1ull << 63) // IBM bit 0
++#define H_CPU_BEHAV_L1D_FLUSH_PR      (1ull << 62) // IBM bit 1
++#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
++
+ /* Flag values used in H_REGISTER_PROC_TBL hcall */
+ #define PROC_TABLE_OP_MASK    0x18
+ #define PROC_TABLE_DEREG      0x10
+@@ -436,6 +448,11 @@ static inline unsigned int get_longbusy_msecs(int longbusy_rc)
+       }
+ }
+ 
++struct h_cpu_char_result {
++      u64 character;
++      u64 behaviour;
++};
++
+ #endif /* __ASSEMBLY__ */
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_HVCALL_H */
+diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
+index 04b60af027ae..b8366df50d19 100644
+--- a/arch/powerpc/include/asm/paca.h
++++ b/arch/powerpc/include/asm/paca.h
+@@ -231,6 +231,16 @@ struct paca_struct {
+       struct sibling_subcore_state *sibling_subcore_state;
+ #endif
+ #endif
++#ifdef CONFIG_PPC_BOOK3S_64
++      /*
++       * rfi fallback flush must be in its own cacheline to prevent
++       * other paca data leaking into the L1d
++       */
++      u64 exrfi[EX_SIZE] __aligned(0x80);
++      void *rfi_flush_fallback_area;
++      u64 l1d_flush_congruence;
++      u64 l1d_flush_sets;
++#endif
+ };
+ 
+ extern void copy_mm_to_paca(struct mm_struct *mm);
+diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
+index 7f01b22fa6cb..55eddf50d149 100644
+--- a/arch/powerpc/include/asm/plpar_wrappers.h
++++ b/arch/powerpc/include/asm/plpar_wrappers.h
+@@ -326,4 +326,18 @@ static inline long plapr_signal_sys_reset(long cpu)
+       return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
+ }
+ 
++static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
++{
++      unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
++      long rc;
++
++      rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
++      if (rc == H_SUCCESS) {
++              p->character = retbuf[0];
++              p->behaviour = retbuf[1];
++      }
++
++      return rc;
++}
++
+ #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
+diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
+index cf00ec26303a..469b7fdc9be4 100644
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -39,6 +39,19 @@ static inline void pseries_big_endian_exceptions(void) {}
+ static inline void pseries_little_endian_exceptions(void) {}
+ #endif /* CONFIG_PPC_PSERIES */
+ 
++void rfi_flush_enable(bool enable);
++
++/* These are bit flags */
++enum l1d_flush_type {
++      L1D_FLUSH_NONE          = 0x1,
++      L1D_FLUSH_FALLBACK      = 0x2,
++      L1D_FLUSH_ORI           = 0x4,
++      L1D_FLUSH_MTTRIG        = 0x8,
++};
++
++void __init setup_rfi_flush(enum l1d_flush_type, bool enable);
++void do_rfi_flush_fixups(enum l1d_flush_type types);
++
+ #endif /* !__ASSEMBLY__ */
+ 
+ #endif        /* _ASM_POWERPC_SETUP_H */
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index 8cfb20e38cfe..748cdc4bb89a 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -237,6 +237,11 @@ int main(void)
+       OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp);
+       OFFSET(PACA_IN_MCE, paca_struct, in_mce);
+       OFFSET(PACA_IN_NMI, paca_struct, in_nmi);
++      OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area);
++      OFFSET(PACA_EXRFI, paca_struct, exrfi);
++      OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence);
++      OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets);
++
+ #endif
+       OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
+       OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index 4a0fd4f40245..8a8a6d7ddcc6 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -37,6 +37,11 @@
+ #include <asm/tm.h>
+ #include <asm/ppc-opcode.h>
+ #include <asm/export.h>
++#ifdef CONFIG_PPC_BOOK3S
++#include <asm/exception-64s.h>
++#else
++#include <asm/exception-64e.h>
++#endif
+ 
+ /*
+  * System calls.
+@@ -262,13 +267,23 @@ BEGIN_FTR_SECTION
+ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ 
+       ld      r13,GPR13(r1)   /* only restore r13 if returning to usermode */
++      ld      r2,GPR2(r1)
++      ld      r1,GPR1(r1)
++      mtlr    r4
++      mtcr    r5
++      mtspr   SPRN_SRR0,r7
++      mtspr   SPRN_SRR1,r8
++      RFI_TO_USER
++      b       .       /* prevent speculative execution */
++
++      /* exit to kernel */
+ 1:    ld      r2,GPR2(r1)
+       ld      r1,GPR1(r1)
+       mtlr    r4
+       mtcr    r5
+       mtspr   SPRN_SRR0,r7
+       mtspr   SPRN_SRR1,r8
+-      RFI
++      RFI_TO_KERNEL
+       b       .       /* prevent speculative execution */
+ 
+ .Lsyscall_error:
+@@ -397,8 +412,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+       mtmsrd  r10, 1
+       mtspr   SPRN_SRR0, r11
+       mtspr   SPRN_SRR1, r12
+-
+-      rfid
++      RFI_TO_USER
+       b       .       /* prevent speculative execution */
+ #endif
+ _ASM_NOKPROBE_SYMBOL(system_call_common);
+@@ -878,7 +892,7 @@ BEGIN_FTR_SECTION
+ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+       ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
+       REST_GPR(13, r1)
+-1:
++
+       mtspr   SPRN_SRR1,r3
+ 
+       ld      r2,_CCR(r1)
+@@ -891,8 +905,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+       ld      r3,GPR3(r1)
+       ld      r4,GPR4(r1)
+       ld      r1,GPR1(r1)
++      RFI_TO_USER
++      b       .       /* prevent speculative execution */
+ 
+-      rfid
++1:    mtspr   SPRN_SRR1,r3
++
++      ld      r2,_CCR(r1)
++      mtcrf   0xFF,r2
++      ld      r2,_NIP(r1)
++      mtspr   SPRN_SRR0,r2
++
++      ld      r0,GPR0(r1)
++      ld      r2,GPR2(r1)
++      ld      r3,GPR3(r1)
++      ld      r4,GPR4(r1)
++      ld      r1,GPR1(r1)
++      RFI_TO_KERNEL
+       b       .       /* prevent speculative execution */
+ 
+ #endif /* CONFIG_PPC_BOOK3E */
+@@ -1073,7 +1101,7 @@ __enter_rtas:
+       
+       mtspr   SPRN_SRR0,r5
+       mtspr   SPRN_SRR1,r6
+-      rfid
++      RFI_TO_KERNEL
+       b       .       /* prevent speculative execution */
+ 
+ rtas_return_loc:
+@@ -1098,7 +1126,7 @@ rtas_return_loc:
+ 
+       mtspr   SPRN_SRR0,r3
+       mtspr   SPRN_SRR1,r4
+-      rfid
++      RFI_TO_KERNEL
+       b       .       /* prevent speculative execution */
+ _ASM_NOKPROBE_SYMBOL(__enter_rtas)
+ _ASM_NOKPROBE_SYMBOL(rtas_return_loc)
+@@ -1171,7 +1199,7 @@ _GLOBAL(enter_prom)
+       LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
+       andc    r11,r11,r12
+       mtsrr1  r11
+-      rfid
++      RFI_TO_KERNEL
+ #endif /* CONFIG_PPC_BOOK3E */
+ 
+ 1:    /* Return from OF */
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 06598142d755..e9f72abc52b7 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -254,7 +254,7 @@ BEGIN_FTR_SECTION
+       LOAD_HANDLER(r12, machine_check_handle_early)
+ 1:    mtspr   SPRN_SRR0,r12
+       mtspr   SPRN_SRR1,r11
+-      rfid
++      RFI_TO_KERNEL
+       b       .       /* prevent speculative execution */
+ 2:
+       /* Stack overflow. Stay on emergency stack and panic.
+@@ -443,7 +443,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
+       li      r3,MSR_ME
+       andc    r10,r10,r3              /* Turn off MSR_ME */
+       mtspr   SPRN_SRR1,r10
+-      rfid
++      RFI_TO_KERNEL
+       b       .
+ 2:
+       /*
+@@ -461,7 +461,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
+        */
+       bl      machine_check_queue_event
+       MACHINE_CHECK_HANDLER_WINDUP
+-      rfid
++      RFI_TO_USER_OR_KERNEL
+ 9:
+       /* Deliver the machine check to host kernel in V mode. */
+       MACHINE_CHECK_HANDLER_WINDUP
+@@ -596,6 +596,9 @@ EXC_COMMON_BEGIN(slb_miss_common)
+       stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
+       std     r10,PACA_EXSLB+EX_LR(r13)       /* save LR */
+ 
++      andi.   r9,r11,MSR_PR   // Check for exception from userspace
++      cmpdi   cr4,r9,MSR_PR   // And save the result in CR4 for later
++
+       /*
+        * Test MSR_RI before calling slb_allocate_realmode, because the
+        * MSR in r11 gets clobbered. However we still want to allocate
+@@ -622,9 +625,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
+ 
+       /* All done -- return from exception. */
+ 
++      bne     cr4,1f          /* returning to kernel */
++
+ .machine      push
+ .machine      "power4"
+       mtcrf   0x80,r9
++      mtcrf   0x08,r9         /* MSR[PR] indication is in cr4 */
+       mtcrf   0x04,r9         /* MSR[RI] indication is in cr5 */
+       mtcrf   0x02,r9         /* I/D indication is in cr6 */
+       mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
+@@ -638,9 +644,30 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
+       ld      r11,PACA_EXSLB+EX_R11(r13)
+       ld      r12,PACA_EXSLB+EX_R12(r13)
+       ld      r13,PACA_EXSLB+EX_R13(r13)
+-      rfid
++      RFI_TO_USER
++      b       .       /* prevent speculative execution */
++1:
++.machine      push
++.machine      "power4"
++      mtcrf   0x80,r9
++      mtcrf   0x08,r9         /* MSR[PR] indication is in cr4 */
++      mtcrf   0x04,r9         /* MSR[RI] indication is in cr5 */
++      mtcrf   0x02,r9         /* I/D indication is in cr6 */
++      mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
++.machine      pop
++
++      RESTORE_CTR(r9, PACA_EXSLB)
++      RESTORE_PPR_PACA(PACA_EXSLB, r9)
++      mr      r3,r12
++      ld      r9,PACA_EXSLB+EX_R9(r13)
++      ld      r10,PACA_EXSLB+EX_R10(r13)
++      ld      r11,PACA_EXSLB+EX_R11(r13)
++      ld      r12,PACA_EXSLB+EX_R12(r13)
++      ld      r13,PACA_EXSLB+EX_R13(r13)
++      RFI_TO_KERNEL
+       b       .       /* prevent speculative execution */
+ 
++
+ 2:    std     r3,PACA_EXSLB+EX_DAR(r13)
+       mr      r3,r12
+       mfspr   r11,SPRN_SRR0
+@@ -649,7 +676,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
+       mtspr   SPRN_SRR0,r10
+       ld      r10,PACAKMSR(r13)
+       mtspr   SPRN_SRR1,r10
+-      rfid
++      RFI_TO_KERNEL
+       b       .
+ 
+ 8:    std     r3,PACA_EXSLB+EX_DAR(r13)
+@@ -660,7 +687,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
+       mtspr   SPRN_SRR0,r10
+       ld      r10,PACAKMSR(r13)
+       mtspr   SPRN_SRR1,r10
+-      rfid
++      RFI_TO_KERNEL
+       b       .
+ 
+ EXC_COMMON_BEGIN(unrecov_slb)
+@@ -905,7 +932,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)                             \
+       mtspr   SPRN_SRR0,r10 ;                                 \
+       ld      r10,PACAKMSR(r13) ;                             \
+       mtspr   SPRN_SRR1,r10 ;                                 \
+-      rfid ;                                                  \
++      RFI_TO_KERNEL ;                                         \
+       b       . ;     /* prevent speculative execution */
+ 
+ #define SYSCALL_FASTENDIAN                                    \
+@@ -914,7 +941,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)                             \
+       xori    r12,r12,MSR_LE ;                                \
+       mtspr   SPRN_SRR1,r12 ;                                 \
+       mr      r13,r9 ;                                        \
+-      rfid ;          /* return to userspace */               \
++      RFI_TO_USER ;   /* return to userspace */               \
+       b       . ;     /* prevent speculative execution */
+ 
+ #if defined(CONFIG_RELOCATABLE)
+@@ -1299,7 +1326,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+       ld      r11,PACA_EXGEN+EX_R11(r13)
+       ld      r12,PACA_EXGEN+EX_R12(r13)
+       ld      r13,PACA_EXGEN+EX_R13(r13)
+-      HRFID
++      HRFI_TO_UNKNOWN
+       b       .
+ #endif
+ 
+@@ -1403,10 +1430,94 @@ masked_##_H##interrupt:                                        \
+       ld      r10,PACA_EXGEN+EX_R10(r13);             \
+       ld      r11,PACA_EXGEN+EX_R11(r13);             \
+       /* returns to kernel where r13 must be set up, so don't restore it */ \
+-      ##_H##rfid;                                     \
++      ##_H##RFI_TO_KERNEL;                            \
+       b       .;                                      \
+       MASKED_DEC_HANDLER(_H)
+ 
++TRAMP_REAL_BEGIN(rfi_flush_fallback)
++      SET_SCRATCH0(r13);
++      GET_PACA(r13);
++      std     r9,PACA_EXRFI+EX_R9(r13)
++      std     r10,PACA_EXRFI+EX_R10(r13)
++      std     r11,PACA_EXRFI+EX_R11(r13)
++      std     r12,PACA_EXRFI+EX_R12(r13)
++      std     r8,PACA_EXRFI+EX_R13(r13)
++      mfctr   r9
++      ld      r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
++      ld      r11,PACA_L1D_FLUSH_SETS(r13)
++      ld      r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
++      /*
++       * The load adresses are at staggered offsets within cachelines,
++       * which suits some pipelines better (on others it should not
++       * hurt).
++       */
++      addi    r12,r12,8
++      mtctr   r11
++      DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
++
++      /* order ld/st prior to dcbt stop all streams with flushing */
++      sync
++1:    li      r8,0
++      .rept   8 /* 8-way set associative */
++      ldx     r11,r10,r8
++      add     r8,r8,r12
++      xor     r11,r11,r11     // Ensure r11 is 0 even if fallback area is not
++      add     r8,r8,r11       // Add 0, this creates a dependency on the ldx
++      .endr
++      addi    r10,r10,128 /* 128 byte cache line */
++      bdnz    1b
++
++      mtctr   r9
++      ld      r9,PACA_EXRFI+EX_R9(r13)
++      ld      r10,PACA_EXRFI+EX_R10(r13)
++      ld      r11,PACA_EXRFI+EX_R11(r13)
++      ld      r12,PACA_EXRFI+EX_R12(r13)
++      ld      r8,PACA_EXRFI+EX_R13(r13)
++      GET_SCRATCH0(r13);
++      rfid
++
++TRAMP_REAL_BEGIN(hrfi_flush_fallback)
++      SET_SCRATCH0(r13);
++      GET_PACA(r13);
++      std     r9,PACA_EXRFI+EX_R9(r13)
++      std     r10,PACA_EXRFI+EX_R10(r13)
++      std     r11,PACA_EXRFI+EX_R11(r13)
++      std     r12,PACA_EXRFI+EX_R12(r13)
++      std     r8,PACA_EXRFI+EX_R13(r13)
++      mfctr   r9
++      ld      r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
++      ld      r11,PACA_L1D_FLUSH_SETS(r13)
++      ld      r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
++      /*
++       * The load adresses are at staggered offsets within cachelines,
++       * which suits some pipelines better (on others it should not
++       * hurt).
++       */
++      addi    r12,r12,8
++      mtctr   r11
++      DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
++
++      /* order ld/st prior to dcbt stop all streams with flushing */
++      sync
++1:    li      r8,0
++      .rept   8 /* 8-way set associative */
++      ldx     r11,r10,r8
++      add     r8,r8,r12
++      xor     r11,r11,r11     // Ensure r11 is 0 even if fallback area is not
++      add     r8,r8,r11       // Add 0, this creates a dependency on the ldx
++      .endr
++      addi    r10,r10,128 /* 128 byte cache line */
++      bdnz    1b
++
++      mtctr   r9
++      ld      r9,PACA_EXRFI+EX_R9(r13)
++      ld      r10,PACA_EXRFI+EX_R10(r13)
++      ld      r11,PACA_EXRFI+EX_R11(r13)
++      ld      r12,PACA_EXRFI+EX_R12(r13)
++      ld      r8,PACA_EXRFI+EX_R13(r13)
++      GET_SCRATCH0(r13);
++      hrfid
++
+ /*
+  * Real mode exceptions actually use this too, but alternate
+  * instruction code patches (which end up in the common .text area)
+@@ -1426,7 +1537,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
+       addi    r13, r13, 4
+       mtspr   SPRN_SRR0, r13
+       GET_SCRATCH0(r13)
+-      rfid
++      RFI_TO_KERNEL
+       b       .
+ 
+ TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
+@@ -1438,7 +1549,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
+       addi    r13, r13, 4
+       mtspr   SPRN_HSRR0, r13
+       GET_SCRATCH0(r13)
+-      hrfid
++      HRFI_TO_KERNEL
+       b       .
+ #endif
+ 
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index b89c6aac48c9..935059cb9e40 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -784,3 +784,104 @@ static int __init disable_hardlockup_detector(void)
+       return 0;
+ }
+ early_initcall(disable_hardlockup_detector);
++
++#ifdef CONFIG_PPC_BOOK3S_64
++static enum l1d_flush_type enabled_flush_types;
++static void *l1d_flush_fallback_area;
++static bool no_rfi_flush;
++bool rfi_flush;
++
++static int __init handle_no_rfi_flush(char *p)
++{
++      pr_info("rfi-flush: disabled on command line.");
++      no_rfi_flush = true;
++      return 0;
++}
++early_param("no_rfi_flush", handle_no_rfi_flush);
++
++/*
++ * The RFI flush is not KPTI, but because users will see doco that says to use
++ * nopti we hijack that option here to also disable the RFI flush.
++ */
++static int __init handle_no_pti(char *p)
++{
++      pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
++      handle_no_rfi_flush(NULL);
++      return 0;
++}
++early_param("nopti", handle_no_pti);
++
++static void do_nothing(void *unused)
++{
++      /*
++       * We don't need to do the flush explicitly, just enter+exit kernel is
++       * sufficient, the RFI exit handlers will do the right thing.
++       */
++}
++
++void rfi_flush_enable(bool enable)
++{
++      if (rfi_flush == enable)
++              return;
++
++      if (enable) {
++              do_rfi_flush_fixups(enabled_flush_types);
++              on_each_cpu(do_nothing, NULL, 1);
++      } else
++              do_rfi_flush_fixups(L1D_FLUSH_NONE);
++
++      rfi_flush = enable;
++}
++
++static void init_fallback_flush(void)
++{
++      u64 l1d_size, limit;
++      int cpu;
++
++      l1d_size = ppc64_caches.l1d.size;
++      limit = min(safe_stack_limit(), ppc64_rma_size);
++
++      /*
++       * Align to L1d size, and size it at 2x L1d size, to catch possible
++       * hardware prefetch runoff. We don't have a recipe for load patterns to
++       * reliably avoid the prefetcher.
++       */
++      l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
++      memset(l1d_flush_fallback_area, 0, l1d_size * 2);
++
++      for_each_possible_cpu(cpu) {
++              /*
++               * The fallback flush is currently coded for 8-way
++               * associativity. Different associativity is possible, but it
++               * will be treated as 8-way and may not evict the lines as
++               * effectively.
++               *
++               * 128 byte lines are mandatory.
++               */
++              u64 c = l1d_size / 8;
++
++              paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
++              paca[cpu].l1d_flush_congruence = c;
++              paca[cpu].l1d_flush_sets = c / 128;
++      }
++}
++
++void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
++{
++      if (types & L1D_FLUSH_FALLBACK) {
++              pr_info("rfi-flush: Using fallback displacement flush\n");
++              init_fallback_flush();
++      }
++
++      if (types & L1D_FLUSH_ORI)
++              pr_info("rfi-flush: Using ori type flush\n");
++
++      if (types & L1D_FLUSH_MTTRIG)
++              pr_info("rfi-flush: Using mttrig type flush\n");
++
++      enabled_flush_types = types;
++
++      if (!no_rfi_flush)
++              rfi_flush_enable(enable);
++}
++#endif /* CONFIG_PPC_BOOK3S_64 */
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index 0494e1566ee2..307843d23682 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -132,6 +132,15 @@ SECTIONS
+       /* Read-only data */
+       RO_DATA(PAGE_SIZE)
+ 
++#ifdef CONFIG_PPC64
++      . = ALIGN(8);
++      __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
++              __start___rfi_flush_fixup = .;
++              *(__rfi_flush_fixup)
++              __stop___rfi_flush_fixup = .;
++      }
++#endif
++
+       EXCEPTION_TABLE(0)
+ 
+       NOTES :kernel :notes
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 42639fba89e8..c85ac5c83bd4 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -78,7 +78,7 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
+       mtmsrd  r0,1            /* clear RI in MSR */
+       mtsrr0  r5
+       mtsrr1  r6
+-      RFI
++      RFI_TO_KERNEL
+ 
+ kvmppc_call_hv_entry:
+       ld      r4, HSTATE_KVM_VCPU(r13)
+@@ -187,7 +187,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+       mtmsrd  r6, 1                   /* Clear RI in MSR */
+       mtsrr0  r8
+       mtsrr1  r7
+-      RFI
++      RFI_TO_KERNEL
+ 
+       /* Virtual-mode return */
+ .Lvirt_return:
+@@ -1131,8 +1131,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+ 
+       ld      r0, VCPU_GPR(R0)(r4)
+       ld      r4, VCPU_GPR(R4)(r4)
+-
+-      hrfid
++      HRFI_TO_GUEST
+       b       .
+ 
+ secondary_too_late:
+diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
+index 42a4b237df5f..34a5adeff084 100644
+--- a/arch/powerpc/kvm/book3s_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_rmhandlers.S
+@@ -46,6 +46,9 @@
+ 
+ #define FUNC(name)            name
+ 
++#define RFI_TO_KERNEL RFI
++#define RFI_TO_GUEST  RFI
++
+ .macro INTERRUPT_TRAMPOLINE intno
+ 
+ .global kvmppc_trampoline_\intno
+@@ -141,7 +144,7 @@ kvmppc_handler_skip_ins:
+       GET_SCRATCH0(r13)
+ 
+       /* And get back into the code */
+-      RFI
++      RFI_TO_KERNEL
+ #endif
+ 
+ /*
+@@ -164,6 +167,6 @@ _GLOBAL_TOC(kvmppc_entry_trampoline)
+       ori     r5, r5, MSR_EE
+       mtsrr0  r7
+       mtsrr1  r6
+-      RFI
++      RFI_TO_KERNEL
+ 
+ #include "book3s_segment.S"
+diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
+index 2a2b96d53999..93a180ceefad 100644
+--- a/arch/powerpc/kvm/book3s_segment.S
++++ b/arch/powerpc/kvm/book3s_segment.S
+@@ -156,7 +156,7 @@ no_dcbz32_on:
+       PPC_LL  r9, SVCPU_R9(r3)
+       PPC_LL  r3, (SVCPU_R3)(r3)
+ 
+-      RFI
++      RFI_TO_GUEST
+ kvmppc_handler_trampoline_enter_end:
+ 
+ 
+@@ -407,5 +407,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+       cmpwi   r12, BOOK3S_INTERRUPT_DOORBELL
+       beqa    BOOK3S_INTERRUPT_DOORBELL
+ 
+-      RFI
++      RFI_TO_KERNEL
+ kvmppc_handler_trampoline_exit_end:
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index 41cf5ae273cf..a95ea007d654 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -116,6 +116,47 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+       }
+ }
+ 
++#ifdef CONFIG_PPC_BOOK3S_64
++void do_rfi_flush_fixups(enum l1d_flush_type types)
++{
++      unsigned int instrs[3], *dest;
++      long *start, *end;
++      int i;
++
++      start = PTRRELOC(&__start___rfi_flush_fixup),
++      end = PTRRELOC(&__stop___rfi_flush_fixup);
++
++      instrs[0] = 0x60000000; /* nop */
++      instrs[1] = 0x60000000; /* nop */
++      instrs[2] = 0x60000000; /* nop */
++
++      if (types & L1D_FLUSH_FALLBACK)
++              /* b .+16 to fallback flush */
++              instrs[0] = 0x48000010;
++
++      i = 0;
++      if (types & L1D_FLUSH_ORI) {
++              instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++              instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
++      }
++
++      if (types & L1D_FLUSH_MTTRIG)
++              instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
++
++      for (i = 0; start < end; start++, i++) {
++              dest = (void *)start + *start;
++
++              pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++              patch_instruction(dest, instrs[0]);
++              patch_instruction(dest + 1, instrs[1]);
++              patch_instruction(dest + 2, instrs[2]);
++      }
++
++      printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i);
++}
++#endif /* CONFIG_PPC_BOOK3S_64 */
++
+ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+ {
+       long *start, *end;
+diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
+index bfe2aa702973..7966a314d93a 100644
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -36,13 +36,62 @@
+ #include <asm/opal.h>
+ #include <asm/kexec.h>
+ #include <asm/smp.h>
++#include <asm/setup.h>
+ 
+ #include "powernv.h"
+ 
++static void pnv_setup_rfi_flush(void)
++{
++      struct device_node *np, *fw_features;
++      enum l1d_flush_type type;
++      int enable;
++
++      /* Default to fallback in case fw-features are not available */
++      type = L1D_FLUSH_FALLBACK;
++      enable = 1;
++
++      np = of_find_node_by_name(NULL, "ibm,opal");
++      fw_features = of_get_child_by_name(np, "fw-features");
++      of_node_put(np);
++
++      if (fw_features) {
++              np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
++              if (np && of_property_read_bool(np, "enabled"))
++                      type = L1D_FLUSH_MTTRIG;
++
++              of_node_put(np);
++
++              np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
++              if (np && of_property_read_bool(np, "enabled"))
++                      type = L1D_FLUSH_ORI;
++
++              of_node_put(np);
++
++              /* Enable unless firmware says NOT to */
++              enable = 2;
++              np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
++              if (np && of_property_read_bool(np, "disabled"))
++                      enable--;
++
++              of_node_put(np);
++
++              np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
++              if (np && of_property_read_bool(np, "disabled"))
++                      enable--;
++
++              of_node_put(np);
++              of_node_put(fw_features);
++      }
++
++      setup_rfi_flush(type, enable > 0);
++}
++
+ static void __init pnv_setup_arch(void)
+ {
+       set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
+ 
++      pnv_setup_rfi_flush();
++
+       /* Initialize SMP */
+       pnv_smp_init();
+ 
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index a8531e012658..ae4f596273b5 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -459,6 +459,39 @@ static void __init find_and_init_phbs(void)
+       of_pci_check_probe_only();
+ }
+ 
++static void pseries_setup_rfi_flush(void)
++{
++      struct h_cpu_char_result result;
++      enum l1d_flush_type types;
++      bool enable;
++      long rc;
++
++      /* Enable by default */
++      enable = true;
++
++      rc = plpar_get_cpu_characteristics(&result);
++      if (rc == H_SUCCESS) {
++              types = L1D_FLUSH_NONE;
++
++              if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
++                      types |= L1D_FLUSH_MTTRIG;
++              if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
++                      types |= L1D_FLUSH_ORI;
++
++              /* Use fallback if nothing set in hcall */
++              if (types == L1D_FLUSH_NONE)
++                      types = L1D_FLUSH_FALLBACK;
++
++              if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
++                      enable = false;
++      } else {
++              /* Default to fallback if case hcall is not available */
++              types = L1D_FLUSH_FALLBACK;
++      }
++
++      setup_rfi_flush(types, enable);
++}
++
+ static void __init pSeries_setup_arch(void)
+ {
+       set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
+@@ -476,6 +509,8 @@ static void __init pSeries_setup_arch(void)
+ 
+       fwnmi_init();
+ 
++      pseries_setup_rfi_flush();
++
+       /* By default, only probe PCI (can be overridden by rtas_pci) */
+       pci_add_flags(PCI_PROBE_ONLY);
+ 
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index a1f28a54f23a..60c4c342316c 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -244,6 +244,17 @@ ENTRY(__switch_to_asm)
+       movl    %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
+ #endif
+ 
++#ifdef CONFIG_RETPOLINE
++      /*
++       * When switching from a shallower to a deeper call stack
++       * the RSB may either underflow or use entries populated
++       * with userspace addresses. On CPUs where those concerns
++       * exist, overwrite the RSB with entries which capture
++       * speculative execution to prevent attack.
++       */
++      FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
++#endif
++
+       /* restore callee-saved registers */
+       popl    %esi
+       popl    %edi
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index f5fda5f26e34..be6b66464f6a 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -487,6 +487,17 @@ ENTRY(__switch_to_asm)
+       movq    %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
+ #endif
+ 
++#ifdef CONFIG_RETPOLINE
++      /*
++       * When switching from a shallower to a deeper call stack
++       * the RSB may either underflow or use entries populated
++       * with userspace addresses. On CPUs where those concerns
++       * exist, overwrite the RSB with entries which capture
++       * speculative execution to prevent attack.
++       */
++      FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
++#endif
++
+       /* restore callee-saved registers */
+       popq    %r15
+       popq    %r14
+@@ -1247,7 +1258,7 @@ idtentry async_page_fault        do_async_page_fault     has_error_code=1
+ #endif
+ 
+ #ifdef CONFIG_X86_MCE
+-idtentry machine_check                                        has_error_code=0        paranoid=1 do_sym=*machine_check_vector(%rip)
++idtentry machine_check                do_mce                  has_error_code=0        paranoid=1
+ #endif
+ 
+ /*
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index f275447862f4..25b9375c1484 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -206,11 +206,11 @@
+ #define X86_FEATURE_RETPOLINE         ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
+ #define X86_FEATURE_RETPOLINE_AMD     ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
+ #define X86_FEATURE_INTEL_PPIN                ( 7*32+14) /* Intel Processor Inventory Number */
+-#define X86_FEATURE_INTEL_PT          ( 7*32+15) /* Intel Processor Trace */
+ #define X86_FEATURE_AVX512_4VNNIW     ( 7*32+16) /* AVX-512 Neural Network Instructions */
+ #define X86_FEATURE_AVX512_4FMAPS     ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
+ 
+ #define X86_FEATURE_MBA                       ( 7*32+18) /* Memory Bandwidth Allocation */
++#define X86_FEATURE_RSB_CTXSW         ( 7*32+19) /* Fill RSB on context switches */
+ 
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW                ( 8*32+ 0) /* Intel TPR Shadow */
+@@ -245,6 +245,7 @@
+ #define X86_FEATURE_AVX512IFMA                ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
+ #define X86_FEATURE_CLFLUSHOPT                ( 9*32+23) /* CLFLUSHOPT instruction */
+ #define X86_FEATURE_CLWB              ( 9*32+24) /* CLWB instruction */
++#define X86_FEATURE_INTEL_PT          ( 9*32+25) /* Intel Processor Trace */
+ #define X86_FEATURE_AVX512PF          ( 9*32+26) /* AVX-512 Prefetch */
+ #define X86_FEATURE_AVX512ER          ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
+ #define X86_FEATURE_AVX512CD          ( 9*32+28) /* AVX-512 Conflict Detection */
+diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
+index 6a77c63540f7..e7d96c0766fe 100644
+--- a/arch/x86/include/asm/mem_encrypt.h
++++ b/arch/x86/include/asm/mem_encrypt.h
+@@ -39,7 +39,7 @@ void __init sme_unmap_bootdata(char *real_mode_data);
+ 
+ void __init sme_early_init(void);
+ 
+-void __init sme_encrypt_kernel(void);
++void __init sme_encrypt_kernel(struct boot_params *bp);
+ void __init sme_enable(struct boot_params *bp);
+ 
+ /* Architecture __weak replacement functions */
+@@ -61,7 +61,7 @@ static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
+ 
+ static inline void __init sme_early_init(void) { }
+ 
+-static inline void __init sme_encrypt_kernel(void) { }
++static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
+ static inline void __init sme_enable(struct boot_params *bp) { }
+ 
+ #endif        /* CONFIG_AMD_MEM_ENCRYPT */
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 402a11c803c3..4ad41087ce0e 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -11,7 +11,7 @@
+  * Fill the CPU return stack buffer.
+  *
+  * Each entry in the RSB, if used for a speculative 'ret', contains an
+- * infinite 'pause; jmp' loop to capture speculative execution.
++ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
+  *
+  * This is required in various cases for retpoline and IBRS-based
+  * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+@@ -38,11 +38,13 @@
+       call    772f;                           \
+ 773:  /* speculation trap */                  \
+       pause;                                  \
++      lfence;                                 \
+       jmp     773b;                           \
+ 772:                                          \
+       call    774f;                           \
+ 775:  /* speculation trap */                  \
+       pause;                                  \
++      lfence;                                 \
+       jmp     775b;                           \
+ 774:                                          \
+       dec     reg;                            \
+@@ -73,6 +75,7 @@
+       call    .Ldo_rop_\@
+ .Lspec_trap_\@:
+       pause
++      lfence
+       jmp     .Lspec_trap_\@
+ .Ldo_rop_\@:
+       mov     \reg, (%_ASM_SP)
+@@ -165,6 +168,7 @@
+       "       .align 16\n"                                    \
+       "901:   call   903f;\n"                                 \
+       "902:   pause;\n"                                       \
++      "       lfence;\n"                                      \
+       "       jmp    902b;\n"                                 \
+       "       .align 16\n"                                    \
+       "903:   addl   $4, %%esp;\n"                            \
+@@ -190,6 +194,9 @@ enum spectre_v2_mitigation {
+       SPECTRE_V2_IBRS,
+ };
+ 
++extern char __indirect_thunk_start[];
++extern char __indirect_thunk_end[];
++
+ /*
+  * On VMEXIT we must ensure that no RSB predictions learned in the guest
+  * can be followed in the host, by overwriting the RSB completely. Both
+@@ -199,16 +206,17 @@ enum spectre_v2_mitigation {
+ static inline void vmexit_fill_RSB(void)
+ {
+ #ifdef CONFIG_RETPOLINE
+-      unsigned long loops = RSB_CLEAR_LOOPS / 2;
++      unsigned long loops;
+ 
+       asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
+                     ALTERNATIVE("jmp 910f",
+                                 __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
+                                 X86_FEATURE_RETPOLINE)
+                     "910:"
+-                    : "=&r" (loops), ASM_CALL_CONSTRAINT
+-                    : "r" (loops) : "memory" );
++                    : "=r" (loops), ASM_CALL_CONSTRAINT
++                    : : "memory" );
+ #endif
+ }
++
+ #endif /* __ASSEMBLY__ */
+ #endif /* __NOSPEC_BRANCH_H__ */
+diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
+index 31051f35cbb7..3de69330e6c5 100644
+--- a/arch/x86/include/asm/traps.h
++++ b/arch/x86/include/asm/traps.h
+@@ -88,6 +88,7 @@ dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
+ #ifdef CONFIG_X86_32
+ dotraplinkage void do_iret_error(struct pt_regs *, long);
+ #endif
++dotraplinkage void do_mce(struct pt_regs *, long);
+ 
+ static inline int get_si_code(unsigned long condition)
+ {
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index 88c214e75a6b..2ce1c708b8ee 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -369,8 +369,11 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
+               irq_data->hwirq = virq + i;
+               err = assign_irq_vector_policy(virq + i, node, data, info,
+                                              irq_data);
+-              if (err)
++              if (err) {
++                      irq_data->chip_data = NULL;
++                      free_apic_chip_data(data);
+                       goto error;
++              }
+               /*
+                * If the apic destination mode is physical, then the
+                * effective affinity is restricted to a single target
+@@ -383,7 +386,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
+       return 0;
+ 
+ error:
+-      x86_vector_free_irqs(domain, virq, i + 1);
++      x86_vector_free_irqs(domain, virq, i);
+       return err;
+ }
+ 
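The hunk above fixes an off-by-one in the unwind path: the failing iteration now releases its own chip data inline, so the rollback must cover only the entries that were fully set up. A hedged sketch of the pattern in plain C (names and the failure point are illustrative):

    #include <stdlib.h>

    static int alloc_one(int i, void **slot)
    {
            *slot = (i == 3) ? NULL : malloc(16);   /* simulate failure at i == 3 */
            return *slot ? 0 : -1;
    }

    int alloc_all(void)
    {
            enum { N = 8 };
            void *slots[N];
            int i;

            for (i = 0; i < N; i++)
                    if (alloc_one(i, &slots[i]))
                            goto error;     /* slots[i] already cleaned up */
            return 0;

    error:
            while (i--)                     /* frees [0, i) only, not [0, i] */
                    free(slots[i]);
            return -1;
    }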
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index e4dc26185aa7..390b3dc3d438 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -23,6 +23,7 @@
+ #include <asm/alternative.h>
+ #include <asm/pgtable.h>
+ #include <asm/set_memory.h>
++#include <asm/intel-family.h>
+ 
+ static void __init spectre_v2_select_mitigation(void);
+ 
+@@ -155,6 +156,23 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+       return SPECTRE_V2_CMD_NONE;
+ }
+ 
++/* Check for Skylake-like CPUs (for RSB handling) */
++static bool __init is_skylake_era(void)
++{
++      if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
++          boot_cpu_data.x86 == 6) {
++              switch (boot_cpu_data.x86_model) {
++              case INTEL_FAM6_SKYLAKE_MOBILE:
++              case INTEL_FAM6_SKYLAKE_DESKTOP:
++              case INTEL_FAM6_SKYLAKE_X:
++              case INTEL_FAM6_KABYLAKE_MOBILE:
++              case INTEL_FAM6_KABYLAKE_DESKTOP:
++                      return true;
++              }
++      }
++      return false;
++}
++
+ static void __init spectre_v2_select_mitigation(void)
+ {
+       enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+@@ -213,6 +231,24 @@ static void __init spectre_v2_select_mitigation(void)
+ 
+       spectre_v2_enabled = mode;
+       pr_info("%s\n", spectre_v2_strings[mode]);
++
++      /*
++       * If neither SMEP or KPTI are available, there is a risk of
++       * hitting userspace addresses in the RSB after a context switch
++       * from a shallow call stack to a deeper one. To prevent this fill
++       * the entire RSB, even when using IBRS.
++       *
++       * Skylake era CPUs have a separate issue with *underflow* of the
++       * RSB, when they will predict 'ret' targets from the generic BTB.
++       * The proper mitigation for this is IBRS. If IBRS is not supported
++       * or deactivated in favour of retpolines the RSB fill on context
++       * switch is required.
++       */
++      if ((!boot_cpu_has(X86_FEATURE_PTI) &&
++           !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
++              setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
++              pr_info("Filling RSB on context switch\n");
++      }
+ }
+ 
+ #undef pr_fmt
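The comment block above encodes a two-part policy: fill the RSB on context switch when neither KPTI nor SMEP can keep userspace RSB entries out of the kernel, or when the CPU is a Skylake-era part that falls back to the BTB on RSB underflow. Restated as a predicate (a sketch with illustrative names, not kernel API):

    static int need_rsb_fill_on_ctxsw(int has_pti, int has_smep, int skylake_era)
    {
            return (!has_pti && !has_smep) || skylake_era;
    }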
+diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
+index 88dcf8479013..99442370de40 100644
+--- a/arch/x86/kernel/cpu/intel_rdt.c
++++ b/arch/x86/kernel/cpu/intel_rdt.c
+@@ -525,10 +525,6 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
+                */
+               if (static_branch_unlikely(&rdt_mon_enable_key))
+                       rmdir_mondata_subdir_allrdtgrp(r, d->id);
+-              kfree(d->ctrl_val);
+-              kfree(d->rmid_busy_llc);
+-              kfree(d->mbm_total);
+-              kfree(d->mbm_local);
+               list_del(&d->list);
+               if (is_mbm_enabled())
+                       cancel_delayed_work(&d->mbm_over);
+@@ -545,6 +541,10 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
+                       cancel_delayed_work(&d->cqm_limbo);
+               }
+ 
++              kfree(d->ctrl_val);
++              kfree(d->rmid_busy_llc);
++              kfree(d->mbm_total);
++              kfree(d->mbm_local);
+               kfree(d);
+               return;
+       }
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 3b413065c613..a9e898b71208 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -1788,6 +1788,11 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
+ void (*machine_check_vector)(struct pt_regs *, long error_code) =
+                                               unexpected_machine_check;
+ 
++dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
++{
++      machine_check_vector(regs, error_code);
++}
++
+ /*
+  * Called for each booted CPU to set up machine checks.
+  * Must be called with preempt off:
+diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
+index 05459ad3db46..d0e69769abfd 100644
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -21,7 +21,6 @@ struct cpuid_bit {
+ static const struct cpuid_bit cpuid_bits[] = {
+       { X86_FEATURE_APERFMPERF,       CPUID_ECX,  0, 0x00000006, 0 },
+       { X86_FEATURE_EPB,              CPUID_ECX,  3, 0x00000006, 0 },
+-      { X86_FEATURE_INTEL_PT,         CPUID_EBX, 25, 0x00000007, 0 },
+       { X86_FEATURE_AVX512_4VNNIW,    CPUID_EDX,  2, 0x00000007, 0 },
+       { X86_FEATURE_AVX512_4FMAPS,    CPUID_EDX,  3, 0x00000007, 0 },
+       { X86_FEATURE_CAT_L3,           CPUID_EBX,  1, 0x00000010, 0 },
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index 6a5d757b9cfd..7ba5d819ebe3 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -157,8 +157,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
+       p = fixup_pointer(&phys_base, physaddr);
+       *p += load_delta - sme_get_me_mask();
+ 
+-      /* Encrypt the kernel (if SME is active) */
+-      sme_encrypt_kernel();
++      /* Encrypt the kernel and related (if SME is active) */
++      sme_encrypt_kernel(bp);
+ 
+       /*
+        * Return the SME encryption mask (if SME is active) to be used as a
+diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
+index 014cb2fc47ff..236917bac5f2 100644
+--- a/arch/x86/kernel/idt.c
++++ b/arch/x86/kernel/idt.c
+@@ -56,7 +56,7 @@ struct idt_data {
+  * Early traps running on the DEFAULT_STACK because the other interrupt
+  * stacks work only after cpu_init().
+  */
+-static const __initdata struct idt_data early_idts[] = {
++static const __initconst struct idt_data early_idts[] = {
+       INTG(X86_TRAP_DB,               debug),
+       SYSG(X86_TRAP_BP,               int3),
+ #ifdef CONFIG_X86_32
+@@ -70,7 +70,7 @@ static const __initdata struct idt_data early_idts[] = {
+  * the traps which use them are reinitialized with IST after cpu_init() has
+  * set up TSS.
+  */
+-static const __initdata struct idt_data def_idts[] = {
++static const __initconst struct idt_data def_idts[] = {
+       INTG(X86_TRAP_DE,               divide_error),
+       INTG(X86_TRAP_NMI,              nmi),
+       INTG(X86_TRAP_BR,               bounds),
+@@ -108,7 +108,7 @@ static const __initdata struct idt_data def_idts[] = {
+ /*
+  * The APIC and SMP idt entries
+  */
+-static const __initdata struct idt_data apic_idts[] = {
++static const __initconst struct idt_data apic_idts[] = {
+ #ifdef CONFIG_SMP
+       INTG(RESCHEDULE_VECTOR,         reschedule_interrupt),
+       INTG(CALL_FUNCTION_VECTOR,      call_function_interrupt),
+@@ -150,7 +150,7 @@ static const __initdata struct idt_data apic_idts[] = {
+  * Early traps running on the DEFAULT_STACK because the other interrupt
+  * stacks work only after cpu_init().
+  */
+-static const __initdata struct idt_data early_pf_idts[] = {
++static const __initconst struct idt_data early_pf_idts[] = {
+       INTG(X86_TRAP_PF,               page_fault),
+ };
+ 
+@@ -158,7 +158,7 @@ static const __initdata struct idt_data early_pf_idts[] = {
+  * Override for the debug_idt. Same as the default, but with interrupt
+  * stack set to DEFAULT_STACK (0). Required for NMI trap handling.
+  */
+-static const __initdata struct idt_data dbg_idts[] = {
++static const __initconst struct idt_data dbg_idts[] = {
+       INTG(X86_TRAP_DB,       debug),
+       INTG(X86_TRAP_BP,       int3),
+ };
+@@ -180,7 +180,7 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;
+  * The exceptions which use Interrupt stacks. They are setup after
+  * cpu_init() when the TSS has been initialized.
+  */
+-static const __initdata struct idt_data ist_idts[] = {
++static const __initconst struct idt_data ist_idts[] = {
+       ISTG(X86_TRAP_DB,       debug,          DEBUG_STACK),
+       ISTG(X86_TRAP_NMI,      nmi,            NMI_STACK),
+       SISTG(X86_TRAP_BP,      int3,           DEBUG_STACK),
+diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
+index 4f98aad38237..3668f28cf5fc 100644
+--- a/arch/x86/kernel/kprobes/opt.c
++++ b/arch/x86/kernel/kprobes/opt.c
+@@ -40,6 +40,7 @@
+ #include <asm/debugreg.h>
+ #include <asm/set_memory.h>
+ #include <asm/sections.h>
++#include <asm/nospec-branch.h>
+ 
+ #include "common.h"
+ 
+@@ -205,7 +206,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src)
+ }
+ 
+ /* Check whether insn is indirect jump */
+-static int insn_is_indirect_jump(struct insn *insn)
++static int __insn_is_indirect_jump(struct insn *insn)
+ {
+       return ((insn->opcode.bytes[0] == 0xff &&
+               (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
+@@ -239,6 +240,26 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
+       return (start <= target && target <= start + len);
+ }
+ 
++static int insn_is_indirect_jump(struct insn *insn)
++{
++      int ret = __insn_is_indirect_jump(insn);
++
++#ifdef CONFIG_RETPOLINE
++      /*
++       * Jump to x86_indirect_thunk_* is treated as an indirect jump.
++       * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with
++       * older gcc may use indirect jump. So we add this check instead of
++       * replace indirect-jump check.
++       */
++      if (!ret)
++              ret = insn_jump_into_range(insn,
++                              (unsigned long)__indirect_thunk_start,
++                              (unsigned long)__indirect_thunk_end -
++                              (unsigned long)__indirect_thunk_start);
++#endif
++      return ret;
++}
++
+ /* Decode whole function to ensure any instructions don't jump into target */
+ static int can_optimize(unsigned long paddr)
+ {
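The new check treats a jump into the retpoline thunk section as an indirect jump even when the instruction itself is a direct one. The reused bounds test, restated standalone (same inclusive bounds as insn_jump_into_range() above):

    #include <stdbool.h>

    static bool target_in_range(unsigned long target,
                                unsigned long start, unsigned long len)
    {
            return start <= target && target <= start + len;
    }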
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 3cb2486c47e4..8bd1d8292cf7 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -380,19 +380,24 @@ void stop_this_cpu(void *dummy)
+       disable_local_APIC();
+       mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
+ 
++      /*
++       * Use wbinvd on processors that support SME. This provides support
++       * for performing a successful kexec when going from SME inactive
++       * to SME active (or vice-versa). The cache must be cleared so that
++       * if there are entries with the same physical address, both with and
++       * without the encryption bit, they don't race each other when flushed
++       * and potentially end up with the wrong entry being committed to
++       * memory.
++       */
++      if (boot_cpu_has(X86_FEATURE_SME))
++              native_wbinvd();
+       for (;;) {
+               /*
+-               * Use wbinvd followed by hlt to stop the processor. This
+-               * provides support for kexec on a processor that supports
+-               * SME. With kexec, going from SME inactive to SME active
+-               * requires clearing cache entries so that addresses without
+-               * the encryption bit set don't corrupt the same physical
+-               * address that has the encryption bit set when caches are
+-               * flushed. To achieve this a wbinvd is performed followed by
+-               * a hlt. Even if the processor is not in the kexec/SME
+-               * scenario this only adds a wbinvd to a halting processor.
++               * Use native_halt() so that memory contents don't change
++               * (stack usage and variables) after possibly issuing the
++               * native_wbinvd() above.
+                */
+-              asm volatile("wbinvd; hlt" : : : "memory");
++              native_halt();
+       }
+ }
+ 
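The reordering matters because a wbinvd inside the loop would re-dirty the cache lines on every iteration, defeating the SME flush. A hedged C sketch of the fixed shape, with the privileged instructions stubbed out:

    static void wbinvd_stub(void) { /* stands in for native_wbinvd() */ }
    static void halt_stub(void)   { /* stands in for native_halt() */ }

    static void __attribute__((noreturn, unused)) stop_cpu_sketch(int cpu_has_sme)
    {
            if (cpu_has_sme)
                    wbinvd_stub();  /* flush caches exactly once, before halting */
            for (;;)
                    halt_stub();    /* loop body no longer touches memory */
    }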
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 0957dd73d127..e84cb4c75cd0 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -376,14 +376,6 @@ static void __init reserve_initrd(void)
+           !ramdisk_image || !ramdisk_size)
+               return;         /* No initrd provided by bootloader */
+ 
+-      /*
+-       * If SME is active, this memory will be marked encrypted by the
+-       * kernel when it is accessed (including relocation). However, the
+-       * ramdisk image was loaded decrypted by the bootloader, so make
+-       * sure that it is encrypted before accessing it.
+-       */
+-      sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image);
+-
+       initrd_start = 0;
+ 
+       mapped_size = memblock_mem_size(max_pfn_mapped);
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index ad2b925a808e..47506567435e 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -602,7 +602,6 @@ unsigned long native_calibrate_tsc(void)
+               case INTEL_FAM6_KABYLAKE_DESKTOP:
+                       crystal_khz = 24000;    /* 24.0 MHz */
+                       break;
+-              case INTEL_FAM6_SKYLAKE_X:
+               case INTEL_FAM6_ATOM_DENVERTON:
+                       crystal_khz = 25000;    /* 25.0 MHz */
+                       break;
+@@ -612,6 +611,8 @@ unsigned long native_calibrate_tsc(void)
+               }
+       }
+ 
++      if (crystal_khz == 0)
++              return 0;
+       /*
+        * TSC frequency determined by CPUID is a "hardware reported"
+        * frequency and is the most accurate one so far we have. This
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index 1e413a9326aa..9b138a06c1a4 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -124,6 +124,12 @@ SECTIONS
+               ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
+ #endif
+ 
++#ifdef CONFIG_RETPOLINE
++              __indirect_thunk_start = .;
++              *(.text.__x86.indirect_thunk)
++              __indirect_thunk_end = .;
++#endif
++
+               /* End of text section */
+               _etext = .;
+       } :text = 0x9090
+diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
+index cb45c6cb465f..dfb2ba91b670 100644
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -9,7 +9,7 @@
+ #include <asm/nospec-branch.h>
+ 
+ .macro THUNK reg
+-      .section .text.__x86.indirect_thunk.\reg
++      .section .text.__x86.indirect_thunk
+ 
+ ENTRY(__x86_indirect_thunk_\reg)
+       CFI_STARTPROC
+@@ -25,7 +25,8 @@ ENDPROC(__x86_indirect_thunk_\reg)
+  * than one per register with the correct names. So we do it
+  * the simple and nasty way...
+  */
+-#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg)
++#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
++#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
+ #define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
+ 
+ GENERATE_THUNK(_ASM_AX)
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 3109ba6c6ede..b264b590eeec 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -173,14 +173,15 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
+  * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
+  *         faulted on a pte with its pkey=4.
+  */
+-static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
++static void fill_sig_info_pkey(int si_signo, int si_code, siginfo_t *info,
++              u32 *pkey)
+ {
+       /* This is effectively an #ifdef */
+       if (!boot_cpu_has(X86_FEATURE_OSPKE))
+               return;
+ 
+       /* Fault not from Protection Keys: nothing to do */
+-      if (si_code != SEGV_PKUERR)
++      if ((si_code != SEGV_PKUERR) || (si_signo != SIGSEGV))
+               return;
+       /*
+        * force_sig_info_fault() is called from a number of
+@@ -219,7 +220,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
+               lsb = PAGE_SHIFT;
+       info.si_addr_lsb = lsb;
+ 
+-      fill_sig_info_pkey(si_code, &info, pkey);
++      fill_sig_info_pkey(si_signo, si_code, &info, pkey);
+ 
+       force_sig_info(si_signo, &info, tsk);
+ }
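Passing si_signo down closes a hole where a non-SIGSEGV fault whose si_code happened to equal SEGV_PKUERR could leak a bogus pkey to userspace. The tightened guard, restated (assuming <signal.h> exposes SEGV_PKUERR, as Linux's UAPI headers do):

    #include <signal.h>
    #include <stdbool.h>

    static bool fault_is_pkey_segv(int si_signo, int si_code)
    {
            /* pkey info is only meaningful for SIGSEGV/SEGV_PKUERR */
            return si_signo == SIGSEGV && si_code == SEGV_PKUERR;
    }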
+diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
+index 47388f0c0e59..af6f2f9c6a26 100644
+--- a/arch/x86/mm/kasan_init_64.c
++++ b/arch/x86/mm/kasan_init_64.c
+@@ -21,10 +21,14 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];
+ 
+ static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
+ 
+-static __init void *early_alloc(size_t size, int nid)
++static __init void *early_alloc(size_t size, int nid, bool panic)
+ {
+-      return memblock_virt_alloc_try_nid_nopanic(size, size,
+-              __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
++      if (panic)
++              return memblock_virt_alloc_try_nid(size, size,
++                      __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
++      else
++              return memblock_virt_alloc_try_nid_nopanic(size, size,
++                      __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ }
+ 
+ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
+@@ -38,14 +42,14 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
+               if (boot_cpu_has(X86_FEATURE_PSE) &&
+                   ((end - addr) == PMD_SIZE) &&
+                   IS_ALIGNED(addr, PMD_SIZE)) {
+-                      p = early_alloc(PMD_SIZE, nid);
++                      p = early_alloc(PMD_SIZE, nid, false);
+                       if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
+                               return;
+                       else if (p)
+                               memblock_free(__pa(p), PMD_SIZE);
+               }
+ 
+-              p = early_alloc(PAGE_SIZE, nid);
++              p = early_alloc(PAGE_SIZE, nid, true);
+               pmd_populate_kernel(&init_mm, pmd, p);
+       }
+ 
+@@ -57,7 +61,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
+               if (!pte_none(*pte))
+                       continue;
+ 
+-              p = early_alloc(PAGE_SIZE, nid);
++              p = early_alloc(PAGE_SIZE, nid, true);
+               entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
+               set_pte_at(&init_mm, addr, pte, entry);
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+@@ -75,14 +79,14 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
+               if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
+                   ((end - addr) == PUD_SIZE) &&
+                   IS_ALIGNED(addr, PUD_SIZE)) {
+-                      p = early_alloc(PUD_SIZE, nid);
++                      p = early_alloc(PUD_SIZE, nid, false);
+                       if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
+                               return;
+                       else if (p)
+                               memblock_free(__pa(p), PUD_SIZE);
+               }
+ 
+-              p = early_alloc(PAGE_SIZE, nid);
++              p = early_alloc(PAGE_SIZE, nid, true);
+               pud_populate(&init_mm, pud, p);
+       }
+ 
+@@ -101,7 +105,7 @@ static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
+       unsigned long next;
+ 
+       if (p4d_none(*p4d)) {
+-              void *p = early_alloc(PAGE_SIZE, nid);
++              void *p = early_alloc(PAGE_SIZE, nid, true);
+ 
+               p4d_populate(&init_mm, p4d, p);
+       }
+@@ -122,7 +126,7 @@ static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
+       unsigned long next;
+ 
+       if (pgd_none(*pgd)) {
+-              p = early_alloc(PAGE_SIZE, nid);
++              p = early_alloc(PAGE_SIZE, nid, true);
+               pgd_populate(&init_mm, pgd, p);
+       }
+ 
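The new boolean selects between the panicking and non-panicking memblock allocators: huge-page attempts may fail and fall back to base pages, while base-page allocations have no fallback and must panic on failure. A userspace sketch of that contract (illustrative, not the memblock API):

    #include <stdio.h>
    #include <stdlib.h>

    static void *early_alloc_sketch(size_t size, int must_succeed)
    {
            void *p = malloc(size);

            if (!p && must_succeed) {
                    fprintf(stderr, "early_alloc: out of memory\n");
                    abort();        /* stands in for the memblock panic */
            }
            return p;               /* may be NULL when must_succeed == 0 */
    }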
+diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
+index 0286327e65fa..48c03c74c7f4 100644
+--- a/arch/x86/mm/mem_encrypt.c
++++ b/arch/x86/mm/mem_encrypt.c
+@@ -213,37 +213,62 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
+       set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
+ }
+ 
+-static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
+-                               unsigned long end)
++struct sme_populate_pgd_data {
++      void    *pgtable_area;
++      pgd_t   *pgd;
++
++      pmdval_t pmd_flags;
++      pteval_t pte_flags;
++      unsigned long paddr;
++
++      unsigned long vaddr;
++      unsigned long vaddr_end;
++};
++
++static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
+ {
+       unsigned long pgd_start, pgd_end, pgd_size;
+       pgd_t *pgd_p;
+ 
+-      pgd_start = start & PGDIR_MASK;
+-      pgd_end = end & PGDIR_MASK;
++      pgd_start = ppd->vaddr & PGDIR_MASK;
++      pgd_end = ppd->vaddr_end & PGDIR_MASK;
+ 
+-      pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1);
+-      pgd_size *= sizeof(pgd_t);
++      pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);
+ 
+-      pgd_p = pgd_base + pgd_index(start);
++      pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
+ 
+       memset(pgd_p, 0, pgd_size);
+ }
+ 
+-#define PGD_FLAGS     _KERNPG_TABLE_NOENC
+-#define P4D_FLAGS     _KERNPG_TABLE_NOENC
+-#define PUD_FLAGS     _KERNPG_TABLE_NOENC
+-#define PMD_FLAGS     (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
++#define PGD_FLAGS             _KERNPG_TABLE_NOENC
++#define P4D_FLAGS             _KERNPG_TABLE_NOENC
++#define PUD_FLAGS             _KERNPG_TABLE_NOENC
++#define PMD_FLAGS             _KERNPG_TABLE_NOENC
++
++#define PMD_FLAGS_LARGE               (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
++
++#define PMD_FLAGS_DEC         PMD_FLAGS_LARGE
++#define PMD_FLAGS_DEC_WP      ((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
++                               (_PAGE_PAT | _PAGE_PWT))
++
++#define PMD_FLAGS_ENC         (PMD_FLAGS_LARGE | _PAGE_ENC)
++
++#define PTE_FLAGS             (__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)
++
++#define PTE_FLAGS_DEC         PTE_FLAGS
++#define PTE_FLAGS_DEC_WP      ((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
++                               (_PAGE_PAT | _PAGE_PWT))
++
++#define PTE_FLAGS_ENC         (PTE_FLAGS | _PAGE_ENC)
+ 
+-static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
+-                                   unsigned long vaddr, pmdval_t pmd_val)
++static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
+ {
+       pgd_t *pgd_p;
+       p4d_t *p4d_p;
+       pud_t *pud_p;
+       pmd_t *pmd_p;
+ 
+-      pgd_p = pgd_base + pgd_index(vaddr);
++      pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
+       if (native_pgd_val(*pgd_p)) {
+               if (IS_ENABLED(CONFIG_X86_5LEVEL))
+                       p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
+@@ -253,15 +278,15 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
+               pgd_t pgd;
+ 
+               if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+-                      p4d_p = pgtable_area;
++                      p4d_p = ppd->pgtable_area;
+                       memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
+-                      pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
++                      ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
+ 
+                       pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS);
+               } else {
+-                      pud_p = pgtable_area;
++                      pud_p = ppd->pgtable_area;
+                       memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
+-                      pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
++                      ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
+ 
+                       pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS);
+               }
+@@ -269,58 +294,160 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
+       }
+ 
+       if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+-              p4d_p += p4d_index(vaddr);
++              p4d_p += p4d_index(ppd->vaddr);
+               if (native_p4d_val(*p4d_p)) {
+                       pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK);
+               } else {
+                       p4d_t p4d;
+ 
+-                      pud_p = pgtable_area;
++                      pud_p = ppd->pgtable_area;
+                       memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
+-                      pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
++                      ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
+ 
+                       p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS);
+                       native_set_p4d(p4d_p, p4d);
+               }
+       }
+ 
+-      pud_p += pud_index(vaddr);
++      pud_p += pud_index(ppd->vaddr);
+       if (native_pud_val(*pud_p)) {
+               if (native_pud_val(*pud_p) & _PAGE_PSE)
+-                      goto out;
++                      return NULL;
+ 
+               pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
+       } else {
+               pud_t pud;
+ 
+-              pmd_p = pgtable_area;
++              pmd_p = ppd->pgtable_area;
+               memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
+-              pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
++              ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
+ 
+               pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS);
+               native_set_pud(pud_p, pud);
+       }
+ 
+-      pmd_p += pmd_index(vaddr);
++      return pmd_p;
++}
++
++static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
++{
++      pmd_t *pmd_p;
++
++      pmd_p = sme_prepare_pgd(ppd);
++      if (!pmd_p)
++              return;
++
++      pmd_p += pmd_index(ppd->vaddr);
+       if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
+-              native_set_pmd(pmd_p, native_make_pmd(pmd_val));
++              native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags));
++}
++
++static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
++{
++      pmd_t *pmd_p;
++      pte_t *pte_p;
++
++      pmd_p = sme_prepare_pgd(ppd);
++      if (!pmd_p)
++              return;
++
++      pmd_p += pmd_index(ppd->vaddr);
++      if (native_pmd_val(*pmd_p)) {
++              if (native_pmd_val(*pmd_p) & _PAGE_PSE)
++                      return;
++
++              pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK);
++      } else {
++              pmd_t pmd;
++
++              pte_p = ppd->pgtable_area;
++              memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE);
++              ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE;
++
++              pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS);
++              native_set_pmd(pmd_p, pmd);
++      }
+ 
+-out:
+-      return pgtable_area;
++      pte_p += pte_index(ppd->vaddr);
++      if (!native_pte_val(*pte_p))
++              native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags));
++}
++
++static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
++{
++      while (ppd->vaddr < ppd->vaddr_end) {
++              sme_populate_pgd_large(ppd);
++
++              ppd->vaddr += PMD_PAGE_SIZE;
++              ppd->paddr += PMD_PAGE_SIZE;
++      }
++}
++
++static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
++{
++      while (ppd->vaddr < ppd->vaddr_end) {
++              sme_populate_pgd(ppd);
++
++              ppd->vaddr += PAGE_SIZE;
++              ppd->paddr += PAGE_SIZE;
++      }
++}
++
++static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
++                                 pmdval_t pmd_flags, pteval_t pte_flags)
++{
++      unsigned long vaddr_end;
++
++      ppd->pmd_flags = pmd_flags;
++      ppd->pte_flags = pte_flags;
++
++      /* Save original end value since we modify the struct value */
++      vaddr_end = ppd->vaddr_end;
++
++      /* If start is not 2MB aligned, create PTE entries */
++      ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
++      __sme_map_range_pte(ppd);
++
++      /* Create PMD entries */
++      ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
++      __sme_map_range_pmd(ppd);
++
++      /* If end is not 2MB aligned, create PTE entries */
++      ppd->vaddr_end = vaddr_end;
++      __sme_map_range_pte(ppd);
++}
++
++static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
++{
++      __sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
++}
++
++static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
++{
++      __sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
++}
++
++static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
++{
++      __sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
+ }
+ 
+ static unsigned long __init sme_pgtable_calc(unsigned long len)
+ {
+-      unsigned long p4d_size, pud_size, pmd_size;
++      unsigned long p4d_size, pud_size, pmd_size, pte_size;
+       unsigned long total;
+ 
+       /*
+        * Perform a relatively simplistic calculation of the pagetable
+-       * entries that are needed. That mappings will be covered by 2MB
+-       * PMD entries so we can conservatively calculate the required
++       * entries that are needed. Those mappings will be covered mostly
++       * by 2MB PMD entries so we can conservatively calculate the required
+        * number of P4D, PUD and PMD structures needed to perform the
+-       * mappings. Incrementing the count for each covers the case where
+-       * the addresses cross entries.
++       * mappings.  For mappings that are not 2MB aligned, PTE mappings
++       * would be needed for the start and end portion of the address range
++       * that fall outside of the 2MB alignment.  This results in, at most,
++       * two extra pages to hold PTE entries for each range that is mapped.
++       * Incrementing the count for each covers the case where the addresses
++       * cross entries.
+        */
+       if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+               p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
+@@ -334,8 +461,9 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
+       }
+       pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1;
+       pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
++      pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE;
+ 
+-      total = p4d_size + pud_size + pmd_size;
++      total = p4d_size + pud_size + pmd_size + pte_size;
+ 
+       /*
+        * Now calculate the added pagetable structures needed to populate
+@@ -359,29 +487,29 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
+       return total;
+ }
+ 
+-void __init sme_encrypt_kernel(void)
++void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp)
+ {
+       unsigned long workarea_start, workarea_end, workarea_len;
+       unsigned long execute_start, execute_end, execute_len;
+       unsigned long kernel_start, kernel_end, kernel_len;
++      unsigned long initrd_start, initrd_end, initrd_len;
++      struct sme_populate_pgd_data ppd;
+       unsigned long pgtable_area_len;
+-      unsigned long paddr, pmd_flags;
+       unsigned long decrypted_base;
+-      void *pgtable_area;
+-      pgd_t *pgd;
+ 
+       if (!sme_active())
+               return;
+ 
+       /*
+-       * Prepare for encrypting the kernel by building new pagetables with
+-       * the necessary attributes needed to encrypt the kernel in place.
++       * Prepare for encrypting the kernel and initrd by building new
++       * pagetables with the necessary attributes needed to encrypt the
++       * kernel in place.
+        *
+        *   One range of virtual addresses will map the memory occupied
+-       *   by the kernel as encrypted.
++       *   by the kernel and initrd as encrypted.
+        *
+        *   Another range of virtual addresses will map the memory occupied
+-       *   by the kernel as decrypted and write-protected.
++       *   by the kernel and initrd as decrypted and write-protected.
+        *
+        *     The use of write-protect attribute will prevent any of the
+        *     memory from being cached.
+@@ -392,6 +520,20 @@ void __init sme_encrypt_kernel(void)
+       kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
+       kernel_len = kernel_end - kernel_start;
+ 
++      initrd_start = 0;
++      initrd_end = 0;
++      initrd_len = 0;
++#ifdef CONFIG_BLK_DEV_INITRD
++      initrd_len = (unsigned long)bp->hdr.ramdisk_size |
++                   ((unsigned long)bp->ext_ramdisk_size << 32);
++      if (initrd_len) {
++              initrd_start = (unsigned long)bp->hdr.ramdisk_image |
++                             ((unsigned long)bp->ext_ramdisk_image << 32);
++              initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
++              initrd_len = initrd_end - initrd_start;
++      }
++#endif
++
+       /* Set the encryption workarea to be immediately after the kernel */
+       workarea_start = kernel_end;
+ 
+@@ -414,16 +556,21 @@ void __init sme_encrypt_kernel(void)
+        */
+       pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
+       pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
++      if (initrd_len)
++              pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;
+ 
+       /* PUDs and PMDs needed in the current pagetables for the workarea */
+       pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);
+ 
+       /*
+        * The total workarea includes the executable encryption area and
+-       * the pagetable area.
++       * the pagetable area. The start of the workarea is already 2MB
++       * aligned, align the end of the workarea on a 2MB boundary so that
++       * we don't try to create/allocate PTE entries from the workarea
++       * before it is mapped.
+        */
+       workarea_len = execute_len + pgtable_area_len;
+-      workarea_end = workarea_start + workarea_len;
++      workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);
+ 
+       /*
+        * Set the address to the start of where newly created pagetable
+@@ -432,45 +579,30 @@ void __init sme_encrypt_kernel(void)
+        * pagetables and when the new encrypted and decrypted kernel
+        * mappings are populated.
+        */
+-      pgtable_area = (void *)execute_end;
++      ppd.pgtable_area = (void *)execute_end;
+ 
+       /*
+        * Make sure the current pagetable structure has entries for
+        * addressing the workarea.
+        */
+-      pgd = (pgd_t *)native_read_cr3_pa();
+-      paddr = workarea_start;
+-      while (paddr < workarea_end) {
+-              pgtable_area = sme_populate_pgd(pgd, pgtable_area,
+-                                              paddr,
+-                                              paddr + PMD_FLAGS);
+-
+-              paddr += PMD_PAGE_SIZE;
+-      }
++      ppd.pgd = (pgd_t *)native_read_cr3_pa();
++      ppd.paddr = workarea_start;
++      ppd.vaddr = workarea_start;
++      ppd.vaddr_end = workarea_end;
++      sme_map_range_decrypted(&ppd);
+ 
+       /* Flush the TLB - no globals so cr3 is enough */
+       native_write_cr3(__native_read_cr3());
+ 
+       /*
+        * A new pagetable structure is being built to allow for the kernel
+-       * to be encrypted. It starts with an empty PGD that will then be
+-       * populated with new PUDs and PMDs as the encrypted and decrypted
+-       * kernel mappings are created.
++       * and initrd to be encrypted. It starts with an empty PGD that will
++       * then be populated with new PUDs and PMDs as the encrypted and
++       * decrypted kernel mappings are created.
+        */
+-      pgd = pgtable_area;
+-      memset(pgd, 0, sizeof(*pgd) * PTRS_PER_PGD);
+-      pgtable_area += sizeof(*pgd) * PTRS_PER_PGD;
+-
+-      /* Add encrypted kernel (identity) mappings */
+-      pmd_flags = PMD_FLAGS | _PAGE_ENC;
+-      paddr = kernel_start;
+-      while (paddr < kernel_end) {
+-              pgtable_area = sme_populate_pgd(pgd, pgtable_area,
+-                                              paddr,
+-                                              paddr + pmd_flags);
+-
+-              paddr += PMD_PAGE_SIZE;
+-      }
++      ppd.pgd = ppd.pgtable_area;
++      memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
++      ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;
+ 
+       /*
+        * A different PGD index/entry must be used to get different
+@@ -479,47 +611,79 @@ void __init sme_encrypt_kernel(void)
+        * the base of the mapping.
+        */
+       decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
++      if (initrd_len) {
++              unsigned long check_base;
++
++              check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
++              decrypted_base = max(decrypted_base, check_base);
++      }
+       decrypted_base <<= PGDIR_SHIFT;
+ 
++      /* Add encrypted kernel (identity) mappings */
++      ppd.paddr = kernel_start;
++      ppd.vaddr = kernel_start;
++      ppd.vaddr_end = kernel_end;
++      sme_map_range_encrypted(&ppd);
++
+       /* Add decrypted, write-protected kernel (non-identity) mappings */
+-      pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT);
+-      paddr = kernel_start;
+-      while (paddr < kernel_end) {
+-              pgtable_area = sme_populate_pgd(pgd, pgtable_area,
+-                                              paddr + decrypted_base,
+-                                              paddr + pmd_flags);
+-
+-              paddr += PMD_PAGE_SIZE;
++      ppd.paddr = kernel_start;
++      ppd.vaddr = kernel_start + decrypted_base;
++      ppd.vaddr_end = kernel_end + decrypted_base;
++      sme_map_range_decrypted_wp(&ppd);
++
++      if (initrd_len) {
++              /* Add encrypted initrd (identity) mappings */
++              ppd.paddr = initrd_start;
++              ppd.vaddr = initrd_start;
++              ppd.vaddr_end = initrd_end;
++              sme_map_range_encrypted(&ppd);
++              /*
++               * Add decrypted, write-protected initrd (non-identity) mappings
++               */
++              ppd.paddr = initrd_start;
++              ppd.vaddr = initrd_start + decrypted_base;
++              ppd.vaddr_end = initrd_end + decrypted_base;
++              sme_map_range_decrypted_wp(&ppd);
+       }
+ 
+       /* Add decrypted workarea mappings to both kernel mappings */
+-      paddr = workarea_start;
+-      while (paddr < workarea_end) {
+-              pgtable_area = sme_populate_pgd(pgd, pgtable_area,
+-                                              paddr,
+-                                              paddr + PMD_FLAGS);
++      ppd.paddr = workarea_start;
++      ppd.vaddr = workarea_start;
++      ppd.vaddr_end = workarea_end;
++      sme_map_range_decrypted(&ppd);
+ 
+-              pgtable_area = sme_populate_pgd(pgd, pgtable_area,
+-                                              paddr + decrypted_base,
+-                                              paddr + PMD_FLAGS);
+-
+-              paddr += PMD_PAGE_SIZE;
+-      }
++      ppd.paddr = workarea_start;
++      ppd.vaddr = workarea_start + decrypted_base;
++      ppd.vaddr_end = workarea_end + decrypted_base;
++      sme_map_range_decrypted(&ppd);
+ 
+       /* Perform the encryption */
+       sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
+-                          kernel_len, workarea_start, (unsigned long)pgd);
++                          kernel_len, workarea_start, (unsigned long)ppd.pgd);
++
++      if (initrd_len)
++              sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
++                                  initrd_len, workarea_start,
++                                  (unsigned long)ppd.pgd);
+ 
+       /*
+        * At this point we are running encrypted.  Remove the mappings for
+        * the decrypted areas - all that is needed for this is to remove
+        * the PGD entry/entries.
+        */
+-      sme_clear_pgd(pgd, kernel_start + decrypted_base,
+-                    kernel_end + decrypted_base);
++      ppd.vaddr = kernel_start + decrypted_base;
++      ppd.vaddr_end = kernel_end + decrypted_base;
++      sme_clear_pgd(&ppd);
++
++      if (initrd_len) {
++              ppd.vaddr = initrd_start + decrypted_base;
++              ppd.vaddr_end = initrd_end + decrypted_base;
++              sme_clear_pgd(&ppd);
++      }
+ 
+-      sme_clear_pgd(pgd, workarea_start + decrypted_base,
+-                    workarea_end + decrypted_base);
++      ppd.vaddr = workarea_start + decrypted_base;
++      ppd.vaddr_end = workarea_end + decrypted_base;
++      sme_clear_pgd(&ppd);
+ 
+       /* Flush the TLB - no globals so cr3 is enough */
+       native_write_cr3(__native_read_cr3());
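The core of the rework above is that mapped ranges no longer need to be 2MB aligned: __sme_map_range() maps an unaligned head and tail with 4K PTEs and the aligned middle with 2M PMDs. A hedged standalone sketch of that split (constants and names are illustrative; assumes a page-aligned range):

    #include <stdio.h>

    #define PMD_PAGE_SIZE   (2UL << 20)     /* 2MB */
    #define PAGE_SIZE       (4UL << 10)     /* 4KB */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    static void map_range_sketch(unsigned long vaddr, unsigned long vaddr_end)
    {
            unsigned long head_end   = ALIGN_UP(vaddr, PMD_PAGE_SIZE);
            unsigned long tail_start = vaddr_end & ~(PMD_PAGE_SIZE - 1);

            for (; vaddr < head_end && vaddr < vaddr_end; vaddr += PAGE_SIZE)
                    printf("PTE  %#lx\n", vaddr);   /* unaligned head */
            for (; vaddr < tail_start; vaddr += PMD_PAGE_SIZE)
                    printf("PMD  %#lx\n", vaddr);   /* 2MB-aligned middle */
            for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
                    printf("PTE  %#lx\n", vaddr);   /* unaligned tail */
    }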
+diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
+index 730e6d541df1..01f682cf77a8 100644
+--- a/arch/x86/mm/mem_encrypt_boot.S
++++ b/arch/x86/mm/mem_encrypt_boot.S
+@@ -22,9 +22,9 @@ ENTRY(sme_encrypt_execute)
+ 
+       /*
+        * Entry parameters:
+-       *   RDI - virtual address for the encrypted kernel mapping
+-       *   RSI - virtual address for the decrypted kernel mapping
+-       *   RDX - length of kernel
++       *   RDI - virtual address for the encrypted mapping
++       *   RSI - virtual address for the decrypted mapping
++       *   RDX - length to encrypt
+        *   RCX - virtual address of the encryption workarea, including:
+        *     - stack page (PAGE_SIZE)
+        *     - encryption routine page (PAGE_SIZE)
+@@ -41,9 +41,9 @@ ENTRY(sme_encrypt_execute)
+       addq    $PAGE_SIZE, %rax        /* Workarea encryption routine */
+ 
+       push    %r12
+-      movq    %rdi, %r10              /* Encrypted kernel */
+-      movq    %rsi, %r11              /* Decrypted kernel */
+-      movq    %rdx, %r12              /* Kernel length */
++      movq    %rdi, %r10              /* Encrypted area */
++      movq    %rsi, %r11              /* Decrypted area */
++      movq    %rdx, %r12              /* Area length */
+ 
+       /* Copy encryption routine into the workarea */
+       movq    %rax, %rdi                              /* Workarea encryption routine */
+@@ -52,10 +52,10 @@ ENTRY(sme_encrypt_execute)
+       rep     movsb
+ 
+       /* Setup registers for call */
+-      movq    %r10, %rdi              /* Encrypted kernel */
+-      movq    %r11, %rsi              /* Decrypted kernel */
++      movq    %r10, %rdi              /* Encrypted area */
++      movq    %r11, %rsi              /* Decrypted area */
+       movq    %r8, %rdx               /* Pagetables used for encryption */
+-      movq    %r12, %rcx              /* Kernel length */
++      movq    %r12, %rcx              /* Area length */
+       movq    %rax, %r8               /* Workarea encryption routine */
+       addq    $PAGE_SIZE, %r8         /* Workarea intermediate copy buffer */
+ 
+@@ -71,7 +71,7 @@ ENDPROC(sme_encrypt_execute)
+ 
+ ENTRY(__enc_copy)
+ /*
+- * Routine used to encrypt kernel.
++ * Routine used to encrypt memory in place.
+  *   This routine must be run outside of the kernel proper since
+  *   the kernel will be encrypted during the process. So this
+  *   routine is defined here and then copied to an area outside
+@@ -79,19 +79,19 @@ ENTRY(__enc_copy)
+  *   during execution.
+  *
+  *   On entry the registers must be:
+- *     RDI - virtual address for the encrypted kernel mapping
+- *     RSI - virtual address for the decrypted kernel mapping
++ *     RDI - virtual address for the encrypted mapping
++ *     RSI - virtual address for the decrypted mapping
+  *     RDX - address of the pagetables to use for encryption
+- *     RCX - length of kernel
++ *     RCX - length of area
+  *      R8 - intermediate copy buffer
+  *
+  *     RAX - points to this routine
+  *
+- * The kernel will be encrypted by copying from the non-encrypted
+- * kernel space to an intermediate buffer and then copying from the
+- * intermediate buffer back to the encrypted kernel space. The physical
+- * addresses of the two kernel space mappings are the same which
+- * results in the kernel being encrypted "in place".
++ * The area will be encrypted by copying from the non-encrypted
++ * memory space to an intermediate buffer and then copying from the
++ * intermediate buffer back to the encrypted memory space. The physical
++ * addresses of the two mappings are the same which results in the area
++ * being encrypted "in place".
+  */
+       /* Enable the new page tables */
+       mov     %rdx, %cr3
+@@ -103,47 +103,55 @@ ENTRY(__enc_copy)
+       orq     $X86_CR4_PGE, %rdx
+       mov     %rdx, %cr4
+ 
++      push    %r15
++      push    %r12
++
++      movq    %rcx, %r9               /* Save area length */
++      movq    %rdi, %r10              /* Save encrypted area address */
++      movq    %rsi, %r11              /* Save decrypted area address */
++
+       /* Set the PAT register PA5 entry to write-protect */
+-      push    %rcx
+       movl    $MSR_IA32_CR_PAT, %ecx
+       rdmsr
+-      push    %rdx                    /* Save original PAT value */
++      mov     %rdx, %r15              /* Save original PAT value */
+       andl    $0xffff00ff, %edx       /* Clear PA5 */
+       orl     $0x00000500, %edx       /* Set PA5 to WP */
+       wrmsr
+-      pop     %rdx                    /* RDX contains original PAT value */
+-      pop     %rcx
+-
+-      movq    %rcx, %r9               /* Save kernel length */
+-      movq    %rdi, %r10              /* Save encrypted kernel address */
+-      movq    %rsi, %r11              /* Save decrypted kernel address */
+ 
+       wbinvd                          /* Invalidate any cache entries */
+ 
+-      /* Copy/encrypt 2MB at a time */
++      /* Copy/encrypt up to 2MB at a time */
++      movq    $PMD_PAGE_SIZE, %r12
+ 1:
+-      movq    %r11, %rsi              /* Source - decrypted kernel */
++      cmpq    %r12, %r9
++      jnb     2f
++      movq    %r9, %r12
++
++2:
++      movq    %r11, %rsi              /* Source - decrypted area */
+       movq    %r8, %rdi               /* Dest   - intermediate copy buffer */
+-      movq    $PMD_PAGE_SIZE, %rcx    /* 2MB length */
++      movq    %r12, %rcx
+       rep     movsb
+ 
+       movq    %r8, %rsi               /* Source - intermediate copy buffer */
+-      movq    %r10, %rdi              /* Dest   - encrypted kernel */
+-      movq    $PMD_PAGE_SIZE, %rcx    /* 2MB length */
++      movq    %r10, %rdi              /* Dest   - encrypted area */
++      movq    %r12, %rcx
+       rep     movsb
+ 
+-      addq    $PMD_PAGE_SIZE, %r11
+-      addq    $PMD_PAGE_SIZE, %r10
+-      subq    $PMD_PAGE_SIZE, %r9     /* Kernel length decrement */
++      addq    %r12, %r11
++      addq    %r12, %r10
++      subq    %r12, %r9               /* Kernel length decrement */
+       jnz     1b                      /* Kernel length not zero? */
+ 
+       /* Restore PAT register */
+-      push    %rdx                    /* Save original PAT value */
+       movl    $MSR_IA32_CR_PAT, %ecx
+       rdmsr
+-      pop     %rdx                    /* Restore original PAT value */
++      mov     %r15, %rdx              /* Restore original PAT value */
+       wrmsr
+ 
++      pop     %r12
++      pop     %r15
++
+       ret
+ .L__enc_copy_end:
+ ENDPROC(__enc_copy)
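At the C level, __enc_copy() amounts to a bounce-buffer copy loop, now clamped so the final chunk can be smaller than 2MB. A hedged sketch (illustrative names; the real routine runs under page tables where the two mappings alias the same physical memory with and without the encryption bit):

    #include <string.h>

    #define PMD_PAGE_SIZE (2UL << 20)

    static void enc_copy_sketch(char *enc, const char *dec,
                                unsigned long len, char *buf)
    {
            while (len) {
                    unsigned long chunk =
                            len < PMD_PAGE_SIZE ? len : PMD_PAGE_SIZE;

                    memcpy(buf, dec, chunk);   /* decrypted mapping -> buffer */
                    memcpy(enc, buf, chunk);   /* buffer -> encrypted mapping */
                    dec += chunk;
                    enc += chunk;
                    len -= chunk;
            }
    }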
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index ee4c1ec9dca0..e7ded346d94b 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4439,6 +4439,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+        * https://bugzilla.kernel.org/show_bug.cgi?id=121671
+        */
+       { "LITEON CX1-JB*-HP",  NULL,           ATA_HORKAGE_MAX_SEC_1024 },
++      { "LITEON EP1-*",       NULL,           ATA_HORKAGE_MAX_SEC_1024 },
+ 
+       /* Devices we expect to fail diagnostics */
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+index a2978a37b4f3..700fc754f28a 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+@@ -174,6 +174,7 @@ gf119_sor = {
+               .links = gf119_sor_dp_links,
+               .power = g94_sor_dp_power,
+               .pattern = gf119_sor_dp_pattern,
++              .drive = gf119_sor_dp_drive,
+               .vcpi = gf119_sor_dp_vcpi,
+               .audio = gf119_sor_dp_audio,
+               .audio_sym = gf119_sor_dp_audio_sym,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+index b8a09807c5de..3824595fece1 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+@@ -266,8 +266,8 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
+       .set_property = vmw_du_connector_set_property,
+       .destroy = vmw_ldu_connector_destroy,
+       .reset = vmw_du_connector_reset,
+-      .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+-      .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
++      .atomic_duplicate_state = vmw_du_connector_duplicate_state,
++      .atomic_destroy_state = vmw_du_connector_destroy_state,
+       .atomic_set_property = vmw_du_connector_atomic_set_property,
+       .atomic_get_property = vmw_du_connector_atomic_get_property,
+ };
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+index d1552d3e0652..7ae38a67388c 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+@@ -420,8 +420,8 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
+       .set_property = vmw_du_connector_set_property,
+       .destroy = vmw_sou_connector_destroy,
+       .reset = vmw_du_connector_reset,
+-      .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+-      .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
++      .atomic_duplicate_state = vmw_du_connector_duplicate_state,
++      .atomic_destroy_state = vmw_du_connector_destroy_state,
+       .atomic_set_property = vmw_du_connector_atomic_set_property,
+       .atomic_get_property = vmw_du_connector_atomic_get_property,
+ };
+diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
+index 10f00a82ec9d..e54a9b835b62 100644
+--- a/drivers/i2c/i2c-core-smbus.c
++++ b/drivers/i2c/i2c-core-smbus.c
+@@ -396,16 +396,17 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
+                                  the underlying bus driver */
+               break;
+       case I2C_SMBUS_I2C_BLOCK_DATA:
++              if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
++                      dev_err(&adapter->dev, "Invalid block %s size %d\n",
++                              read_write == I2C_SMBUS_READ ? "read" : "write",
++                              data->block[0]);
++                      return -EINVAL;
++              }
++
+               if (read_write == I2C_SMBUS_READ) {
+                       msg[1].len = data->block[0];
+               } else {
+                       msg[0].len = data->block[0] + 1;
+-                      if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) {
+-                              dev_err(&adapter->dev,
+-                                      "Invalid block write size %d\n",
+-                                      data->block[0]);
+-                              return -EINVAL;
+-                      }
+                       for (i = 1; i <= data->block[0]; i++)
+                               msgbuf0[i] = data->block[i];
+               }
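Moving the length check ahead of the read/write split means an out-of-range block[0] can no longer drive an unchecked-length block read. The validation, restated (I2C_SMBUS_BLOCK_MAX is 32 in the kernel's UAPI):

    #include <stdbool.h>

    #define I2C_SMBUS_BLOCK_MAX 32

    static bool smbus_block_len_ok(unsigned char len)
    {
            /* applied to both reads and writes after the fix */
            return len <= I2C_SMBUS_BLOCK_MAX;
    }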
+diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
+index d9a1e9893136..fd28f09b4445 100644
+--- a/drivers/infiniband/hw/hfi1/file_ops.c
++++ b/drivers/infiniband/hw/hfi1/file_ops.c
+@@ -881,11 +881,11 @@ static int complete_subctxt(struct hfi1_filedata *fd)
+       }
+ 
+       if (ret) {
+-              hfi1_rcd_put(fd->uctxt);
+-              fd->uctxt = NULL;
+               spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
+               __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
+               spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
++              hfi1_rcd_put(fd->uctxt);
++              fd->uctxt = NULL;
+       }
+ 
+       return ret;
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index acb79d3a4f1d..756ece6118c0 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -4303,12 +4303,11 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
+ 
+       memset(ah_attr, 0, sizeof(*ah_attr));
+ 
+-      ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
+-      rdma_ah_set_port_num(ah_attr, path->port);
+-      if (rdma_ah_get_port_num(ah_attr) == 0 ||
+-          rdma_ah_get_port_num(ah_attr) > MLX5_CAP_GEN(dev, num_ports))
++      if (!path->port || path->port > MLX5_CAP_GEN(dev, num_ports))
+               return;
+ 
++      ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
++
+       rdma_ah_set_port_num(ah_attr, path->port);
+       rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);
+ 
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index ceabdb85df8b..9d4785ba24cb 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -741,6 +741,7 @@ isert_connect_error(struct rdma_cm_id *cma_id)
+ {
+       struct isert_conn *isert_conn = cma_id->qp->qp_context;
+ 
++      ib_drain_qp(isert_conn->qp);
+       list_del_init(&isert_conn->node);
+       isert_conn->cm_id = NULL;
+       isert_put_conn(isert_conn);
+diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
+index 6c51d404874b..c37aea9ac272 100644
+--- a/drivers/input/misc/twl4030-vibra.c
++++ b/drivers/input/misc/twl4030-vibra.c
+@@ -178,12 +178,14 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
+                        twl4030_vibra_suspend, twl4030_vibra_resume);
+ 
+ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
+-                            struct device_node *node)
++                            struct device_node *parent)
+ {
++      struct device_node *node;
++
+       if (pdata && pdata->coexist)
+               return true;
+ 
+-      node = of_find_node_by_name(node, "codec");
++      node = of_get_child_by_name(parent, "codec");
+       if (node) {
+               of_node_put(node);
+               return true;
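of_find_node_by_name() walks the whole device tree from the given node (and drops a reference to it), whereas of_get_child_by_name() inspects only direct children, which is what these probes intend. A toy sketch of the child-only lookup (illustrative types, not the real OF API):

    #include <stddef.h>
    #include <string.h>

    struct node {
            const char *name;
            struct node *child, *sibling;
    };

    static struct node *get_child_by_name(struct node *parent, const char *name)
    {
            struct node *n;

            for (n = parent ? parent->child : NULL; n; n = n->sibling)
                    if (strcmp(n->name, name) == 0)
                            return n;
            return NULL;    /* no whole-tree walk, no dropped reference */
    }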
+diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
+index 5690eb7ff954..15e0d352c4cc 100644
+--- a/drivers/input/misc/twl6040-vibra.c
++++ b/drivers/input/misc/twl6040-vibra.c
+@@ -248,8 +248,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
+       int vddvibr_uV = 0;
+       int error;
+ 
+-      of_node_get(twl6040_core_dev->of_node);
+-      twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
++      twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node,
+                                                "vibra");
+       if (!twl6040_core_node) {
+               dev_err(&pdev->dev, "parent of node is missing?\n");
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index 850b00e3ad8e..3d9c294e84db 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -1250,29 +1250,32 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
+       case SS4_PACKET_ID_MULTI:
+               if (priv->flags & ALPS_BUTTONPAD) {
+                       if (IS_SS4PLUS_DEV(priv->dev_id)) {
+-                              f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+-                              f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
++                              f->mt[2].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
++                              f->mt[3].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
++                              no_data_x = SS4_PLUS_MFPACKET_NO_AX_BL;
+                       } else {
+                               f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
+                               f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
++                              no_data_x = SS4_MFPACKET_NO_AX_BL;
+                       }
++                      no_data_y = SS4_MFPACKET_NO_AY_BL;
+ 
+                       f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
+                       f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
+-                      no_data_x = SS4_MFPACKET_NO_AX_BL;
+-                      no_data_y = SS4_MFPACKET_NO_AY_BL;
+               } else {
+                       if (IS_SS4PLUS_DEV(priv->dev_id)) {
+-                              f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+-                              f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
++                              f->mt[2].x = SS4_PLUS_STD_MF_X_V2(p, 0);
++                              f->mt[3].x = SS4_PLUS_STD_MF_X_V2(p, 1);
++                              no_data_x = SS4_PLUS_MFPACKET_NO_AX;
+                       } else {
+-                              f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+-                              f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
++                              f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
++                              f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
++                              no_data_x = SS4_MFPACKET_NO_AX;
+                       }
++                      no_data_y = SS4_MFPACKET_NO_AY;
++
+                       f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
+                       f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
+-                      no_data_x = SS4_MFPACKET_NO_AX;
+-                      no_data_y = SS4_MFPACKET_NO_AY;
+               }
+ 
+               f->first_mp = 0;
+diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
+index c80a7c76cb76..79b6d69d1486 100644
+--- a/drivers/input/mouse/alps.h
++++ b/drivers/input/mouse/alps.h
+@@ -141,10 +141,12 @@ enum SS4_PACKET_ID {
+ #define SS4_TS_Z_V2(_b)               (s8)(_b[4] & 0x7F)
+ 
+ 
+-#define SS4_MFPACKET_NO_AX    8160    /* X-Coordinate value */
+-#define SS4_MFPACKET_NO_AY    4080    /* Y-Coordinate value */
+-#define SS4_MFPACKET_NO_AX_BL 8176    /* Buttonless X-Coordinate value */
+-#define SS4_MFPACKET_NO_AY_BL 4088    /* Buttonless Y-Coordinate value */
++#define SS4_MFPACKET_NO_AX            8160    /* X-Coordinate value */
++#define SS4_MFPACKET_NO_AY            4080    /* Y-Coordinate value */
++#define SS4_MFPACKET_NO_AX_BL         8176    /* Buttonless X-Coord value */
++#define SS4_MFPACKET_NO_AY_BL         4088    /* Buttonless Y-Coord value */
++#define SS4_PLUS_MFPACKET_NO_AX               4080    /* SS4 PLUS, X */
++#define SS4_PLUS_MFPACKET_NO_AX_BL    4088    /* Buttonless SS4 PLUS, X */
+ 
+ /*
+  * enum V7_PACKET_ID - defines the packet type for V7
+diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
+index 4f2bb5947a4e..141ea228aac6 100644
+--- a/drivers/input/rmi4/rmi_driver.c
++++ b/drivers/input/rmi4/rmi_driver.c
+@@ -230,8 +230,10 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
+               rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
+                       "Failed to process interrupt request: %d\n", ret);
+ 
+-      if (count)
++      if (count) {
+               kfree(attn_data.data);
++              attn_data.data = NULL;
++      }
+ 
+       if (!kfifo_is_empty(&drvdata->attn_fifo))
+               return rmi_irq_fn(irq, dev_id);
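
The rmi_driver hunk clears the pointer after freeing it because, as the
context shows, rmi_irq_fn() re-invokes itself while the attention FIFO is
non-empty, and a stale attn_data.data could otherwise be freed twice. A
standalone model of the idiom (plain C, names shortened for the example):

    #include <stdlib.h>

    struct attn { void *data; };

    static void consume(struct attn *a, int count)
    {
            if (count) {
                    free(a->data);
                    a->data = NULL; /* a repeated free() is now a no-op */
            }
    }
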
+diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
+index 7ed828a51f4c..3486d9403805 100644
+--- a/drivers/input/touchscreen/88pm860x-ts.c
++++ b/drivers/input/touchscreen/88pm860x-ts.c
+@@ -126,7 +126,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
+       int data, n, ret;
+       if (!np)
+               return -ENODEV;
+-      np = of_find_node_by_name(np, "touch");
++      np = of_get_child_by_name(np, "touch");
+       if (!np) {
+               dev_err(&pdev->dev, "Can't find touch node\n");
+               return -EINVAL;
+@@ -144,13 +144,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
+       if (data) {
+               ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data);
+               if (ret < 0)
+-                      return -EINVAL;
++                      goto err_put_node;
+       }
+       /* set tsi prebias time */
+       if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) {
+               ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data);
+               if (ret < 0)
+-                      return -EINVAL;
++                      goto err_put_node;
+       }
+       /* set prebias & prechg time of pen detect */
+       data = 0;
+@@ -161,10 +161,18 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
+       if (data) {
+               ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data);
+               if (ret < 0)
+-                      return -EINVAL;
++                      goto err_put_node;
+       }
+       of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x);
++
++      of_node_put(np);
++
+       return 0;
++
++err_put_node:
++      of_node_put(np);
++
++      return -EINVAL;
+ }
+ #else
+ #define pm860x_touch_dt_init(x, y, z) (-1)
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 9fc12f556534..554d60394c06 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1954,10 +1954,15 @@ static int crypt_setkey(struct crypt_config *cc)
+       /* Ignore extra keys (which are used for IV etc) */
+       subkey_size = crypt_subkey_size(cc);
+ 
+-      if (crypt_integrity_hmac(cc))
++      if (crypt_integrity_hmac(cc)) {
++              if (subkey_size < cc->key_mac_size)
++                      return -EINVAL;
++
+               crypt_copy_authenckey(cc->authenc_key, cc->key,
+                                     subkey_size - cc->key_mac_size,
+                                     cc->key_mac_size);
++      }
++
+       for (i = 0; i < cc->tfms_count; i++) {
+               if (crypt_integrity_hmac(cc))
+                       r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
+@@ -2053,9 +2058,6 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
+ 
+       ret = crypt_setkey(cc);
+ 
+-      /* wipe the kernel key payload copy in each case */
+-      memset(cc->key, 0, cc->key_size * sizeof(u8));
+-
+       if (!ret) {
+               set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+               kzfree(cc->key_string);
+@@ -2523,6 +2525,10 @@ static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
+               }
+       }
+ 
++      /* wipe the kernel key payload copy */
++      if (cc->key_string)
++              memset(cc->key, 0, cc->key_size * sizeof(u8));
++
+       return ret;
+ }
+ 
+@@ -2740,6 +2746,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+                       cc->tag_pool_max_sectors * cc->on_disk_tag_size);
+               if (!cc->tag_pool) {
+                       ti->error = "Cannot allocate integrity tags mempool";
++                      ret = -ENOMEM;
+                       goto bad;
+               }
+ 
+@@ -2961,6 +2968,9 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
+                               return ret;
+                       if (cc->iv_gen_ops && cc->iv_gen_ops->init)
+                               ret = cc->iv_gen_ops->init(cc);
++                      /* wipe the kernel key payload copy */
++                      if (cc->key_string)
++                              memset(cc->key, 0, cc->key_size * sizeof(u8));
+                       return ret;
+               }
+               if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
+@@ -3007,7 +3017,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
+ 
+ static struct target_type crypt_target = {
+       .name   = "crypt",
+-      .version = {1, 18, 0},
++      .version = {1, 18, 1},
+       .module = THIS_MODULE,
+       .ctr    = crypt_ctr,
+       .dtr    = crypt_dtr,
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 5e6737a44468..3cc2052f972c 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -2558,7 +2558,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
+       int r = 0;
+       unsigned i;
+       __u64 journal_pages, journal_desc_size, journal_tree_size;
+-      unsigned char *crypt_data = NULL;
++      unsigned char *crypt_data = NULL, *crypt_iv = NULL;
++      struct skcipher_request *req = NULL;
+ 
+       ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
+       ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
+@@ -2616,9 +2617,20 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
+ 
+               if (blocksize == 1) {
+                       struct scatterlist *sg;
+-                      SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
+-                      unsigned char iv[ivsize];
+-                      skcipher_request_set_tfm(req, ic->journal_crypt);
++
++                      req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
++                      if (!req) {
++                              *error = "Could not allocate crypt request";
++                              r = -ENOMEM;
++                              goto bad;
++                      }
++
++                      crypt_iv = kmalloc(ivsize, GFP_KERNEL);
++                      if (!crypt_iv) {
++                              *error = "Could not allocate iv";
++                              r = -ENOMEM;
++                              goto bad;
++                      }
+ 
+                       ic->journal_xor = dm_integrity_alloc_page_list(ic);
+                       if (!ic->journal_xor) {
+@@ -2640,9 +2652,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
+                               sg_set_buf(&sg[i], va, PAGE_SIZE);
+                       }
+                      sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
+-                      memset(iv, 0x00, ivsize);
++                      memset(crypt_iv, 0x00, ivsize);
+ 
+-                      skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv);
++                      skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
+                       init_completion(&comp.comp);
+                       comp.in_flight = (atomic_t)ATOMIC_INIT(1);
+                       if (do_crypt(true, req, &comp))
+@@ -2658,10 +2670,22 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
+                       crypto_free_skcipher(ic->journal_crypt);
+                       ic->journal_crypt = NULL;
+               } else {
+-                      SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
+-                      unsigned char iv[ivsize];
+                       unsigned crypt_len = roundup(ivsize, blocksize);
+ 
++                      req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
++                      if (!req) {
++                              *error = "Could not allocate crypt request";
++                              r = -ENOMEM;
++                              goto bad;
++                      }
++
++                      crypt_iv = kmalloc(ivsize, GFP_KERNEL);
++                      if (!crypt_iv) {
++                              *error = "Could not allocate iv";
++                              r = -ENOMEM;
++                              goto bad;
++                      }
++
+                       crypt_data = kmalloc(crypt_len, GFP_KERNEL);
+                       if (!crypt_data) {
+                               *error = "Unable to allocate crypt data";
+@@ -2669,8 +2693,6 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
+                               goto bad;
+                       }
+ 
+-                      skcipher_request_set_tfm(req, ic->journal_crypt);
+-
+                      ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
+                       if (!ic->journal_scatterlist) {
+                               *error = "Unable to allocate sg list";
+@@ -2694,12 +2716,12 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
+                               struct skcipher_request *section_req;
+                               __u32 section_le = cpu_to_le32(i);
+ 
+-                              memset(iv, 0x00, ivsize);
++                              memset(crypt_iv, 0x00, ivsize);
+                               memset(crypt_data, 0x00, crypt_len);
+                               memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
+ 
+                               sg_init_one(&sg, crypt_data, crypt_len);
+-                              skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
++                              skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
+                               init_completion(&comp.comp);
+                               comp.in_flight = (atomic_t)ATOMIC_INIT(1);
+                               if (do_crypt(true, req, &comp))
+@@ -2757,6 +2779,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
+       }
+ bad:
+       kfree(crypt_data);
++      kfree(crypt_iv);
++      skcipher_request_free(req);
++
+       return r;
+ }
+ 
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index d31d18d9727c..36ef284ad086 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -80,10 +80,14 @@
+ #define SECTOR_TO_BLOCK_SHIFT 3
+ 
+ /*
++ * For btree insert:
+  *  3 for btree insert +
+  *  2 for btree lookup used within space map
++ * For btree remove:
++ *  2 for shadow spine +
++ *  4 for rebalance 3 child node
+  */
+-#define THIN_MAX_CONCURRENT_LOCKS 5
++#define THIN_MAX_CONCURRENT_LOCKS 6
+ 
+ /* This should be plenty */
+ #define SPACE_MAP_ROOT_SIZE 128
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index f21ce6a3d4cf..58b319757b1e 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -683,23 +683,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
+       pn->keys[1] = rn->keys[0];
+       memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
+ 
+-      /*
+-       * rejig the spine.  This is ugly, since it knows too
+-       * much about the spine
+-       */
+-      if (s->nodes[0] != new_parent) {
+-              unlock_block(s->info, s->nodes[0]);
+-              s->nodes[0] = new_parent;
+-      }
+-      if (key < le64_to_cpu(rn->keys[0])) {
+-              unlock_block(s->info, right);
+-              s->nodes[1] = left;
+-      } else {
+-              unlock_block(s->info, left);
+-              s->nodes[1] = right;
+-      }
+-      s->count = 2;
+-
++      unlock_block(s->info, left);
++      unlock_block(s->info, right);
+       return 0;
+ }
+ 
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index 85140c9af581..8b941f814472 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -687,6 +687,20 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
+               return;
+       }
+ 
++      /* For i.MX53 eSDHCv3, SYSCTL.SDCLKFS may not be set to 0. */
++      if (is_imx53_esdhc(imx_data)) {
++              /*
++               * According to the i.MX53 reference manual, if DLLCTRL[10] can
++               * be set, then the controller is eSDHCv3, else it is eSDHCv2.
++               */
++              val = readl(host->ioaddr + ESDHC_DLL_CTRL);
++              writel(val | BIT(10), host->ioaddr + ESDHC_DLL_CTRL);
++              temp = readl(host->ioaddr + ESDHC_DLL_CTRL);
++              writel(val, host->ioaddr + ESDHC_DLL_CTRL);
++              if (temp & BIT(10))
++                      pre_div = 2;
++      }
++
+       temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+       temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+               | ESDHC_CLOCK_MASK);
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+index 7ccdc3e30c98..53d6bb045e9e 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+@@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
+       void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
+       int err = 0;
+       u8 *packet_ptr;
+-      int i, n = 1, packet_len;
++      int packet_len;
+       ptrdiff_t cmd_len;
+ 
+       /* usb device unregistered? */
+@@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
+       }
+ 
+       packet_ptr = cmd_head;
++      packet_len = cmd_len;
+ 
+       /* firmware is not able to re-assemble 512 bytes buffer in full-speed */
+-      if ((dev->udev->speed != USB_SPEED_HIGH) &&
+-          (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) {
+-              packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
+-              n += cmd_len / packet_len;
+-      } else {
+-              packet_len = cmd_len;
+-      }
++      if (unlikely(dev->udev->speed != USB_SPEED_HIGH))
++              packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE);
+ 
+-      for (i = 0; i < n; i++) {
++      do {
+               err = usb_bulk_msg(dev->udev,
+                                  usb_sndbulkpipe(dev->udev,
+                                                  PCAN_USBPRO_EP_CMDOUT),
+@@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
+               }
+ 
+               packet_ptr += packet_len;
+-      }
++              cmd_len -= packet_len;
++
++              if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE)
++                      packet_len = cmd_len;
++
++      } while (packet_len > 0);
+ 
+       return err;
+ }
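
The rewritten loop above replaces the precomputed packet count with simple
remaining-length bookkeeping. A standalone model of just the chunking logic
(printf stands in for usb_bulk_msg(), and the 64-byte limit is an assumed
value for PCAN_UFD_LOSPD_PKT_SIZE, used only for illustration):

    #include <stdio.h>

    #define LOSPD_PKT_SIZE 64

    static void send_chunks(int cmd_len, int full_speed)
    {
            int packet_len = cmd_len;

            /* full-speed firmware cannot reassemble one 512-byte buffer */
            if (full_speed && packet_len > LOSPD_PKT_SIZE)
                    packet_len = LOSPD_PKT_SIZE;

            do {
                    printf("bulk transfer: %d bytes\n", packet_len);
                    cmd_len -= packet_len;
                    if (cmd_len < LOSPD_PKT_SIZE)
                            packet_len = cmd_len;   /* final short packet */
            } while (packet_len > 0);
    }

    int main(void)
    {
            send_chunks(130, 1);    /* -> 64 + 64 + 2 */
            return 0;
    }
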
+diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
+index fcf9ba5eb8d1..d147dc7d0f77 100644
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -4552,11 +4552,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
+                      MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+               val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
+               writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
+-
+-              val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
+-              val |= MVPP2_GMAC_DISABLE_PADDING;
+-              val &= ~MVPP2_GMAC_FLOW_CTRL_MASK;
+-              writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
+       } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
+               val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
+               val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
+@@ -4564,10 +4559,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
+                      MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
+               val &= ~MVPP22_CTRL4_DP_CLK_SEL;
+               writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
+-
+-              val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
+-              val &= ~MVPP2_GMAC_DISABLE_PADDING;
+-              writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
+       }
+ 
+       /* The port is connected to a copper PHY */
+diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
+index a268f4d6f3e9..48a365e303e5 100644
+--- a/drivers/phy/phy-core.c
++++ b/drivers/phy/phy-core.c
+@@ -395,6 +395,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index)
+       if (ret)
+               return ERR_PTR(-ENODEV);
+ 
++      /* This phy type handled by the usb-phy subsystem for now */
++      if (of_device_is_compatible(args.np, "usb-nop-xceiv"))
++              return ERR_PTR(-ENODEV);
++
+       mutex_lock(&phy_provider_mutex);
+       phy_provider = of_phy_provider_lookup(args.np);
+       if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
+diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
+index ea8ad06ff582..10b17da20176 100644
+--- a/drivers/scsi/libsas/sas_scsi_host.c
++++ b/drivers/scsi/libsas/sas_scsi_host.c
+@@ -486,15 +486,28 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
+ 
+ int sas_eh_abort_handler(struct scsi_cmnd *cmd)
+ {
+-      int res;
++      int res = TMF_RESP_FUNC_FAILED;
+       struct sas_task *task = TO_SAS_TASK(cmd);
+       struct Scsi_Host *host = cmd->device->host;
++      struct domain_device *dev = cmd_to_domain_dev(cmd);
+       struct sas_internal *i = to_sas_internal(host->transportt);
++      unsigned long flags;
+ 
+       if (!i->dft->lldd_abort_task)
+               return FAILED;
+ 
+-      res = i->dft->lldd_abort_task(task);
++      spin_lock_irqsave(host->host_lock, flags);
++      /* We cannot do async aborts for SATA devices */
++      if (dev_is_sata(dev) && !host->host_eh_scheduled) {
++              spin_unlock_irqrestore(host->host_lock, flags);
++              return FAILED;
++      }
++      spin_unlock_irqrestore(host->host_lock, flags);
++
++      if (task)
++              res = i->dft->lldd_abort_task(task);
++      else
++              SAS_DPRINTK("no task to abort\n");
+       if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
+               return SUCCESS;
+ 
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 3909c55ed389..f0f4ab36c444 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -1018,13 +1018,19 @@ const struct file_operations pipefifo_fops = {
+ 
+ /*
+  * Currently we rely on the pipe array holding a power-of-2 number
+- * of pages.
++ * of pages. Returns 0 on error.
+  */
+ static inline unsigned int round_pipe_size(unsigned int size)
+ {
+       unsigned long nr_pages;
+ 
++      if (size < pipe_min_size)
++              size = pipe_min_size;
++
+       nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++      if (nr_pages == 0)
++              return 0;
++
+       return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
+ }
+ 
+@@ -1040,6 +1046,8 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
+       long ret = 0;
+ 
+       size = round_pipe_size(arg);
++      if (size == 0)
++              return -EINVAL;
+       nr_pages = size >> PAGE_SHIFT;
+ 
+       if (!nr_pages)
+@@ -1123,13 +1131,18 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
+ int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
+                size_t *lenp, loff_t *ppos)
+ {
++      unsigned int rounded_pipe_max_size;
+       int ret;
+ 
+       ret = proc_douintvec_minmax(table, write, buf, lenp, ppos);
+       if (ret < 0 || !write)
+               return ret;
+ 
+-      pipe_max_size = round_pipe_size(pipe_max_size);
++      rounded_pipe_max_size = round_pipe_size(pipe_max_size);
++      if (rounded_pipe_max_size == 0)
++              return -EINVAL;
++
++      pipe_max_size = rounded_pipe_max_size;
+       return ret;
+ }
+ 
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 9390032a11e1..e6094a15ef30 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -424,8 +424,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+                * safe because the task has stopped executing permanently.
+                */
+               if (permitted && (task->flags & PF_DUMPCORE)) {
+-                      eip = KSTK_EIP(task);
+-                      esp = KSTK_ESP(task);
++                      if (try_get_task_stack(task)) {
++                              eip = KSTK_EIP(task);
++                              esp = KSTK_ESP(task);
++                              put_task_stack(task);
++                      }
+               }
+       }
+ 
+diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
+index 4178d2493547..5e335b6203f4 100644
+--- a/include/linux/delayacct.h
++++ b/include/linux/delayacct.h
+@@ -71,7 +71,7 @@ extern void delayacct_init(void);
+ extern void __delayacct_tsk_init(struct task_struct *);
+ extern void __delayacct_tsk_exit(struct task_struct *);
+ extern void __delayacct_blkio_start(void);
+-extern void __delayacct_blkio_end(void);
++extern void __delayacct_blkio_end(struct task_struct *);
+ extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
+ extern __u64 __delayacct_blkio_ticks(struct task_struct *);
+ extern void __delayacct_freepages_start(void);
+@@ -122,10 +122,10 @@ static inline void delayacct_blkio_start(void)
+               __delayacct_blkio_start();
+ }
+ 
+-static inline void delayacct_blkio_end(void)
++static inline void delayacct_blkio_end(struct task_struct *p)
+ {
+       if (current->delays)
+-              __delayacct_blkio_end();
++              __delayacct_blkio_end(p);
+       delayacct_clear_flag(DELAYACCT_PF_BLKIO);
+ }
+ 
+@@ -169,7 +169,7 @@ static inline void delayacct_tsk_free(struct task_struct *tsk)
+ {}
+ static inline void delayacct_blkio_start(void)
+ {}
+-static inline void delayacct_blkio_end(void)
++static inline void delayacct_blkio_end(struct task_struct *p)
+ {}
+ static inline int delayacct_add_tsk(struct taskstats *d,
+                                       struct task_struct *tsk)
+diff --git a/include/linux/swapops.h b/include/linux/swapops.h
+index 9c5a2628d6ce..1d3877c39a00 100644
+--- a/include/linux/swapops.h
++++ b/include/linux/swapops.h
+@@ -124,6 +124,11 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
+       return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
+ }
+ 
++static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
++{
++      return swp_offset(entry);
++}
++
+ static inline struct page *device_private_entry_to_page(swp_entry_t entry)
+ {
+       return pfn_to_page(swp_offset(entry));
+@@ -154,6 +159,11 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
+       return false;
+ }
+ 
++static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
++{
++      return 0;
++}
++
+ static inline struct page *device_private_entry_to_page(swp_entry_t entry)
+ {
+       return NULL;
+@@ -189,6 +199,11 @@ static inline int is_write_migration_entry(swp_entry_t entry)
+       return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
+ }
+ 
++static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
++{
++      return swp_offset(entry);
++}
++
+ static inline struct page *migration_entry_to_page(swp_entry_t entry)
+ {
+       struct page *p = pfn_to_page(swp_offset(entry));
+@@ -218,6 +233,12 @@ static inline int is_migration_entry(swp_entry_t swp)
+ {
+       return 0;
+ }
++
++static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
++{
++      return 0;
++}
++
+ static inline struct page *migration_entry_to_page(swp_entry_t entry)
+ {
+       return NULL;
+diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
+index bae807eb2933..853291714ae0 100644
+--- a/include/linux/vermagic.h
++++ b/include/linux/vermagic.h
+@@ -31,11 +31,17 @@
+ #else
+ #define MODULE_RANDSTRUCT_PLUGIN
+ #endif
++#ifdef RETPOLINE
++#define MODULE_VERMAGIC_RETPOLINE "retpoline "
++#else
++#define MODULE_VERMAGIC_RETPOLINE ""
++#endif
+ 
+ #define VERMAGIC_STRING                                               \
+       UTS_RELEASE " "                                                 \
+       MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT                     \
+       MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS       \
+       MODULE_ARCH_VERMAGIC                                            \
+-      MODULE_RANDSTRUCT_PLUGIN
++      MODULE_RANDSTRUCT_PLUGIN                                        \
++      MODULE_VERMAGIC_RETPOLINE
+ 
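
VERMAGIC_STRING is assembled entirely from adjacent string literals, so the
new "retpoline " tag is folded in at compile time, and retpoline-built
modules stop matching non-retpoline kernels. A standalone illustration (the
version and the SMP tag are made-up values for the example):

    #include <stdio.h>

    #define MODULE_VERMAGIC_SMP       "SMP "
    #ifdef RETPOLINE
    #define MODULE_VERMAGIC_RETPOLINE "retpoline "
    #else
    #define MODULE_VERMAGIC_RETPOLINE ""
    #endif

    /* adjacent literals merge into one string constant */
    static const char vermagic[] =
            "4.14.15 " MODULE_VERMAGIC_SMP MODULE_VERMAGIC_RETPOLINE;

    int main(void)
    {
            puts(vermagic); /* "4.14.15 SMP retpoline " with -DRETPOLINE */
            return 0;
    }
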
+diff --git a/kernel/delayacct.c b/kernel/delayacct.c
+index 4a1c33416b6a..e2764d767f18 100644
+--- a/kernel/delayacct.c
++++ b/kernel/delayacct.c
+@@ -51,16 +51,16 @@ void __delayacct_tsk_init(struct task_struct *tsk)
+  * Finish delay accounting for a statistic using its timestamps (@start),
+  * accumulator (@total) and @count
+  */
+-static void delayacct_end(u64 *start, u64 *total, u32 *count)
++static void delayacct_end(spinlock_t *lock, u64 *start, u64 *total, u32 *count)
+ {
+       s64 ns = ktime_get_ns() - *start;
+       unsigned long flags;
+ 
+       if (ns > 0) {
+-              spin_lock_irqsave(&current->delays->lock, flags);
++              spin_lock_irqsave(lock, flags);
+               *total += ns;
+               (*count)++;
+-              spin_unlock_irqrestore(&current->delays->lock, flags);
++              spin_unlock_irqrestore(lock, flags);
+       }
+ }
+ 
+@@ -69,17 +69,25 @@ void __delayacct_blkio_start(void)
+       current->delays->blkio_start = ktime_get_ns();
+ }
+ 
+-void __delayacct_blkio_end(void)
++/*
++ * We cannot rely on the `current` macro, as we haven't yet switched back to
++ * the process being woken.
++ */
++void __delayacct_blkio_end(struct task_struct *p)
+ {
+-      if (current->delays->flags & DELAYACCT_PF_SWAPIN)
+-              /* Swapin block I/O */
+-              delayacct_end(&current->delays->blkio_start,
+-                      &current->delays->swapin_delay,
+-                      &current->delays->swapin_count);
+-      else    /* Other block I/O */
+-              delayacct_end(&current->delays->blkio_start,
+-                      &current->delays->blkio_delay,
+-                      &current->delays->blkio_count);
++      struct task_delay_info *delays = p->delays;
++      u64 *total;
++      u32 *count;
++
++      if (p->delays->flags & DELAYACCT_PF_SWAPIN) {
++              total = &delays->swapin_delay;
++              count = &delays->swapin_count;
++      } else {
++              total = &delays->blkio_delay;
++              count = &delays->blkio_count;
++      }
++
++      delayacct_end(&delays->lock, &delays->blkio_start, total, count);
+ }
+ 
+ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
+@@ -153,8 +161,10 @@ void __delayacct_freepages_start(void)
+ 
+ void __delayacct_freepages_end(void)
+ {
+-      delayacct_end(&current->delays->freepages_start,
+-                      &current->delays->freepages_delay,
+-                      &current->delays->freepages_count);
++      delayacct_end(
++              &current->delays->lock,
++              &current->delays->freepages_start,
++              &current->delays->freepages_delay,
++              &current->delays->freepages_count);
+ }
+ 
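
The point of threading a task_struct through delayacct_blkio_end() is that
the accounting now runs from the waker's context, where "current" is not
the task whose delay just ended. A reduced standalone model of the end-side
bookkeeping (pthread mutex in place of the kernel spinlock, fields trimmed
to the ones touched here):

    #include <pthread.h>

    struct task_delay_info {
            pthread_mutex_t lock;
            unsigned long long blkio_start;
            unsigned long long blkio_delay, swapin_delay;
            unsigned int blkio_count, swapin_count;
            int swapin;             /* DELAYACCT_PF_SWAPIN analogue */
    };

    /* @d belongs to the task being woken, never to the caller */
    static void blkio_end(struct task_delay_info *d, unsigned long long now)
    {
            unsigned long long *total = d->swapin ? &d->swapin_delay
                                                  : &d->blkio_delay;
            unsigned int *count = d->swapin ? &d->swapin_count
                                            : &d->blkio_count;
            long long ns = (long long)(now - d->blkio_start);

            if (ns > 0) {
                    pthread_mutex_lock(&d->lock);
                    *total += ns;
                    (*count)++;
                    pthread_mutex_unlock(&d->lock);
            }
    }
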
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 76ed5921117a..52b3f4703158 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1878,6 +1878,9 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+       struct futex_q *this, *next;
+       DEFINE_WAKE_Q(wake_q);
+ 
++      if (nr_wake < 0 || nr_requeue < 0)
++              return -EINVAL;
++
+       /*
+        * When PI not supported: return -ENOSYS if requeue_pi is true,
+        * consequently the compiler knows requeue_pi is always false past
+@@ -2294,21 +2297,17 @@ static void unqueue_me_pi(struct futex_q *q)
+       spin_unlock(q->lock_ptr);
+ }
+ 
+-/*
+- * Fixup the pi_state owner with the new owner.
+- *
+- * Must be called with hash bucket lock held and mm->sem held for non
+- * private futexes.
+- */
+ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+-                              struct task_struct *newowner)
++                              struct task_struct *argowner)
+ {
+-      u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
+       struct futex_pi_state *pi_state = q->pi_state;
+       u32 uval, uninitialized_var(curval), newval;
+-      struct task_struct *oldowner;
++      struct task_struct *oldowner, *newowner;
++      u32 newtid;
+       int ret;
+ 
++      lockdep_assert_held(q->lock_ptr);
++
+       raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+ 
+       oldowner = pi_state->owner;
+@@ -2317,11 +2316,17 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+               newtid |= FUTEX_OWNER_DIED;
+ 
+       /*
+-       * We are here either because we stole the rtmutex from the
+-       * previous highest priority waiter or we are the highest priority
+-       * waiter but have failed to get the rtmutex the first time.
++       * We are here because either:
++       *
++       *  - we stole the lock and pi_state->owner needs updating to reflect
++       *    that (@argowner == current),
++       *
++       * or:
++       *
++       *  - someone stole our lock and we need to fix things to point to the
++       *    new owner (@argowner == NULL).
+        *
+-       * We have to replace the newowner TID in the user space variable.
++       * Either way, we have to replace the TID in the user space variable.
+        * This must be atomic as we have to preserve the owner died bit here.
+        *
+        * Note: We write the user space value _before_ changing the pi_state
+@@ -2334,6 +2339,42 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+        * in the PID check in lookup_pi_state.
+        */
+ retry:
++      if (!argowner) {
++              if (oldowner != current) {
++                      /*
++                       * We raced against a concurrent self; things are
++                       * already fixed up. Nothing to do.
++                       */
++                      ret = 0;
++                      goto out_unlock;
++              }
++
++              if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
++                      /* We got the lock after all, nothing to fix. */
++                      ret = 0;
++                      goto out_unlock;
++              }
++
++              /*
++               * Since we just failed the trylock; there must be an owner.
++               */
++              newowner = rt_mutex_owner(&pi_state->pi_mutex);
++              BUG_ON(!newowner);
++      } else {
++              WARN_ON_ONCE(argowner != current);
++              if (oldowner == current) {
++                      /*
++                       * We raced against a concurrent self; things are
++                       * already fixed up. Nothing to do.
++                       */
++                      ret = 0;
++                      goto out_unlock;
++              }
++              newowner = argowner;
++      }
++
++      newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
++
+       if (get_futex_value_locked(&uval, uaddr))
+               goto handle_fault;
+ 
+@@ -2434,15 +2475,28 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
+                * Got the lock. We might not be the anticipated owner if we
+                * did a lock-steal - fix up the PI-state in that case:
+                *
+-               * We can safely read pi_state->owner without holding wait_lock
+-               * because we now own the rt_mutex, only the owner will attempt
+-               * to change it.
++               * Speculative pi_state->owner read (we don't hold wait_lock);
++               * since we own the lock pi_state->owner == current is the
++               * stable state, anything else needs more attention.
+                */
+               if (q->pi_state->owner != current)
+                       ret = fixup_pi_state_owner(uaddr, q, current);
+               goto out;
+       }
+ 
++      /*
++       * If we didn't get the lock; check if anybody stole it from us. In
++       * that case, we need to fix up the uval to point to them instead of
++       * us, otherwise bad things happen. [10]
++       *
++       * Another speculative read; pi_state->owner == current is unstable
++       * but needs our attention.
++       */
++      if (q->pi_state->owner == current) {
++              ret = fixup_pi_state_owner(uaddr, q, NULL);
++              goto out;
++      }
++
+       /*
+        * Paranoia check. If we did not take the lock, then we should not be
+        * the owner of the rt_mutex.
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 6f3dba6e4e9e..65cc0cb984e6 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1290,6 +1290,19 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+       return ret;
+ }
+ 
++static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
++{
++      int ret = try_to_take_rt_mutex(lock, current, NULL);
++
++      /*
++       * try_to_take_rt_mutex() sets the lock waiters bit
++       * unconditionally. Clean this up.
++       */
++      fixup_rt_mutex_waiters(lock);
++
++      return ret;
++}
++
+ /*
+  * Slow path try-lock function:
+  */
+@@ -1312,13 +1325,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
+        */
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
+ 
+-      ret = try_to_take_rt_mutex(lock, current, NULL);
+-
+-      /*
+-       * try_to_take_rt_mutex() sets the lock waiters bit
+-       * unconditionally. Clean this up.
+-       */
+-      fixup_rt_mutex_waiters(lock);
++      ret = __rt_mutex_slowtrylock(lock);
+ 
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+ 
+@@ -1505,6 +1512,11 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
+       return rt_mutex_slowtrylock(lock);
+ }
+ 
++int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
++{
++      return __rt_mutex_slowtrylock(lock);
++}
++
+ /**
+  * rt_mutex_timed_lock - lock a rt_mutex interruptible
+  *                    the timeout structure is provided
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index 124e98ca0b17..68686b3ec3c1 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -148,6 +148,7 @@ extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
+                                struct rt_mutex_waiter *waiter);
+ 
+ extern int rt_mutex_futex_trylock(struct rt_mutex *l);
++extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
+ 
+ extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
+ extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 8fa7b6f9e19b..55062461b2fd 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2046,7 +2046,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+       p->state = TASK_WAKING;
+ 
+       if (p->in_iowait) {
+-              delayacct_blkio_end();
++              delayacct_blkio_end(p);
+               atomic_dec(&task_rq(p)->nr_iowait);
+       }
+ 
+@@ -2059,7 +2059,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ #else /* CONFIG_SMP */
+ 
+       if (p->in_iowait) {
+-              delayacct_blkio_end();
++              delayacct_blkio_end(p);
+               atomic_dec(&task_rq(p)->nr_iowait);
+       }
+ 
+@@ -2112,7 +2112,7 @@ static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
+ 
+       if (!task_on_rq_queued(p)) {
+               if (p->in_iowait) {
+-                      delayacct_blkio_end();
++                      delayacct_blkio_end(p);
+                       atomic_dec(&rq->nr_iowait);
+               }
+               ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK);
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 73e3cdbc61f1..db5e6daadd94 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1656,7 +1656,7 @@ void run_local_timers(void)
+       hrtimer_run_queues();
+       /* Raise the softirq only if required. */
+       if (time_before(jiffies, base->clk)) {
+-              if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
++              if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
+                       return;
+               /* CPU is awake, so check the deferrable base. */
+               base++;
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 87468398b9ed..d53268a4e167 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -2213,6 +2213,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
+ {
+       struct trace_event_call *call, *p;
+       const char *last_system = NULL;
++      bool first = false;
+       int last_i;
+       int i;
+ 
+@@ -2220,15 +2221,28 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
+       list_for_each_entry_safe(call, p, &ftrace_events, list) {
+               /* events are usually grouped together with systems */
+               if (!last_system || call->class->system != last_system) {
++                      first = true;
+                       last_i = 0;
+                       last_system = call->class->system;
+               }
+ 
++              /*
++               * Since calls are grouped by systems, the likelihood that the
++               * next call in the iteration belongs to the same system as the
++               * previous call is high. As an optimization, we skip searching
++               * for a map[] that matches the call's system if the last call
++               * was from the same system. That's what last_i is for. If the
++               * call has the same system as the previous call, then last_i
++               * will be the index of the first map[] that has a matching
++               * system.
++               */
+               for (i = last_i; i < len; i++) {
+                       if (call->class->system == map[i]->system) {
+                               /* Save the first system if need be */
+-                              if (!last_i)
++                              if (first) {
+                                       last_i = i;
++                                      first = false;
++                              }
+                               update_event_printk(call, map[i]);
+                       }
+               }
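
The new "first" flag matters because last_i == 0 is ambiguous: under the old
"if (!last_i)" test, a system whose first matching map[] entry sits at index
0 would end up recording a later match as its start, so subsequent events of
that system skip earlier map entries. A standalone model of the fixed scan
(the call and map tables are toy data):

    #include <stdio.h>
    #include <string.h>

    static const char *calls[] = { "sched", "sched", "irq", "irq" };
    static const char *map[]   = { "irq", "irq", "sched", "sched" };

    int main(void)
    {
            const char *last_system = NULL;
            int last_i = 0, first = 0;
            int c, i;

            for (c = 0; c < 4; c++) {
                    if (!last_system || strcmp(calls[c], last_system)) {
                            first = 1;      /* new group: re-learn start */
                            last_i = 0;
                            last_system = calls[c];
                    }
                    for (i = last_i; i < 4; i++) {
                            if (!strcmp(calls[c], map[i])) {
                                    if (first) {    /* correct even at i == 0 */
                                            last_i = i;
                                            first = 0;
                                    }
                                    printf("update %s via map[%d]\n",
                                           calls[c], i);
                            }
                    }
            }
            return 0;
    }
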
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index a2dccfe1acec..8365a52a74c5 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -48,6 +48,7 @@
+ #include <linux/nodemask.h>
+ #include <linux/moduleparam.h>
+ #include <linux/uaccess.h>
++#include <linux/nmi.h>
+ 
+ #include "workqueue_internal.h"
+ 
+@@ -4479,6 +4480,12 @@ void show_workqueue_state(void)
+                       if (pwq->nr_active || !list_empty(&pwq->delayed_works))
+                               show_pwq(pwq);
+                       spin_unlock_irqrestore(&pwq->pool->lock, flags);
++                      /*
++                       * We could be printing a lot from atomic context, e.g.
++                       * sysrq-t -> show_workqueue_state(). Avoid triggering
++                       * hard lockup.
++                       */
++                      touch_nmi_watchdog();
+               }
+       }
+ 
+@@ -4506,6 +4513,12 @@ void show_workqueue_state(void)
+               pr_cont("\n");
+       next_pool:
+               spin_unlock_irqrestore(&pool->lock, flags);
++              /*
++               * We could be printing a lot from atomic context, e.g.
++               * sysrq-t -> show_workqueue_state(). Avoid triggering
++               * hard lockup.
++               */
++              touch_nmi_watchdog();
+       }
+ 
+       rcu_read_unlock_sched();
+diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
+index d22b84310f6d..956015614395 100644
+--- a/mm/page_vma_mapped.c
++++ b/mm/page_vma_mapped.c
+@@ -30,10 +30,29 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
+       return true;
+ }
+ 
++/**
++ * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
++ *
++ * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
++ * mapped. check_pte() has to validate this.
++ *
++ * @pvmw->pte may point to empty PTE, swap PTE or PTE pointing to arbitrary
++ * page.
++ *
++ * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
++ * entry that points to @pvmw->page or any subpage in case of THP.
++ *
++ * If PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
++ * @pvmw->page or any subpage in case of THP.
++ *
++ * Otherwise, return false.
++ *
++ */
+ static bool check_pte(struct page_vma_mapped_walk *pvmw)
+ {
++      unsigned long pfn;
++
+       if (pvmw->flags & PVMW_MIGRATION) {
+-#ifdef CONFIG_MIGRATION
+               swp_entry_t entry;
+               if (!is_swap_pte(*pvmw->pte))
+                       return false;
+@@ -41,37 +60,31 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
+ 
+               if (!is_migration_entry(entry))
+                       return false;
+-              if (migration_entry_to_page(entry) - pvmw->page >=
+-                              hpage_nr_pages(pvmw->page)) {
+-                      return false;
+-              }
+-              if (migration_entry_to_page(entry) < pvmw->page)
+-                      return false;
+-#else
+-              WARN_ON_ONCE(1);
+-#endif
+-      } else {
+-              if (is_swap_pte(*pvmw->pte)) {
+-                      swp_entry_t entry;
+ 
+-                      entry = pte_to_swp_entry(*pvmw->pte);
+-                      if (is_device_private_entry(entry) &&
+-                          device_private_entry_to_page(entry) == pvmw->page)
+-                              return true;
+-              }
++              pfn = migration_entry_to_pfn(entry);
++      } else if (is_swap_pte(*pvmw->pte)) {
++              swp_entry_t entry;
+ 
+-              if (!pte_present(*pvmw->pte))
++              /* Handle un-addressable ZONE_DEVICE memory */
++              entry = pte_to_swp_entry(*pvmw->pte);
++              if (!is_device_private_entry(entry))
+                       return false;
+ 
+-              /* THP can be referenced by any subpage */
+-              if (pte_page(*pvmw->pte) - pvmw->page >=
+-                              hpage_nr_pages(pvmw->page)) {
+-                      return false;
+-              }
+-              if (pte_page(*pvmw->pte) < pvmw->page)
++              pfn = device_private_entry_to_pfn(entry);
++      } else {
++              if (!pte_present(*pvmw->pte))
+                       return false;
++
++              pfn = pte_pfn(*pvmw->pte);
+       }
+ 
++      if (pfn < page_to_pfn(pvmw->page))
++              return false;
++
++      /* THP can be referenced by any subpage */
++      if (pfn - page_to_pfn(pvmw->page) >= hpage_nr_pages(pvmw->page))
++              return false;
++
+       return true;
+ }
+ 
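
After the rework, all three cases (migration entry, device-private entry,
present pte) funnel into one pfn-range comparison against the compound page.
A standalone model of just that final test (the pfn values are arbitrary
examples; 512 subpages models a 2 MB THP of 4 KB pages):

    #include <stdbool.h>
    #include <stdio.h>

    static bool pfn_in_hpage(unsigned long pfn, unsigned long head_pfn,
                             unsigned long nr_subpages)
    {
            if (pfn < head_pfn)
                    return false;
            /* a THP can be referenced through any of its subpages */
            return pfn - head_pfn < nr_subpages;
    }

    int main(void)
    {
            printf("%d\n", pfn_in_hpage(0x1203, 0x1200, 512)); /* 1 */
            printf("%d\n", pfn_in_hpage(0x11ff, 0x1200, 512)); /* 0 */
            return 0;
    }
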
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index ecd5c703d11e..e3626e8500c2 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -721,20 +721,16 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
+ {
+       struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ 
+-      if (WARN_ONCE(dev->type != ARPHRD_CAN ||
+-                    skb->len != CAN_MTU ||
+-                    cfd->len > CAN_MAX_DLEN,
+-                    "PF_CAN: dropped non conform CAN skbuf: "
+-                    "dev type %d, len %d, datalen %d\n",
+-                    dev->type, skb->len, cfd->len))
+-              goto drop;
++      if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
++                   cfd->len > CAN_MAX_DLEN)) {
++              pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
++                           dev->type, skb->len, cfd->len);
++              kfree_skb(skb);
++              return NET_RX_DROP;
++      }
+ 
+       can_receive(skb, dev);
+       return NET_RX_SUCCESS;
+-
+-drop:
+-      kfree_skb(skb);
+-      return NET_RX_DROP;
+ }
+ 
+ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
+@@ -742,20 +738,16 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
+ {
+       struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ 
+-      if (WARN_ONCE(dev->type != ARPHRD_CAN ||
+-                    skb->len != CANFD_MTU ||
+-                    cfd->len > CANFD_MAX_DLEN,
+-                    "PF_CAN: dropped non conform CAN FD skbuf: "
+-                    "dev type %d, len %d, datalen %d\n",
+-                    dev->type, skb->len, cfd->len))
+-              goto drop;
++      if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
++                   cfd->len > CANFD_MAX_DLEN)) {
++              pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
++                           dev->type, skb->len, cfd->len);
++              kfree_skb(skb);
++              return NET_RX_DROP;
++      }
+ 
+       can_receive(skb, dev);
+       return NET_RX_SUCCESS;
+-
+-drop:
+-      kfree_skb(skb);
+-      return NET_RX_DROP;
+ }
+ 
+ /*
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index a00d607e7224..2ad693232f74 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -401,6 +401,11 @@ static int verify_address_len(const void *p)
+ #endif
+       int len;
+ 
++      if (sp->sadb_address_len <
++          DIV_ROUND_UP(sizeof(*sp) + offsetofend(typeof(*addr), sa_family),
++                       sizeof(uint64_t)))
++              return -EINVAL;
++
+       switch (addr->sa_family) {
+       case AF_INET:
+               len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t));
+@@ -511,6 +516,9 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
+               uint16_t ext_type;
+               int ext_len;
+ 
++              if (len < sizeof(*ehdr))
++                      return -EINVAL;
++
+               ext_len  = ehdr->sadb_ext_len;
+               ext_len *= sizeof(uint64_t);
+               ext_type = ehdr->sadb_ext_type;
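
The new check in verify_address_len() guards the very dereference of
addr->sa_family: sadb_address_len counts 64-bit words, and a malicious
message can declare an extension too short to contain even the family
field. A standalone model of the bounds test (the sadb_address layout is
copied here as a stand-in for the definition in linux/pfkeyv2.h):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/socket.h>

    #define offsetofend(T, m)  (offsetof(T, m) + sizeof(((T *)0)->m))
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    struct sadb_address_model {
            uint16_t sadb_address_len;      /* total length, 8-byte units */
            uint16_t sadb_address_exttype;
            uint8_t  sadb_address_proto;
            uint8_t  sadb_address_prefixlen;
            uint16_t sadb_address_reserved;
    };

    /* true when the declared length covers the header plus sa_family */
    static int address_len_covers_family(const struct sadb_address_model *sp)
    {
            return sp->sadb_address_len >=
                   DIV_ROUND_UP(sizeof(*sp) +
                                offsetofend(struct sockaddr, sa_family),
                                sizeof(uint64_t));
    }

    int main(void)
    {
            struct sadb_address_model sp = { .sadb_address_len = 1 };

            printf("%d\n", address_len_covers_family(&sp)); /* 0: too short */
            return 0;
    }
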
+diff --git a/scripts/Makefile.build b/scripts/Makefile.build
+index e63af4e19382..6bed45dc2cb1 100644
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -270,12 +270,18 @@ else
+ objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
+ endif
+ 
++ifdef CONFIG_MODVERSIONS
++objtool_o = $(@D)/.tmp_$(@F)
++else
++objtool_o = $(@)
++endif
++
+ # 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory
+ # 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file
+ # 'OBJECT_FILES_NON_STANDARD_foo.o := 'n': override directory skip for a file
+ cmd_objtool = $(if $(patsubst y%,, \
+      $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \
+-      $(__objtool_obj) $(objtool_args) "$(@)";)
++      $(__objtool_obj) $(objtool_args) "$(objtool_o)";)
+ objtool_obj = $(if $(patsubst y%,, \
+      $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \
+       $(__objtool_obj))
+@@ -291,15 +297,15 @@ objtool_dep = $(objtool_obj)                                     \
+ define rule_cc_o_c
+       $(call echo-cmd,checksrc) $(cmd_checksrc)                         \
+       $(call cmd_and_fixdep,cc_o_c)                                     \
+-      $(cmd_modversions_c)                                              \
+       $(call echo-cmd,objtool) $(cmd_objtool)                           \
++      $(cmd_modversions_c)                                              \
+       $(call echo-cmd,record_mcount) $(cmd_record_mcount)
+ endef
+ 
+ define rule_as_o_S
+       $(call cmd_and_fixdep,as_o_S)                                     \
+-      $(cmd_modversions_S)                                              \
+-      $(call echo-cmd,objtool) $(cmd_objtool)
++      $(call echo-cmd,objtool) $(cmd_objtool)                           \
++      $(cmd_modversions_S)
+ endef
+ 
+ # List module undefined symbols (or empty line if not enabled)
+diff --git a/scripts/gdb/linux/tasks.py b/scripts/gdb/linux/tasks.py
+index 1bf949c43b76..f6ab3ccf698f 100644
+--- a/scripts/gdb/linux/tasks.py
++++ b/scripts/gdb/linux/tasks.py
+@@ -96,6 +96,8 @@ def get_thread_info(task):
+         thread_info_addr = task.address + ia64_task_size
+         thread_info = thread_info_addr.cast(thread_info_ptr_type)
+     else:
++        if task.type.fields()[0].type == thread_info_type.get_type():
++            return task['thread_info']
+         thread_info = task['stack'].cast(thread_info_ptr_type)
+     return thread_info.dereference()
+ 
+diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
+index db7894bb028c..faa67861cbc1 100644
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -560,7 +560,6 @@ static inline unsigned int muldiv32(unsigned int a, unsigned int b,
+ {
+       u_int64_t n = (u_int64_t) a * b;
+       if (c == 0) {
+-              snd_BUG_ON(!n);
+               *r = 0;
+               return UINT_MAX;
+       }
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index d10c780dfd54..ac30fc1ab98b 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -221,6 +221,7 @@ static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
+       rwlock_init(&client->ports_lock);
+       mutex_init(&client->ports_mutex);
+       INIT_LIST_HEAD(&client->ports_list_head);
++      mutex_init(&client->ioctl_mutex);
+ 
+       /* find free slot in the client table */
+       spin_lock_irqsave(&clients_lock, flags);
+@@ -2126,7 +2127,9 @@ static long snd_seq_ioctl(struct file *file, unsigned int cmd,
+                       return -EFAULT;
+       }
+ 
++      mutex_lock(&client->ioctl_mutex);
+       err = handler->func(client, &buf);
++      mutex_unlock(&client->ioctl_mutex);
+       if (err >= 0) {
+               /* Some commands includes a bug in 'dir' field. */
+               if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT ||
+diff --git a/sound/core/seq/seq_clientmgr.h b/sound/core/seq/seq_clientmgr.h
+index c6614254ef8a..0611e1e0ed5b 100644
+--- a/sound/core/seq/seq_clientmgr.h
++++ b/sound/core/seq/seq_clientmgr.h
+@@ -61,6 +61,7 @@ struct snd_seq_client {
+       struct list_head ports_list_head;
+       rwlock_t ports_lock;
+       struct mutex ports_mutex;
++      struct mutex ioctl_mutex;
+       int convert32;          /* convert 32->64bit */
+ 
+       /* output pool */
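
The sequencer fix is a classic per-object serialization: every ioctl on a
client now runs under that client's ioctl_mutex, so two racing ioctls can no
longer observe each other's half-updated state. A reduced userspace model of
the pattern (pthread mutex instead of the kernel mutex; the handler callback
is a placeholder):

    #include <pthread.h>

    struct seq_client_model {
            pthread_mutex_t ioctl_mutex;    /* serializes ioctls per client */
            /* ... ports, queues, pools ... */
    };

    static int client_ioctl(struct seq_client_model *client,
                            int (*handler)(struct seq_client_model *, void *),
                            void *buf)
    {
            int err;

            pthread_mutex_lock(&client->ioctl_mutex);
            err = handler(client, buf);     /* runs fully serialized */
            pthread_mutex_unlock(&client->ioctl_mutex);
            return err;
    }
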
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index 80bbadc83721..d6e079f4ec09 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -408,6 +408,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
+       /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
+ 
+       /* codec SSID */
++      SND_PCI_QUIRK(0x106b, 0x0600, "iMac 14,1", CS420X_IMAC27_122),
+       SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
+       SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
+       SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index acdb196ddb44..145e92d6ca94 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6173,6 +6173,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+       SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
+       SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
++      SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+       SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
+index ae0272f9a091..e6acc281dd37 100644
+--- a/tools/objtool/Makefile
++++ b/tools/objtool/Makefile
+@@ -46,7 +46,7 @@ $(OBJTOOL_IN): fixdep FORCE
+       @$(MAKE) $(build)=objtool
+ 
+ $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
+-      @./sync-check.sh
++      @$(CONFIG_SHELL) ./sync-check.sh
+       $(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
+ 
+ 
+diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
+index 8acfc47af70e..540a209b78ab 100644
+--- a/tools/objtool/arch/x86/decode.c
++++ b/tools/objtool/arch/x86/decode.c
+@@ -138,7 +138,7 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
+                       *type = INSN_STACK;
+                       op->src.type = OP_SRC_ADD;
+                       op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+-                      op->dest.type = OP_SRC_REG;
++                      op->dest.type = OP_DEST_REG;
+                       op->dest.reg = CFI_SP;
+               }
+               break;
+diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c
+index 4c6b5c9ef073..91e8e19ff5e0 100644
+--- a/tools/objtool/builtin-orc.c
++++ b/tools/objtool/builtin-orc.c
+@@ -44,6 +44,9 @@ int cmd_orc(int argc, const char **argv)
+       const char *objname;
+ 
+       argc--; argv++;
++      if (argc <= 0)
++              usage_with_options(orc_usage, check_options);
++
+       if (!strncmp(argv[0], "gen", 3)) {
+               argc = parse_options(argc, argv, check_options, orc_usage, 0);
+               if (argc != 1)
+@@ -52,7 +55,6 @@ int cmd_orc(int argc, const char **argv)
+               objname = argv[0];
+ 
+               return check(objname, no_fp, no_unreachable, true);
+-
+       }
+ 
+       if (!strcmp(argv[0], "dump")) {
+diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
+index 24460155c82c..c1c338661699 100644
+--- a/tools/objtool/elf.c
++++ b/tools/objtool/elf.c
+@@ -26,6 +26,7 @@
+ #include <stdlib.h>
+ #include <string.h>
+ #include <unistd.h>
++#include <errno.h>
+ 
+ #include "elf.h"
+ #include "warn.h"
+@@ -358,7 +359,8 @@ struct elf *elf_open(const char *name, int flags)
+ 
+       elf->fd = open(name, flags);
+       if (elf->fd == -1) {
+-              perror("open");
++              fprintf(stderr, "objtool: Can't open '%s': %s\n",
++                      name, strerror(errno));
+               goto err;
+       }
+ 
+diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
+index e5ca31429c9b..e61fe703197b 100644
+--- a/tools/objtool/orc_gen.c
++++ b/tools/objtool/orc_gen.c
+@@ -165,6 +165,8 @@ int create_orc_sections(struct objtool_file *file)
+ 
+       /* create .orc_unwind_ip and .rela.orc_unwind_ip sections */
+       sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx);
++      if (!sec)
++              return -1;
+ 
+       ip_relasec = elf_create_rela_section(file->elf, sec);
+       if (!ip_relasec)
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index b4b69c2d1012..9dea96380339 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -1310,7 +1310,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+               return -EFAULT;
+       }
+ 
+-      if (is_vm_hugetlb_page(vma) && !logging_active) {
++      if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
+               hugetlb = true;
+               gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
+       } else {
