commit:     868863fa3b01b3d0e5bc4bd44d8bbf00ed6a5e95
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jun 15 15:06:56 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jun 15 15:06:56 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=868863fa

Linux patch 4.19.51

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1050_linux-4.19.51.patch | 3093 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3097 insertions(+)

diff --git a/0000_README b/0000_README
index 9cf9325..5b58ca2 100644
--- a/0000_README
+++ b/0000_README
@@ -243,6 +243,10 @@ Patch:  1049_linux-4.19.50.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.19.50
 
+Patch:  1050_linux-4.19.51.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.19.51
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1050_linux-4.19.51.patch b/1050_linux-4.19.51.patch
new file mode 100644
index 0000000..f589a82
--- /dev/null
+++ b/1050_linux-4.19.51.patch
@@ -0,0 +1,3093 @@
+diff --git a/Makefile b/Makefile
+index f7e7e365e2ff..dd4be2f32b88 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 50
++SUBLEVEL = 51
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+index cdda614e417e..a370857beac0 100644
+--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
++++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+@@ -106,6 +106,7 @@
+                               regulator-name = "PVDD_APIO_1V8";
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <1800000>;
++                              regulator-always-on;
+                       };
+ 
+                       ldo3_reg: LDO3 {
+@@ -144,6 +145,7 @@
+                               regulator-name = "PVDD_ABB_1V8";
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <1800000>;
++                              regulator-always-on;
+                       };
+ 
+                       ldo9_reg: LDO9 {
+diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi
+index 7fae2ffb76fe..ab522c2da6df 100644
+--- a/arch/arm/boot/dts/imx50.dtsi
++++ b/arch/arm/boot/dts/imx50.dtsi
+@@ -420,7 +420,7 @@
+                               reg = <0x63fb0000 0x4000>;
+                               interrupts = <6>;
+                               clocks = <&clks IMX5_CLK_SDMA_GATE>,
+-                                       <&clks IMX5_CLK_SDMA_GATE>;
++                                       <&clks IMX5_CLK_AHB>;
+                               clock-names = "ipg", "ahb";
+                               #dma-cells = <3>;
+                               fsl,sdma-ram-script-name = "imx/sdma/sdma-imx50.bin";
+diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
+index 5c4ba91e43ba..ef2abc097843 100644
+--- a/arch/arm/boot/dts/imx51.dtsi
++++ b/arch/arm/boot/dts/imx51.dtsi
+@@ -481,7 +481,7 @@
+                               reg = <0x83fb0000 0x4000>;
+                               interrupts = <6>;
+                               clocks = <&clks IMX5_CLK_SDMA_GATE>,
+-                                       <&clks IMX5_CLK_SDMA_GATE>;
++                                       <&clks IMX5_CLK_AHB>;
+                               clock-names = "ipg", "ahb";
+                               #dma-cells = <3>;
+                               fsl,sdma-ram-script-name = "imx/sdma/sdma-imx51.bin";
+diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
+index 6386185ae234..b6b0818343c4 100644
+--- a/arch/arm/boot/dts/imx53.dtsi
++++ b/arch/arm/boot/dts/imx53.dtsi
+@@ -701,7 +701,7 @@
+                               reg = <0x63fb0000 0x4000>;
+                               interrupts = <6>;
+                               clocks = <&clks IMX5_CLK_SDMA_GATE>,
+-                                       <&clks IMX5_CLK_SDMA_GATE>;
++                                       <&clks IMX5_CLK_AHB>;
+                               clock-names = "ipg", "ahb";
+                               #dma-cells = <3>;
+                               fsl,sdma-ram-script-name = "imx/sdma/sdma-imx53.bin";
+diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
+index 61d2d26afbf4..00d44a60972f 100644
+--- a/arch/arm/boot/dts/imx6qdl.dtsi
++++ b/arch/arm/boot/dts/imx6qdl.dtsi
+@@ -905,7 +905,7 @@
+                               compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma";
+                               reg = <0x020ec000 0x4000>;
+                               interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
+-                              clocks = <&clks IMX6QDL_CLK_SDMA>,
++                              clocks = <&clks IMX6QDL_CLK_IPG>,
+                                        <&clks IMX6QDL_CLK_SDMA>;
+                               clock-names = "ipg", "ahb";
+                               #dma-cells = <3>;
+diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
+index 7a4f5dace902..2fa88c6f1882 100644
+--- a/arch/arm/boot/dts/imx6sl.dtsi
++++ b/arch/arm/boot/dts/imx6sl.dtsi
+@@ -739,7 +739,7 @@
+                               reg = <0x020ec000 0x4000>;
+                               interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
+                               clocks = <&clks IMX6SL_CLK_SDMA>,
+-                                       <&clks IMX6SL_CLK_SDMA>;
++                                       <&clks IMX6SL_CLK_AHB>;
+                               clock-names = "ipg", "ahb";
+                               #dma-cells = <3>;
+                               /* imx6sl reuses imx6q sdma firmware */
+diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi
+index 3e6ffaf5f104..7c7d5c47578e 100644
+--- a/arch/arm/boot/dts/imx6sll.dtsi
++++ b/arch/arm/boot/dts/imx6sll.dtsi
+@@ -591,7 +591,7 @@
+                               compatible = "fsl,imx6sll-sdma", "fsl,imx35-sdma";
+                               reg = <0x020ec000 0x4000>;
+                               interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+-                              clocks = <&clks IMX6SLL_CLK_SDMA>,
++                              clocks = <&clks IMX6SLL_CLK_IPG>,
+                                        <&clks IMX6SLL_CLK_SDMA>;
+                               clock-names = "ipg", "ahb";
+                               #dma-cells = <3>;
+diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
+index 50083cecc6c9..7b62e6fb47eb 100644
+--- a/arch/arm/boot/dts/imx6sx.dtsi
++++ b/arch/arm/boot/dts/imx6sx.dtsi
+@@ -803,7 +803,7 @@
+                               compatible = "fsl,imx6sx-sdma", "fsl,imx6q-sdma";
+                               reg = <0x020ec000 0x4000>;
+                               interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+-                              clocks = <&clks IMX6SX_CLK_SDMA>,
++                              clocks = <&clks IMX6SX_CLK_IPG>,
+                                        <&clks IMX6SX_CLK_SDMA>;
+                               clock-names = "ipg", "ahb";
+                               #dma-cells = <3>;
+diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
+index 6dc0b569acdf..2366f093cc76 100644
+--- a/arch/arm/boot/dts/imx6ul.dtsi
++++ b/arch/arm/boot/dts/imx6ul.dtsi
+@@ -707,7 +707,7 @@
+                                            "fsl,imx35-sdma";
+                               reg = <0x020ec000 0x4000>;
+                               interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+-                              clocks = <&clks IMX6UL_CLK_SDMA>,
++                              clocks = <&clks IMX6UL_CLK_IPG>,
+                                        <&clks IMX6UL_CLK_SDMA>;
+                               clock-names = "ipg", "ahb";
+                               #dma-cells = <3>;
+diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
+index a052198f6e96..a7f697b0290f 100644
+--- a/arch/arm/boot/dts/imx7s.dtsi
++++ b/arch/arm/boot/dts/imx7s.dtsi
+@@ -1050,8 +1050,8 @@
+                               compatible = "fsl,imx7d-sdma", "fsl,imx35-sdma";
+                               reg = <0x30bd0000 0x10000>;
+                               interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+-                              clocks = <&clks IMX7D_SDMA_CORE_CLK>,
+-                                       <&clks IMX7D_AHB_CHANNEL_ROOT_CLK>;
++                              clocks = <&clks IMX7D_IPG_ROOT_CLK>,
++                                       <&clks IMX7D_SDMA_CORE_CLK>;
+                               clock-names = "ipg", "ahb";
+                               #dma-cells = <3>;
+                               fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
+diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
+index cba23eaa6072..7a88f160b1fb 100644
+--- a/arch/arm/include/asm/hardirq.h
++++ b/arch/arm/include/asm/hardirq.h
+@@ -6,6 +6,7 @@
+ #include <linux/threads.h>
+ #include <asm/irq.h>
+ 
++/* number of IPIS _not_ including IPI_CPU_BACKTRACE */
+ #define NR_IPI        7
+ 
+ typedef struct {
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index a3ce7c5365fa..bada66ef4419 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -76,6 +76,10 @@ enum ipi_msg_type {
+       IPI_CPU_STOP,
+       IPI_IRQ_WORK,
+       IPI_COMPLETION,
++      /*
++       * CPU_BACKTRACE is special and not included in NR_IPI
++       * or tracable with trace_ipi_*
++       */
+       IPI_CPU_BACKTRACE,
+       /*
+        * SGI8-15 can be reserved by secure firmware, and thus may
+@@ -803,7 +807,7 @@ core_initcall(register_cpufreq_notifier);
+ 
+ static void raise_nmi(cpumask_t *mask)
+ {
+-      smp_cross_call(mask, IPI_CPU_BACKTRACE);
++      __smp_cross_call(mask, IPI_CPU_BACKTRACE);
+ }
+ 
+ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
+index b1fe53e8b460..088c34e99b02 100644
+--- a/arch/arm/mach-exynos/suspend.c
++++ b/arch/arm/mach-exynos/suspend.c
+@@ -434,8 +434,27 @@ early_wakeup:
+ 
+ static void exynos5420_prepare_pm_resume(void)
+ {
++      unsigned int mpidr, cluster;
++
++      mpidr = read_cpuid_mpidr();
++      cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
++
+       if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM))
+               WARN_ON(mcpm_cpu_powered_up());
++
++      if (IS_ENABLED(CONFIG_HW_PERF_EVENTS) && cluster != 0) {
++              /*
++               * When system is resumed on the LITTLE/KFC core (cluster 1),
++               * the DSCR is not properly updated until the power is turned
++               * on also for the cluster 0. Enable it for a while to
++               * propagate the SPNIDEN and SPIDEN signals from Secure JTAG
++               * block and avoid undefined instruction issue on CP14 reset.
++               */
++              pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
++                              EXYNOS_COMMON_CONFIGURATION(0));
++              pmu_raw_writel(0,
++                              EXYNOS_COMMON_CONFIGURATION(0));
++      }
+ }
+ 
+ static void exynos5420_pm_resume(void)
+diff --git a/arch/arm/mach-omap2/pm33xx-core.c b/arch/arm/mach-omap2/pm33xx-core.c
+index f4971e4a86b2..ca7026958d42 100644
+--- a/arch/arm/mach-omap2/pm33xx-core.c
++++ b/arch/arm/mach-omap2/pm33xx-core.c
+@@ -51,10 +51,12 @@ static int amx3_common_init(void)
+ 
+       /* CEFUSE domain can be turned off post bootup */
+       cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm");
+-      if (cefuse_pwrdm)
+-              omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF);
+-      else
++      if (!cefuse_pwrdm)
+               pr_err("PM: Failed to get cefuse_pwrdm\n");
++      else if (omap_type() != OMAP2_DEVICE_TYPE_GP)
++              pr_info("PM: Leaving EFUSE power domain active\n");
++      else
++              omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF);
+ 
+       return 0;
+ }
+diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
+index 89950b7bf536..bdaf3536241a 100644
+--- a/arch/mips/kernel/prom.c
++++ b/arch/mips/kernel/prom.c
+@@ -41,7 +41,19 @@ char *mips_get_machine_name(void)
+ #ifdef CONFIG_USE_OF
+ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+ {
+-      return add_memory_region(base, size, BOOT_MEM_RAM);
++      if (base >= PHYS_ADDR_MAX) {
++              pr_warn("Trying to add an invalid memory region, skipped\n");
++              return;
++      }
++
++      /* Truncate the passed memory region instead of type casting */
++      if (base + size - 1 >= PHYS_ADDR_MAX || base + size < base) {
++              pr_warn("Truncate memory region %llx @ %llx to size %llx\n",
++                      size, base, PHYS_ADDR_MAX - base);
++              size = PHYS_ADDR_MAX - base;
++      }
++
++      add_memory_region(base, size, BOOT_MEM_RAM);
+ }
+ 
+ int __init early_init_dt_reserve_memory_arch(phys_addr_t base,
+diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
+index 052de4c8acb2..0c572a48158e 100644
+--- a/arch/um/kernel/time.c
++++ b/arch/um/kernel/time.c
+@@ -56,7 +56,7 @@ static int itimer_one_shot(struct clock_event_device *evt)
+ static struct clock_event_device timer_clockevent = {
+       .name                   = "posix-timer",
+       .rating                 = 250,
+-      .cpumask                = cpu_all_mask,
++      .cpumask                = cpu_possible_mask,
+       .features               = CLOCK_EVT_FEAT_PERIODIC |
+                                 CLOCK_EVT_FEAT_ONESHOT,
+       .set_state_shutdown     = itimer_shutdown,
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 09c53bcbd497..c8b0bf2b0d5e 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3072,7 +3072,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
+               return ret;
+ 
+       if (event->attr.precise_ip) {
+-              if (!(event->attr.freq || event->attr.wakeup_events)) {
+              if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
+                       event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
+                       if (!(event->attr.sample_type &
+                             ~intel_pmu_large_pebs_flags(event)))
+diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
+index 52e55108404e..d3a73f9335e1 100644
+--- a/arch/x86/pci/irq.c
++++ b/arch/x86/pci/irq.c
+@@ -1119,6 +1119,8 @@ static const struct dmi_system_id pciirq_dmi_table[] __initconst = {
+ 
+ void __init pcibios_irq_init(void)
+ {
++      struct irq_routing_table *rtable = NULL;
++
+       DBG(KERN_DEBUG "PCI: IRQ init\n");
+ 
+       if (raw_pci_ops == NULL)
+@@ -1129,8 +1131,10 @@ void __init pcibios_irq_init(void)
+       pirq_table = pirq_find_routing_table();
+ 
+ #ifdef CONFIG_PCI_BIOS
+-      if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
++      if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) {
+               pirq_table = pcibios_get_irq_routing_table();
++              rtable = pirq_table;
++      }
+ #endif
+       if (pirq_table) {
+               pirq_peer_trick();
+@@ -1145,8 +1149,10 @@ void __init pcibios_irq_init(void)
+                * If we're using the I/O APIC, avoid using the PCI IRQ
+                * routing table
+                */
+-              if (io_apic_assign_pci_irqs)
++              if (io_apic_assign_pci_irqs) {
++                      kfree(rtable);
+                       pirq_table = NULL;
++              }
+       }
+ 
+       x86_init.pci.fixup_irqs();
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 15e8c9955b79..6bb397995610 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2509,6 +2509,8 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd)
+       if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
+           bfq_symmetric_scenario(bfqd))
+               sl = min_t(u64, sl, BFQ_MIN_TT);
++      else if (bfqq->wr_coeff > 1)
++              sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);
+ 
+       bfqd->last_idling_start = ktime_get();
+       hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 33488b1426b7..6eed5d84c2ef 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -411,7 +411,6 @@ void blk_sync_queue(struct request_queue *q)
+               struct blk_mq_hw_ctx *hctx;
+               int i;
+ 
+-              cancel_delayed_work_sync(&q->requeue_work);
+               queue_for_each_hw_ctx(q, hctx, i)
+                       cancel_delayed_work_sync(&hctx->run_work);
+       } else {
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 4e563ee462cb..70d839b9c3b0 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2465,6 +2465,8 @@ void blk_mq_release(struct request_queue *q)
+       struct blk_mq_hw_ctx *hctx;
+       unsigned int i;
+ 
++      cancel_delayed_work_sync(&q->requeue_work);
++
+       /* hctx kobj stays in hctx */
+       queue_for_each_hw_ctx(q, hctx, i) {
+               if (!hctx)
+diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
+index 64191694ff6e..9cfdbea493bb 100644
+--- a/drivers/clk/rockchip/clk-rk3288.c
++++ b/drivers/clk/rockchip/clk-rk3288.c
+@@ -835,6 +835,9 @@ static const int rk3288_saved_cru_reg_ids[] = {
+       RK3288_CLKSEL_CON(10),
+       RK3288_CLKSEL_CON(33),
+       RK3288_CLKSEL_CON(37),
++
++      /* We turn aclk_dmac1 on for suspend; this will restore it */
++      RK3288_CLKGATE_CON(10),
+ };
+ 
+ static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)];
+@@ -850,6 +853,14 @@ static int rk3288_clk_suspend(void)
+                               readl_relaxed(rk3288_cru_base + reg_id);
+       }
+ 
++      /*
++       * Going into deep sleep (specifically setting PMU_CLR_DMA in
++       * RK3288_PMU_PWRMODE_CON1) appears to fail unless
++       * "aclk_dmac1" is on.
++       */
++      writel_relaxed(1 << (12 + 16),
++                     rk3288_cru_base + RK3288_CLKGATE_CON(10));
++
+       /*
+        * Switch PLLs other than DPLL (for SDRAM) to slow mode to
+        * avoid crashes on resume. The Mask ROM on the system will
+diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
+index 1fbf9cb9b742..89c5e5b46068 100644
+--- a/drivers/dma/idma64.c
++++ b/drivers/dma/idma64.c
+@@ -597,7 +597,7 @@ static int idma64_probe(struct idma64_chip *chip)
+       idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ 
+-      idma64->dma.dev = chip->dev;
++      idma64->dma.dev = chip->sysdev;
+ 
+       dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
+ 
+@@ -637,6 +637,7 @@ static int idma64_platform_probe(struct platform_device *pdev)
+ {
+       struct idma64_chip *chip;
+       struct device *dev = &pdev->dev;
++      struct device *sysdev = dev->parent;
+       struct resource *mem;
+       int ret;
+ 
+@@ -653,11 +654,12 @@ static int idma64_platform_probe(struct platform_device *pdev)
+       if (IS_ERR(chip->regs))
+               return PTR_ERR(chip->regs);
+ 
+-      ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
++      ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
+       if (ret)
+               return ret;
+ 
+       chip->dev = dev;
++      chip->sysdev = sysdev;
+ 
+       ret = idma64_probe(chip);
+       if (ret)
+diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
+index 6b816878e5e7..baa32e1425de 100644
+--- a/drivers/dma/idma64.h
++++ b/drivers/dma/idma64.h
+@@ -216,12 +216,14 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
+ /**
+  * struct idma64_chip - representation of iDMA 64-bit controller hardware
+  * @dev:              struct device of the DMA controller
++ * @sysdev:           struct device of the physical device that does DMA
+  * @irq:              irq line
+  * @regs:             memory mapped I/O space
+  * @idma64:           struct idma64 that is filed by idma64_probe()
+  */
+ struct idma64_chip {
+       struct device   *dev;
++      struct device   *sysdev;
+       int             irq;
+       void __iomem    *regs;
+       struct idma64   *idma64;
+diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
+index 57304b2e989f..b00cc03ad6b6 100644
+--- a/drivers/edac/Kconfig
++++ b/drivers/edac/Kconfig
+@@ -250,8 +250,8 @@ config EDAC_PND2
+         micro-server but may appear on others in the future.
+ 
+ config EDAC_MPC85XX
+-      tristate "Freescale MPC83xx / MPC85xx"
+-      depends on FSL_SOC
++      bool "Freescale MPC83xx / MPC85xx"
++      depends on FSL_SOC && EDAC=y
+       help
+         Support for error detection and correction on the Freescale
+         MPC8349, MPC8560, MPC8540, MPC8548, T4240
+diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
+index 6c1acf642c8e..6fa430d98517 100644
+--- a/drivers/gpio/gpio-omap.c
++++ b/drivers/gpio/gpio-omap.c
+@@ -343,6 +343,22 @@ static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
+       }
+ }
+ 
++/*
++ * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain.
++ * See TRM section for GPIO for "Wake-Up Generation" for the list of GPIOs
++ * in wakeup domain. If bank->non_wakeup_gpios is not configured, assume none
++ * are capable waking up the system from off mode.
++ */
+static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask)
++{
++      u32 no_wake = bank->non_wakeup_gpios;
++
++      if (no_wake)
++              return !!(~no_wake & gpio_mask);
++
++      return false;
++}
++
+ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
+                                               unsigned trigger)
+ {
+@@ -374,13 +390,7 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
+       }
+ 
+       /* This part needs to be executed always for OMAP{34xx, 44xx} */
+-      if (!bank->regs->irqctrl) {
+-              /* On omap24xx proceed only when valid GPIO bit is set */
+-              if (bank->non_wakeup_gpios) {
+-                      if (!(bank->non_wakeup_gpios & gpio_bit))
+-                              goto exit;
+-              }
+-
+      if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) {
+               /*
+                * Log the edge gpio and manually trigger the IRQ
+                * after resume if the input level changes
+@@ -393,7 +403,6 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
+                       bank->enabled_non_wakeup_gpios &= ~gpio_bit;
+       }
+ 
+-exit:
+       bank->level_mask =
+               readl_relaxed(bank->base + bank->regs->leveldetect0) |
+               readl_relaxed(bank->base + bank->regs->leveldetect1);
+diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
+index 7e09ce75ffb2..a9cb5571de54 100644
+--- a/drivers/gpio/gpio-vf610.c
++++ b/drivers/gpio/gpio-vf610.c
+@@ -37,6 +37,7 @@ struct fsl_gpio_soc_data {
+ 
+ struct vf610_gpio_port {
+       struct gpio_chip gc;
++      struct irq_chip ic;
+       void __iomem *base;
+       void __iomem *gpio_base;
+       const struct fsl_gpio_soc_data *sdata;
+@@ -66,8 +67,6 @@ struct vf610_gpio_port {
+ #define PORT_INT_EITHER_EDGE  0xb
+ #define PORT_INT_LOGIC_ONE    0xc
+ 
+-static struct irq_chip vf610_gpio_irq_chip;
+-
+ static const struct fsl_gpio_soc_data imx_data = {
+       .have_paddr = true,
+ };
+@@ -243,15 +242,6 @@ static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable)
+       return 0;
+ }
+ 
+-static struct irq_chip vf610_gpio_irq_chip = {
+-      .name           = "gpio-vf610",
+-      .irq_ack        = vf610_gpio_irq_ack,
+-      .irq_mask       = vf610_gpio_irq_mask,
+-      .irq_unmask     = vf610_gpio_irq_unmask,
+-      .irq_set_type   = vf610_gpio_irq_set_type,
+-      .irq_set_wake   = vf610_gpio_irq_set_wake,
+-};
+-
+ static int vf610_gpio_probe(struct platform_device *pdev)
+ {
+       struct device *dev = &pdev->dev;
+@@ -259,6 +249,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
+       struct vf610_gpio_port *port;
+       struct resource *iores;
+       struct gpio_chip *gc;
++      struct irq_chip *ic;
+       int i;
+       int ret;
+ 
+@@ -295,6 +286,14 @@ static int vf610_gpio_probe(struct platform_device *pdev)
+       gc->direction_output = vf610_gpio_direction_output;
+       gc->set = vf610_gpio_set;
+ 
++      ic = &port->ic;
++      ic->name = "gpio-vf610";
++      ic->irq_ack = vf610_gpio_irq_ack;
++      ic->irq_mask = vf610_gpio_irq_mask;
++      ic->irq_unmask = vf610_gpio_irq_unmask;
++      ic->irq_set_type = vf610_gpio_irq_set_type;
++      ic->irq_set_wake = vf610_gpio_irq_set_wake;
++
+       ret = gpiochip_add_data(gc, port);
+       if (ret < 0)
+               return ret;
+@@ -306,14 +305,13 @@ static int vf610_gpio_probe(struct platform_device *pdev)
+       /* Clear the interrupt status register for all GPIO's */
+       vf610_gpio_writel(~0, port->base + PORT_ISFR);
+ 
+-      ret = gpiochip_irqchip_add(gc, &vf610_gpio_irq_chip, 0,
+-                                 handle_edge_irq, IRQ_TYPE_NONE);
++      ret = gpiochip_irqchip_add(gc, ic, 0, handle_edge_irq, IRQ_TYPE_NONE);
+       if (ret) {
+               dev_err(dev, "failed to add irqchip\n");
+               gpiochip_remove(gc);
+               return ret;
+       }
+-      gpiochip_set_chained_irqchip(gc, &vf610_gpio_irq_chip, port->irq,
++      gpiochip_set_chained_irqchip(gc, ic, port->irq,
+                                    vf610_gpio_irq_handler);
+ 
+       return 0;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+index bf8b68f8db4f..bce5741f2952 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+@@ -388,6 +388,10 @@ void dpp1_cnv_setup (
+       default:
+               break;
+       }
++
++      /* Set default color space based on format if none is given. */
++      color_space = input_color_space ? input_color_space : color_space;
++
+       REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
+                       CNVC_SURFACE_PIXEL_FORMAT, pixel_format);
+       REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
+@@ -399,7 +403,7 @@ void dpp1_cnv_setup (
+               for (i = 0; i < 12; i++)
+                       tbl_entry.regval[i] = input_csc_color_matrix.matrix[i];
+ 
+-              tbl_entry.color_space = input_color_space;
++              tbl_entry.color_space = color_space;
+ 
+               if (color_space >= COLOR_SPACE_YCBCR601)
+                       select = INPUT_CSC_SELECT_ICSC;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index a0355709abd1..7736ef123e9b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1890,7 +1890,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
+                       plane_state->format,
+                       EXPANSION_MODE_ZERO,
+                       plane_state->input_csc_color_matrix,
+-                      COLOR_SPACE_YCBCR601_LIMITED);
++                      plane_state->color_space);
+ 
+       //set scale and bias registers
+       build_prescale_params(&bns_params, plane_state);
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+index 85c2d407a52e..e7ddd3e3db92 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+@@ -747,11 +747,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
+                       vsync_polarity = 1;
+       }
+ 
+-      if (mode->vrefresh <= 24000)
++      if (drm_mode_vrefresh(mode) <= 24)
+               low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
+-      else if (mode->vrefresh <= 25000)
++      else if (drm_mode_vrefresh(mode) <= 25)
+               low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
+-      else if (mode->vrefresh <= 30000)
++      else if (drm_mode_vrefresh(mode) <= 30)
+               low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
+       else
+               low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
+diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
+index 00d9d77f583a..4b75ad40dd80 100644
+--- a/drivers/gpu/drm/nouveau/Kconfig
++++ b/drivers/gpu/drm/nouveau/Kconfig
+@@ -16,20 +16,9 @@ config DRM_NOUVEAU
+       select INPUT if ACPI && X86
+       select THERMAL if ACPI && X86
+       select ACPI_VIDEO if ACPI && X86
+-      help
+-        Choose this option for open-source NVIDIA support.
+-
+-config NOUVEAU_LEGACY_CTX_SUPPORT
+-      bool "Nouveau legacy context support"
+-      depends on DRM_NOUVEAU
+       select DRM_VM
+-      default y
+       help
+-        There was a version of the nouveau DDX that relied on legacy
+-        ctx ioctls not erroring out. But that was back in time a long
+-        ways, so offer a way to disable it now. For uapi compat with
+-        old nouveau ddx this should be on by default, but modern distros
+-        should consider turning it off.
++        Choose this option for open-source NVIDIA support.
+ 
+ config NOUVEAU_PLATFORM_DRIVER
+       bool "Nouveau (NVIDIA) SoC GPUs"
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
+index e48c5eb35b49..66c125a6b0b3 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
+@@ -41,6 +41,7 @@ struct nv50_disp_interlock {
+               NV50_DISP_INTERLOCK__SIZE
+       } type;
+       u32 data;
++      u32 wimm;
+ };
+ 
+ void corec37d_ntfy_init(struct nouveau_bo *, u32);
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
+index 4f57e5379796..d81a99bb2ac3 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
+@@ -306,7 +306,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
+                       asyh->set.or = head->func->or != NULL;
+               }
+ 
+-              if (asyh->state.mode_changed)
++              if (asyh->state.mode_changed || asyh->state.connectors_changed)
+                       nv50_head_atomic_check_mode(head, asyh);
+ 
+               if (asyh->state.color_mgmt_changed ||
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+index 9103b8494279..f7dbd965e4e7 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+@@ -75,6 +75,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
+               return ret;
+       }
+ 
++      wndw->interlock.wimm = wndw->interlock.data;
+       wndw->immd = func;
+       return 0;
+ }
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+index 2187922e8dc2..b3db4553098d 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+@@ -151,7 +151,7 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
+       if (asyw->set.point) {
+               if (asyw->set.point = false, asyw->set.mask)
+                       interlock[wndw->interlock.type] |= wndw->interlock.data;
+-              interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.data;
++              interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm;
+ 
+               wndw->immd->point(wndw, asyw);
+               wndw->immd->update(wndw, interlock);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
+index 2b7a54cc3c9e..74d2283f2c28 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
+@@ -1015,11 +1015,8 @@ nouveau_driver_fops = {
+ static struct drm_driver
+ driver_stub = {
+       .driver_features =
+-              DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER
+-#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
+-              | DRIVER_KMS_LEGACY_CONTEXT
+-#endif
+-              ,
++              DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
++              DRIVER_KMS_LEGACY_CONTEXT,
+ 
+       .load = nouveau_drm_load,
+       .unload = nouveau_drm_unload,
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+index 5f301e632599..818d21bd28d3 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+@@ -365,8 +365,15 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
+        * and it's better to have a failed modeset than that.
+        */
+       for (cfg = nvkm_dp_rates; cfg->rate; cfg++) {
+-              if (cfg->nr <= outp_nr && cfg->nr <= outp_bw)
+-                      failsafe = cfg;
++              if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) {
++                      /* Try to respect sink limits too when selecting
++                       * lowest link configuration.
++                       */
++                      if (!failsafe ||
++                          (cfg->nr <= sink_nr && cfg->bw <= sink_bw))
++                              failsafe = cfg;
++              }
++
+               if (failsafe && cfg[1].rate < dataKBps)
+                       break;
+       }
+diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
+index 754f6b25f265..6d9f78612dee 100644
+--- a/drivers/gpu/drm/pl111/pl111_display.c
++++ b/drivers/gpu/drm/pl111/pl111_display.c
+@@ -531,14 +531,15 @@ pl111_init_clock_divider(struct drm_device *drm)
+               dev_err(drm->dev, "CLCD: unable to get clcdclk.\n");
+               return PTR_ERR(parent);
+       }
++
++      spin_lock_init(&priv->tim2_lock);
++
+       /* If the clock divider is broken, use the parent directly */
+       if (priv->variant->broken_clockdivider) {
+               priv->clk = parent;
+               return 0;
+       }
+       parent_name = __clk_get_name(parent);
+-
+-      spin_lock_init(&priv->tim2_lock);
+       div->init = &init;
+ 
+       ret = devm_clk_hw_register(drm->dev, div);
+diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
+index ab39315c9078..39e608271263 100644
+--- a/drivers/gpu/drm/vc4/vc4_plane.c
++++ b/drivers/gpu/drm/vc4/vc4_plane.c
+@@ -818,6 +818,7 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
+               drm_atomic_set_fb_for_plane(plane->state, state->fb);
+       }
+ 
++      swap(plane->state->fb, state->fb);
+       /* Set the cursor's position on the screen.  This is the
+        * expected change from the drm_mode_cursor_universal()
+        * helper.
+diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
+index 9ae3678844eb..40fbf20d69e5 100644
+--- a/drivers/iommu/arm-smmu-v3.c
++++ b/drivers/iommu/arm-smmu-v3.c
+@@ -2414,13 +2414,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
+       /* Clear CR0 and sync (disables SMMU and queue processing) */
+       reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
+       if (reg & CR0_SMMUEN) {
+-              if (is_kdump_kernel()) {
+-                      arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
+-                      arm_smmu_device_disable(smmu);
+-                      return -EBUSY;
+-              }
+-
+               dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
++              WARN_ON(is_kdump_kernel() && !disable_bypass);
++              arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
+       }
+ 
+       ret = arm_smmu_device_disable(smmu);
+@@ -2513,6 +2509,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
+               return ret;
+       }
+ 
++      if (is_kdump_kernel())
++              enables &= ~(CR0_EVTQEN | CR0_PRIQEN);
+ 
+       /* Enable the SMMU interface, or ensure bypass */
+       if (!bypass || disable_bypass) {
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 603bf5233a99..c1439019dd12 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -4033,9 +4033,7 @@ static void __init init_no_remapping_devices(void)
+ 
+               /* This IOMMU has *only* gfx devices. Either bypass it or
+                  set the gfx_mapped flag, as appropriate */
+-              if (dmar_map_gfx) {
+-                      intel_iommu_gfx_mapped = 1;
+-              } else {
++              if (!dmar_map_gfx) {
+                       drhd->ignored = 1;
+                       for_each_active_dev_scope(drhd->devices,
+                                                 drhd->devices_cnt, i, dev)
+@@ -4831,6 +4829,9 @@ int __init intel_iommu_init(void)
+               goto out_free_reserved_range;
+       }
+ 
++      if (dmar_map_gfx)
++              intel_iommu_gfx_mapped = 1;
++
+       init_no_remapping_devices();
+ 
+       ret = init_dmars();
+diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
+index 533b0da5235d..ca1f993c0de3 100644
+--- a/drivers/mailbox/stm32-ipcc.c
++++ b/drivers/mailbox/stm32-ipcc.c
+@@ -8,9 +8,9 @@
+ #include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/interrupt.h>
++#include <linux/io.h>
+ #include <linux/mailbox_controller.h>
+ #include <linux/module.h>
+-#include <linux/of_irq.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_wakeirq.h>
+ 
+@@ -240,9 +240,11 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
+ 
+       /* irq */
+       for (i = 0; i < IPCC_IRQ_NUM; i++) {
+-              ipcc->irqs[i] = of_irq_get_byname(dev->of_node, irq_name[i]);
++              ipcc->irqs[i] = platform_get_irq_byname(pdev, irq_name[i]);
+               if (ipcc->irqs[i] < 0) {
+-                      dev_err(dev, "no IRQ specified %s\n", irq_name[i]);
++                      if (ipcc->irqs[i] != -EPROBE_DEFER)
++                              dev_err(dev, "no IRQ specified %s\n",
++                                      irq_name[i]);
+                       ret = ipcc->irqs[i];
+                       goto err_clk;
+               }
+@@ -263,9 +265,10 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
+ 
+       /* wakeup */
+       if (of_property_read_bool(np, "wakeup-source")) {
+-              ipcc->wkp = of_irq_get_byname(dev->of_node, "wakeup");
++              ipcc->wkp = platform_get_irq_byname(pdev, "wakeup");
+               if (ipcc->wkp < 0) {
+-                      dev_err(dev, "could not get wakeup IRQ\n");
++                      if (ipcc->wkp != -EPROBE_DEFER)
++                              dev_err(dev, "could not get wakeup IRQ\n");
+                       ret = ipcc->wkp;
+                       goto err_clk;
+               }
+diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
+index 50bffc3382d7..ff3fba16e735 100644
+--- a/drivers/mfd/intel-lpss.c
++++ b/drivers/mfd/intel-lpss.c
+@@ -273,6 +273,9 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss)
+ {
+       u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN;
+ 
++      /* Set the device in reset state */
++      writel(0, lpss->priv + LPSS_PRIV_RESETS);
++
+       intel_lpss_deassert_reset(lpss);
+ 
+       intel_lpss_set_remap_addr(lpss);
+diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
+index 3bd75061f777..f78be039e463 100644
+--- a/drivers/mfd/tps65912-spi.c
++++ b/drivers/mfd/tps65912-spi.c
+@@ -27,6 +27,7 @@ static const struct of_device_id tps65912_spi_of_match_table[] = {
+       { .compatible = "ti,tps65912", },
+       { /* sentinel */ }
+ };
++MODULE_DEVICE_TABLE(of, tps65912_spi_of_match_table);
+ 
+ static int tps65912_spi_probe(struct spi_device *spi)
+ {
+diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
+index dd19f17a1b63..2b8c479dbfa6 100644
+--- a/drivers/mfd/twl6040.c
++++ b/drivers/mfd/twl6040.c
+@@ -322,8 +322,19 @@ int twl6040_power(struct twl6040 *twl6040, int on)
+                       }
+               }
+ 
++              /*
++               * Register access can produce errors after power-up unless we
++               * wait at least 8ms based on measurements on duovero.
++               */
++              usleep_range(10000, 12000);
++
+               /* Sync with the HW */
+-              regcache_sync(twl6040->regmap);
++              ret = regcache_sync(twl6040->regmap);
++              if (ret) {
++                      dev_err(twl6040->dev, "Failed to sync with the HW: %i\n",
++                              ret);
++                      goto out;
++              }
+ 
+               /* Default PLL configuration after power up */
+               twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL;
+diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
+index 896e2df9400f..fd33a3b9c66f 100644
+--- a/drivers/misc/pci_endpoint_test.c
++++ b/drivers/misc/pci_endpoint_test.c
+@@ -662,6 +662,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
+       data = (struct pci_endpoint_test_data *)ent->driver_data;
+       if (data) {
+               test_reg_bar = data->test_reg_bar;
++              test->test_reg_bar = test_reg_bar;
+               test->alignment = data->alignment;
+               irq_type = data->irq_type;
+       }
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index 1841d250e9e2..eb1a65cb878f 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -1295,9 +1295,10 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
+               }
+ 
+               /*
+-               * Don't poll for busy completion in irq context.
++               * Busy detection has been handled by mmci_cmd_irq() above.
++               * Clear the status bit to prevent polling in IRQ context.
+                */
+-              if (host->variant->busy_detect && host->busy_status)
++              if (host->variant->busy_detect_flag)
+                       status &= ~host->variant->busy_detect_flag;
+ 
+               ret = 1;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 340baf6a470c..4648c6a9d9e8 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -4300,8 +4300,11 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
+               return hclge_add_mac_vlan_tbl(vport, &req, NULL);
+ 
+       /* check if we just hit the duplicate */
+-      if (!ret)
+-              ret = -EINVAL;
++      if (!ret) {
++              dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
++                       vport->vport_id, addr);
++              return 0;
++      }
+ 
+       dev_err(&hdev->pdev->dev,
+               "PF failed to add unicast entry(%pM) in the MAC table\n",
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index df8808cd7e11..4e04985fb430 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -6758,10 +6758,12 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
+       struct i40e_pf *pf = vsi->back;
+       u8 enabled_tc = 0, num_tc, hw;
+       bool need_reset = false;
++      int old_queue_pairs;
+       int ret = -EINVAL;
+       u16 mode;
+       int i;
+ 
++      old_queue_pairs = vsi->num_queue_pairs;
+       num_tc = mqprio_qopt->qopt.num_tc;
+       hw = mqprio_qopt->qopt.hw;
+       mode = mqprio_qopt->mode;
+@@ -6862,6 +6864,7 @@ config_tc:
+               }
+               ret = i40e_configure_queue_channels(vsi);
+               if (ret) {
++                      vsi->num_queue_pairs = old_queue_pairs;
+                       netdev_info(netdev,
+                                   "Failed configuring queue channels\n");
+                       need_reset = true;
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index db1543bca701..875f97aba6e0 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -652,6 +652,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
+       case ICE_FC_RX_PAUSE:
+               fc = "RX";
+               break;
++      case ICE_FC_NONE:
++              fc = "None";
++              break;
+       default:
+               fc = "Unknown";
+               break;
+diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
+index e0d6760f3219..4b5af2413970 100644
+--- a/drivers/net/thunderbolt.c
++++ b/drivers/net/thunderbolt.c
+@@ -1285,6 +1285,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev)
+               tbnet_tear_down(net, true);
+       }
+ 
++      tb_unregister_protocol_handler(&net->handler);
+       return 0;
+ }
+ 
+@@ -1293,6 +1294,8 @@ static int __maybe_unused tbnet_resume(struct device *dev)
+       struct tb_service *svc = tb_to_service(dev);
+       struct tbnet *net = tb_service_get_drvdata(svc);
+ 
++      tb_register_protocol_handler(&net->handler);
++
+       netif_carrier_off(net->dev);
+       if (netif_running(net->dev)) {
+               netif_device_attach(net->dev);
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 7b9ef8e734e7..c8eeecc58115 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1132,6 +1132,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
+       struct nvme_dev *dev = nvmeq->dev;
+       struct request *abort_req;
+       struct nvme_command cmd;
++      bool shutdown = false;
+       u32 csts = readl(dev->bar + NVME_REG_CSTS);
+ 
+       /* If PCI error recovery process is happening, we cannot reset or
+@@ -1168,12 +1169,14 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
+        * shutdown, so we return BLK_EH_DONE.
+        */
+       switch (dev->ctrl.state) {
++      case NVME_CTRL_DELETING:
++              shutdown = true;
+       case NVME_CTRL_CONNECTING:
+       case NVME_CTRL_RESETTING:
+               dev_warn_ratelimited(dev->ctrl.device,
+                        "I/O %d QID %d timeout, disable controller\n",
+                        req->tag, nvmeq->qid);
+-              nvme_dev_disable(dev, false);
++              nvme_dev_disable(dev, shutdown);
+               nvme_req(req)->flags |= NVME_REQ_CANCELLED;
+               return BLK_EH_DONE;
+       default:
+@@ -2187,8 +2190,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
+        * must flush all entered requests to their failed completion to avoid
+        * deadlocking blk-mq hot-cpu notifier.
+        */
+-      if (shutdown)
++      if (shutdown) {
+               nvme_start_queues(&dev->ctrl);
++              if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
++                      blk_mq_unquiesce_queue(dev->ctrl.admin_q);
++      }
+       mutex_unlock(&dev->shutdown_lock);
+ }
+ 
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index 7c530c88b3fb..99de51e87f7f 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -1028,7 +1028,7 @@ EXPORT_SYMBOL_GPL(nvmem_cell_put);
+ static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
+ {
+       u8 *p, *b;
+-      int i, bit_offset = cell->bit_offset;
++      int i, extra, bit_offset = cell->bit_offset;
+ 
+       p = b = buf;
+       if (bit_offset) {
+@@ -1043,11 +1043,16 @@ static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
+                       p = b;
+                       *b++ >>= bit_offset;
+               }
+-
+-              /* result fits in less bytes */
+-              if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
+-                      *p-- = 0;
++      } else {
++              /* point to the msb */
++              p += cell->bytes - 1;
+       }
++
++      /* result fits in less bytes */
++      extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
++      while (--extra >= 0)
++              *p-- = 0;
++
+       /* clear msb bits if any leftover in the last byte */
+       *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
+ }
+diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
+index d020f89248fd..69f8e972e29c 100644
+--- a/drivers/nvmem/sunxi_sid.c
++++ b/drivers/nvmem/sunxi_sid.c
+@@ -235,8 +235,10 @@ static const struct sunxi_sid_cfg sun50i_a64_cfg = {
+ static const struct of_device_id sunxi_sid_of_match[] = {
+       { .compatible = "allwinner,sun4i-a10-sid", .data = &sun4i_a10_cfg },
+       { .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg },
++      { .compatible = "allwinner,sun8i-a83t-sid", .data = &sun50i_a64_cfg },
+       { .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg },
+       { .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg },
++      { .compatible = "allwinner,sun50i-h5-sid", .data = &sun50i_a64_cfg },
+       {/* sentinel */},
+ };
+ MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
+index e88bd221fffe..5e199e7d2d4f 100644
+--- a/drivers/pci/controller/dwc/pci-keystone.c
++++ b/drivers/pci/controller/dwc/pci-keystone.c
+@@ -237,6 +237,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
+               ks_dw_pcie_enable_error_irq(ks_pcie);
+ }
+ 
++#ifdef CONFIG_ARM
+ /*
+  * When a PCI device does not exist during config cycles, keystone host gets a
+  * bus error instead of returning 0xffffffff. This handler always returns 0
+@@ -256,6 +257,7 @@ static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
+ 
+       return 0;
+ }
++#endif
+ 
+ static int __init ks_pcie_host_init(struct pcie_port *pp)
+ {
+@@ -279,12 +281,14 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
+       val |= BIT(12);
+       writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
+ 
++#ifdef CONFIG_ARM
+       /*
+        * PCIe access errors that result into OCP errors are caught by ARM as
+        * "External aborts"
+        */
+       hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
+                       "Asynchronous external abort");
++#endif
+ 
+       return 0;
+ }
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index de8635af4cde..739d97080d3b 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -385,6 +385,7 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
+ {
+       struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+       struct pci_epc *epc = ep->epc;
++      unsigned int aligned_offset;
+       u16 msg_ctrl, msg_data;
+       u32 msg_addr_lower, msg_addr_upper, reg;
+       u64 msg_addr;
+@@ -410,13 +411,15 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
+               reg = ep->msi_cap + PCI_MSI_DATA_32;
+               msg_data = dw_pcie_readw_dbi(pci, reg);
+       }
+-      msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
++      aligned_offset = msg_addr_lower & (epc->mem->page_size - 1);
++      msg_addr = ((u64)msg_addr_upper) << 32 |
++                      (msg_addr_lower & ~aligned_offset);
+       ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+                                 epc->mem->page_size);
+       if (ret)
+               return ret;
+ 
+-      writel(msg_data | (interrupt_num - 1), ep->msi_mem);
++      writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
+ 
+       dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+ 
+diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
+index b56e22262a77..acd50920c2ff 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-host.c
++++ b/drivers/pci/controller/dwc/pcie-designware-host.c
+@@ -303,20 +303,24 @@ void dw_pcie_free_msi(struct pcie_port *pp)
+ 
+       irq_domain_remove(pp->msi_domain);
+       irq_domain_remove(pp->irq_domain);
++
++      if (pp->msi_page)
++              __free_page(pp->msi_page);
+ }
+ 
+ void dw_pcie_msi_init(struct pcie_port *pp)
+ {
+       struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+       struct device *dev = pci->dev;
+-      struct page *page;
+       u64 msi_target;
+ 
+-      page = alloc_page(GFP_KERNEL);
+-      pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
++      pp->msi_page = alloc_page(GFP_KERNEL);
++      pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE,
++                                  DMA_FROM_DEVICE);
+       if (dma_mapping_error(dev, pp->msi_data)) {
+               dev_err(dev, "Failed to map MSI data\n");
+-              __free_page(page);
++              __free_page(pp->msi_page);
++              pp->msi_page = NULL;
+               return;
+       }
+       msi_target = (u64)pp->msi_data;
+@@ -439,7 +443,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
+       if (ret)
+               pci->num_viewport = 2;
+ 
+-      if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) {
++      if (pci_msi_enabled()) {
+               /*
+                * If a specific SoC driver needs to change the
+                * default number of vectors, it needs to implement
+@@ -477,7 +481,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
+       if (pp->ops->host_init) {
+               ret = pp->ops->host_init(pp);
+               if (ret)
+-                      goto error;
++                      goto err_free_msi;
+       }
+ 
+       pp->root_bus_nr = pp->busn->start;
+@@ -491,7 +495,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
+ 
+       ret = pci_scan_root_bus_bridge(bridge);
+       if (ret)
+-              goto error;
++              goto err_free_msi;
+ 
+       bus = bridge->bus;
+ 
+@@ -507,6 +511,9 @@ int dw_pcie_host_init(struct pcie_port *pp)
+       pci_bus_add_devices(bus);
+       return 0;
+ 
++err_free_msi:
++      if (pci_msi_enabled() && !pp->ops->msi_host_init)
++              dw_pcie_free_msi(pp);
+ error:
+       pci_free_host_bridge(bridge);
+       return ret;
+diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
+index 9f1a5e399b70..14dcf6646699 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.h
++++ b/drivers/pci/controller/dwc/pcie-designware.h
+@@ -164,6 +164,7 @@ struct pcie_port {
+       struct irq_domain       *irq_domain;
+       struct irq_domain       *msi_domain;
+       dma_addr_t              msi_data;
++      struct page             *msi_page;
+       u32                     num_vectors;
+       u32                     irq_status[MAX_MSI_CTRLS];
+       raw_spinlock_t          lock;
+diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
+index 6a4e435bd35f..9b9c677ad3a0 100644
+--- a/drivers/pci/controller/pcie-rcar.c
++++ b/drivers/pci/controller/pcie-rcar.c
+@@ -892,7 +892,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
+ {
+       struct device *dev = pcie->dev;
+       struct rcar_msi *msi = &pcie->msi;
+-      unsigned long base;
++      phys_addr_t base;
+       int err, i;
+ 
+       mutex_init(&msi->lock);
+@@ -931,10 +931,14 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
+ 
+       /* setup MSI data target */
+       msi->pages = __get_free_pages(GFP_KERNEL, 0);
++      if (!msi->pages) {
++              err = -ENOMEM;
++              goto err;
++      }
+       base = virt_to_phys((void *)msi->pages);
+ 
+-      rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
+-      rcar_pci_write_reg(pcie, 0, PCIEMSIAUR);
++      rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
++      rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);
+ 
+       /* enable all MSI interrupts */
+       rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
+diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
+index 7b1389d8e2a5..ea48cba5480b 100644
+--- a/drivers/pci/controller/pcie-xilinx.c
++++ b/drivers/pci/controller/pcie-xilinx.c
+@@ -336,14 +336,19 @@ static const struct irq_domain_ops msi_domain_ops = {
+  * xilinx_pcie_enable_msi - Enable MSI support
+  * @port: PCIe port information
+  */
+-static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
++static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
+ {
+       phys_addr_t msg_addr;
+ 
+       port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
++      if (!port->msi_pages)
++              return -ENOMEM;
++
+       msg_addr = virt_to_phys((void *)port->msi_pages);
+       pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
+       pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
++
++      return 0;
+ }
+ 
+ /* INTx Functions */
+@@ -498,6 +503,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
+       struct device *dev = port->dev;
+       struct device_node *node = dev->of_node;
+       struct device_node *pcie_intc_node;
++      int ret;
+ 
+       /* Setup INTx */
+       pcie_intc_node = of_get_next_child(node, NULL);
+@@ -526,7 +532,9 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
+                       return -ENODEV;
+               }
+ 
+-              xilinx_pcie_enable_msi(port);
++              ret = xilinx_pcie_enable_msi(port);
++              if (ret)
++                      return ret;
+       }
+ 
+       return 0;
+diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
+index e2356a9c7088..182f9e3443ee 100644
+--- a/drivers/pci/hotplug/rpadlpar_core.c
++++ b/drivers/pci/hotplug/rpadlpar_core.c
+@@ -51,6 +51,7 @@ static struct device_node *find_vio_slot_node(char *drc_name)
+               if (rc == 0)
+                       break;
+       }
++      of_node_put(parent);
+ 
+       return dn;
+ }
+@@ -71,6 +72,7 @@ static struct device_node *find_php_slot_pci_node(char *drc_name,
+       return np;
+ }
+ 
++/* Returns a device_node with its reference count incremented */
+ static struct device_node *find_dlpar_node(char *drc_name, int *node_type)
+ {
+       struct device_node *dn;
+@@ -306,6 +308,7 @@ int dlpar_add_slot(char *drc_name)
+                       rc = dlpar_add_phb(drc_name, dn);
+                       break;
+       }
++      of_node_put(dn);
+ 
+       printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name);
+ exit:
+@@ -439,6 +442,7 @@ int dlpar_remove_slot(char *drc_name)
+                       rc = dlpar_remove_pci_slot(drc_name, dn);
+                       break;
+       }
++      of_node_put(dn);
+       vm_unmap_aliases();
+ 
+       printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name);
+diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
+index 37d0c15c9eeb..72db2e0ebced 100644
+--- a/drivers/pci/switch/switchtec.c
++++ b/drivers/pci/switch/switchtec.c
+@@ -1116,7 +1116,8 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
+       if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
+               return 0;
+ 
+-      if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE)
++      if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
++          eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
+               return 0;
+ 
+       dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
+diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
+index e5d5b1adb5a9..ac784ac66ac3 100644
+--- a/drivers/platform/chrome/cros_ec_proto.c
++++ b/drivers/platform/chrome/cros_ec_proto.c
+@@ -67,6 +67,17 @@ static int send_command(struct cros_ec_device *ec_dev,
+       else
+               xfer_fxn = ec_dev->cmd_xfer;
+ 
++      if (!xfer_fxn) {
++              /*
++               * This error can happen if a communication error happened and
++               * the EC is trying to use protocol v2, on an underlying
++               * communication mechanism that does not support v2.
++               */
++              dev_err_once(ec_dev->dev,
++                           "missing EC transfer API, cannot send command\n");
++              return -EIO;
++      }
++
+       ret = (*xfer_fxn)(ec_dev, msg);
+       if (msg->result == EC_RES_IN_PROGRESS) {
+               int i;
+diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
+index e7edc8c63936..4ad9d127f2f5 100644
+--- a/drivers/platform/x86/intel_pmc_ipc.c
++++ b/drivers/platform/x86/intel_pmc_ipc.c
+@@ -776,13 +776,17 @@ static int ipc_create_pmc_devices(void)
+       if (ret) {
+               dev_err(ipcdev.dev, "Failed to add punit platform device\n");
+               platform_device_unregister(ipcdev.tco_dev);
++              return ret;
+       }
+ 
+       if (!ipcdev.telem_res_inval) {
+               ret = ipc_create_telemetry_device();
+-              if (ret)
++              if (ret) {
+                       dev_warn(ipcdev.dev,
+                               "Failed to add telemetry platform device\n");
++                      platform_device_unregister(ipcdev.punit_dev);
++                      platform_device_unregister(ipcdev.tco_dev);
++              }
+       }
+ 
+       return ret;
+diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c
+index b91b1d2999dc..d19307f791c6 100644
+--- a/drivers/power/supply/max14656_charger_detector.c
++++ b/drivers/power/supply/max14656_charger_detector.c
+@@ -280,6 +280,13 @@ static int max14656_probe(struct i2c_client *client,
+ 
+       INIT_DELAYED_WORK(&chip->irq_work, max14656_irq_worker);
+ 
++      chip->detect_psy = devm_power_supply_register(dev,
++                     &chip->psy_desc, &psy_cfg);
++      if (IS_ERR(chip->detect_psy)) {
++              dev_err(dev, "power_supply_register failed\n");
++              return -EINVAL;
++      }
++
+       ret = devm_request_irq(dev, chip->irq, max14656_irq,
+                              IRQF_TRIGGER_FALLING,
+                              MAX14656_NAME, chip);
+@@ -289,13 +296,6 @@ static int max14656_probe(struct i2c_client *client,
+       }
+       enable_irq_wake(chip->irq);
+ 
+-      chip->detect_psy = devm_power_supply_register(dev,
+-                     &chip->psy_desc, &psy_cfg);
+-      if (IS_ERR(chip->detect_psy)) {
+-              dev_err(dev, "power_supply_register failed\n");
+-              return -EINVAL;
+-      }
+-
+       schedule_delayed_work(&chip->irq_work, msecs_to_jiffies(2000));
+ 
+       return 0;
+diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
+index 1581f6ab1b1f..c45e5719ba17 100644
+--- a/drivers/pwm/core.c
++++ b/drivers/pwm/core.c
+@@ -311,10 +311,12 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip,
+       if (IS_ENABLED(CONFIG_OF))
+               of_pwmchip_add(chip);
+ 
+-      pwmchip_sysfs_export(chip);
+-
+ out:
+       mutex_unlock(&pwm_lock);
++
++      if (!ret)
++              pwmchip_sysfs_export(chip);
++
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity);
+@@ -348,7 +350,7 @@ int pwmchip_remove(struct pwm_chip *chip)
+       unsigned int i;
+       int ret = 0;
+ 
+-      pwmchip_sysfs_unexport_children(chip);
++      pwmchip_sysfs_unexport(chip);
+ 
+       mutex_lock(&pwm_lock);
+ 
+@@ -368,8 +370,6 @@ int pwmchip_remove(struct pwm_chip *chip)
+ 
+       free_pwms(chip);
+ 
+-      pwmchip_sysfs_unexport(chip);
+-
+ out:
+       mutex_unlock(&pwm_lock);
+       return ret;
+diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
+index c1ed641b3e26..f6e738ad7bd9 100644
+--- a/drivers/pwm/pwm-meson.c
++++ b/drivers/pwm/pwm-meson.c
+@@ -111,6 +111,10 @@ struct meson_pwm {
+       const struct meson_pwm_data *data;
+       void __iomem *base;
+       u8 inverter_mask;
++      /*
++       * Protects register (write) access to the REG_MISC_AB register
++       * that is shared between the two PWMs.
++       */
+       spinlock_t lock;
+ };
+ 
+@@ -235,6 +239,7 @@ static void meson_pwm_enable(struct meson_pwm *meson,
+ {
+       u32 value, clk_shift, clk_enable, enable;
+       unsigned int offset;
++      unsigned long flags;
+ 
+       switch (id) {
+       case 0:
+@@ -255,6 +260,8 @@ static void meson_pwm_enable(struct meson_pwm *meson,
+               return;
+       }
+ 
++      spin_lock_irqsave(&meson->lock, flags);
++
+       value = readl(meson->base + REG_MISC_AB);
+       value &= ~(MISC_CLK_DIV_MASK << clk_shift);
+       value |= channel->pre_div << clk_shift;
+@@ -267,11 +274,14 @@ static void meson_pwm_enable(struct meson_pwm *meson,
+       value = readl(meson->base + REG_MISC_AB);
+       value |= enable;
+       writel(value, meson->base + REG_MISC_AB);
++
++      spin_unlock_irqrestore(&meson->lock, flags);
+ }
+ 
+ static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id)
+ {
+       u32 value, enable;
++      unsigned long flags;
+ 
+       switch (id) {
+       case 0:
+@@ -286,9 +296,13 @@ static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id)
+               return;
+       }
+ 
++      spin_lock_irqsave(&meson->lock, flags);
++
+       value = readl(meson->base + REG_MISC_AB);
+       value &= ~enable;
+       writel(value, meson->base + REG_MISC_AB);
++
++      spin_unlock_irqrestore(&meson->lock, flags);
+ }
+ 
+ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+@@ -296,19 +310,16 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ {
+       struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
+       struct meson_pwm *meson = to_meson_pwm(chip);
+-      unsigned long flags;
+       int err = 0;
+ 
+       if (!state)
+               return -EINVAL;
+ 
+-      spin_lock_irqsave(&meson->lock, flags);
+-
+       if (!state->enabled) {
+               meson_pwm_disable(meson, pwm->hwpwm);
+               channel->state.enabled = false;
+ 
+-              goto unlock;
++              return 0;
+       }
+ 
+       if (state->period != channel->state.period ||
+@@ -329,7 +340,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+               err = meson_pwm_calc(meson, channel, pwm->hwpwm,
+                                    state->duty_cycle, state->period);
+               if (err < 0)
+-                      goto unlock;
++                      return err;
+ 
+               channel->state.polarity = state->polarity;
+               channel->state.period = state->period;
+@@ -341,9 +352,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+               channel->state.enabled = true;
+       }
+ 
+-unlock:
+-      spin_unlock_irqrestore(&meson->lock, flags);
+-      return err;
++      return 0;
+ }
+ 
+ static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
+index f7b8a86fa5c5..ad4a40c0f27c 100644
+--- a/drivers/pwm/pwm-tiehrpwm.c
++++ b/drivers/pwm/pwm-tiehrpwm.c
+@@ -382,6 +382,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+       }
+ 
+       /* Update shadow register first before modifying active register */
++      ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK,
++                    AQSFRC_RLDCSF_ZRO);
+       ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
+       /*
+        * Changes to immediate action on Action Qualifier. This puts
+diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
+index 7c71cdb8a9d8..1c64fd8e9234 100644
+--- a/drivers/pwm/sysfs.c
++++ b/drivers/pwm/sysfs.c
+@@ -399,19 +399,6 @@ void pwmchip_sysfs_export(struct pwm_chip *chip)
+ }
+ 
+ void pwmchip_sysfs_unexport(struct pwm_chip *chip)
+-{
+-      struct device *parent;
+-
+-      parent = class_find_device(&pwm_class, NULL, chip,
+-                                 pwmchip_sysfs_match);
+-      if (parent) {
+-              /* for class_find_device() */
+-              put_device(parent);
+-              device_unregister(parent);
+-      }
+-}
+-
+-void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+ {
+       struct device *parent;
+       unsigned int i;
+@@ -429,6 +416,7 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+       }
+ 
+       put_device(parent);
++      device_unregister(parent);
+ }
+ 
+ static int __init pwm_sysfs_init(void)
+diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
+index bad0e0ea4f30..ef989a15aefc 100644
+--- a/drivers/rapidio/rio_cm.c
++++ b/drivers/rapidio/rio_cm.c
+@@ -2145,6 +2145,14 @@ static int riocm_add_mport(struct device *dev,
+       mutex_init(&cm->rx_lock);
+       riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
+       cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
++      if (!cm->rx_wq) {
++              riocm_error("failed to allocate IBMBOX_%d on %s",
++                          cmbox, mport->name);
++              rio_release_outb_mbox(mport, cmbox);
++              kfree(cm);
++              return -ENOMEM;
++      }
++
+       INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
+ 
+       cm->tx_slot = 0;
+diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
+index de3f2a097451..1f1a05a90d3d 100644
+--- a/drivers/scsi/qla2xxx/qla_gs.c
++++ b/drivers/scsi/qla2xxx/qla_gs.c
+@@ -3261,6 +3261,8 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
+           "Async done-%s res %x, WWPN %8phC \n",
+           sp->name, res, fcport->port_name);
+ 
++      fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
++
+       if (res == QLA_FUNCTION_TIMEOUT)
+               return;
+ 
+@@ -4604,6 +4606,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
+ 
+ done_free_sp:
+       sp->free(sp);
++      fcport->flags &= ~FCF_ASYNC_SENT;
+ done:
+       return rval;
+ }
+diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
+index 4e931fdf4d09..011a40b5fb49 100644
+--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
++++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
+@@ -1104,7 +1104,7 @@ static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp)
+ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
+ {
+       int ret;
+-      u32 rdata;
++      u32 rdata = 0;
+ 
+       pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST);
+       pwrap_writel(wrp, 0x0, PWRAP_CIPHER_SWRST);
+diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
+index d44d0e687ab8..2a43d6e99962 100644
+--- a/drivers/soc/renesas/renesas-soc.c
++++ b/drivers/soc/renesas/renesas-soc.c
+@@ -285,6 +285,9 @@ static int __init renesas_soc_init(void)
+               /* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */
+               if ((product & 0x7fff) == 0x5210)
+                       product ^= 0x11;
++              /* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */
++              if ((product & 0x7fff) == 0x5211)
++                      product ^= 0x12;
+               if (soc->id && ((product >> 8) & 0xff) != soc->id) {
+                       pr_warn("SoC mismatch (product = 0x%x)\n", product);
+                       return -ENODEV;
+diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
+index 96882ffde67e..3b81e1d75a97 100644
+--- a/drivers/soc/rockchip/grf.c
++++ b/drivers/soc/rockchip/grf.c
+@@ -66,9 +66,11 @@ static const struct rockchip_grf_info rk3228_grf __initconst = {
+ };
+ 
+ #define RK3288_GRF_SOC_CON0           0x244
++#define RK3288_GRF_SOC_CON2           0x24c
+ 
+ static const struct rockchip_grf_value rk3288_defaults[] __initconst = {
+       { "jtag switching", RK3288_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 12) },
++      { "pwm select", RK3288_GRF_SOC_CON2, HIWORD_UPDATE(1, 1, 0) },
+ };
+ 
+ static const struct rockchip_grf_info rk3288_grf __initconst = {
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index 729be74621e3..f41333817c50 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -1416,12 +1416,7 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
+ 
+ static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
+ {
+-      struct device *dev = param;
+-
+-      if (dev != chan->device->dev->parent)
+-              return false;
+-
+-      return true;
++      return param == chan->device->dev;
+ }
+ 
+ static struct pxa2xx_spi_master *
+diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
+index a2c9bfae3d86..b139713289a4 100644
+--- a/drivers/thermal/qcom/tsens.c
++++ b/drivers/thermal/qcom/tsens.c
+@@ -171,7 +171,8 @@ static int tsens_probe(struct platform_device *pdev)
+       if (tmdev->ops->calibrate) {
+               ret = tmdev->ops->calibrate(tmdev);
+               if (ret < 0) {
+-                      dev_err(dev, "tsens calibration failed\n");
++                      if (ret != -EPROBE_DEFER)
++                              dev_err(dev, "tsens calibration failed\n");
+                       return ret;
+               }
+       }
+diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
+index 7aed5337bdd3..704c8ad045bb 100644
+--- a/drivers/thermal/rcar_gen3_thermal.c
++++ b/drivers/thermal/rcar_gen3_thermal.c
+@@ -328,6 +328,9 @@ MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids);
+ static int rcar_gen3_thermal_remove(struct platform_device *pdev)
+ {
+       struct device *dev = &pdev->dev;
++      struct rcar_gen3_thermal_priv *priv = dev_get_drvdata(dev);
++
++      rcar_thermal_irq_set(priv, false);
+ 
+       pm_runtime_put(dev);
+       pm_runtime_disable(dev);
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index d31b975dd3fd..284e8d052fc3 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -365,7 +365,7 @@ static bool dw8250_fallback_dma_filter(struct dma_chan *chan, void *param)
+ 
+ static bool dw8250_idma_filter(struct dma_chan *chan, void *param)
+ {
+-      return param == chan->device->dev->parent;
++      return param == chan->device->dev;
+ }
+ 
+ /*
+@@ -434,7 +434,7 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
+               data->uart_16550_compatible = true;
+       }
+ 
+-      /* Platforms with iDMA */
++      /* Platforms with iDMA 64-bit */
+       if (platform_get_resource_byname(to_platform_device(p->dev),
+                                        IORESOURCE_MEM, "lpss_priv")) {
+               data->dma.rx_param = p->dev->parent;
+diff --git a/drivers/usb/typec/fusb302/fusb302.c b/drivers/usb/typec/fusb302/fusb302.c
+index 82bed9810be6..62a0060d39d8 100644
+--- a/drivers/usb/typec/fusb302/fusb302.c
++++ b/drivers/usb/typec/fusb302/fusb302.c
+@@ -641,6 +641,8 @@ static int fusb302_set_toggling(struct fusb302_chip *chip,
+                       return ret;
+               chip->intr_togdone = false;
+       } else {
++              /* Datasheet says vconn MUST be off when toggling */
++              WARN(chip->vconn_on, "Vconn is on during toggle start");
+               /* unmask TOGDONE interrupt */
+               ret = fusb302_i2c_clear_bits(chip, FUSB_REG_MASKA,
+                                            FUSB_REG_MASKA_TOGDONE);
+diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
+index 64833879f75d..7a386fb30bf1 100644
+--- a/drivers/vfio/vfio.c
++++ b/drivers/vfio/vfio.c
+@@ -34,6 +34,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/vfio.h>
+ #include <linux/wait.h>
++#include <linux/sched/signal.h>
+ 
+ #define DRIVER_VERSION        "0.3"
+ #define DRIVER_AUTHOR "Alex Williamson <[email protected]>"
+@@ -904,30 +905,17 @@ void *vfio_device_data(struct vfio_device *device)
+ }
+ EXPORT_SYMBOL_GPL(vfio_device_data);
+ 
+-/* Given a referenced group, check if it contains the device */
+-static bool vfio_dev_present(struct vfio_group *group, struct device *dev)
+-{
+-      struct vfio_device *device;
+-
+-      device = vfio_group_get_device(group, dev);
+-      if (!device)
+-              return false;
+-
+-      vfio_device_put(device);
+-      return true;
+-}
+-
+ /*
+  * Decrement the device reference count and wait for the device to be
+  * removed.  Open file descriptors for the device... */
+ void *vfio_del_group_dev(struct device *dev)
+ {
++      DEFINE_WAIT_FUNC(wait, woken_wake_function);
+       struct vfio_device *device = dev_get_drvdata(dev);
+       struct vfio_group *group = device->group;
+       void *device_data = device->device_data;
+       struct vfio_unbound_dev *unbound;
+       unsigned int i = 0;
+-      long ret;
+       bool interrupted = false;
+ 
+       /*
+@@ -964,6 +952,8 @@ void *vfio_del_group_dev(struct device *dev)
+        * interval with counter to allow the driver to take escalating
+        * measures to release the device if it has the ability to do so.
+        */
++      add_wait_queue(&vfio.release_q, &wait);
++
+       do {
+               device = vfio_group_get_device(group, dev);
+               if (!device)
+@@ -975,12 +965,10 @@ void *vfio_del_group_dev(struct device *dev)
+               vfio_device_put(device);
+ 
+               if (interrupted) {
+-                      ret = wait_event_timeout(vfio.release_q,
+-                                      !vfio_dev_present(group, dev), HZ * 10);
++                      wait_woken(&wait, TASK_UNINTERRUPTIBLE, HZ * 10);
+               } else {
+-                      ret = wait_event_interruptible_timeout(vfio.release_q,
+-                                      !vfio_dev_present(group, dev), HZ * 10);
+-                      if (ret == -ERESTARTSYS) {
++                      wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10);
++                      if (signal_pending(current)) {
+                               interrupted = true;
+                               dev_warn(dev,
+                                        "Device is currently in use, task"
+@@ -989,8 +977,10 @@ void *vfio_del_group_dev(struct device *dev)
+                                        current->comm, task_pid_nr(current));
+                       }
+               }
+-      } while (ret <= 0);
+ 
++      } while (1);
++
++      remove_wait_queue(&vfio.release_q, &wait);
+       /*
+        * In order to support multiple devices per group, devices can be
+        * plucked from the group while other devices in the group are still
+diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
+index 463028543173..59e1cae57948 100644
+--- a/drivers/video/fbdev/hgafb.c
++++ b/drivers/video/fbdev/hgafb.c
+@@ -285,6 +285,8 @@ static int hga_card_detect(void)
+       hga_vram_len  = 0x08000;
+ 
+       hga_vram = ioremap(0xb0000, hga_vram_len);
++      if (!hga_vram)
++              goto error;
+ 
+       if (request_region(0x3b0, 12, "hgafb"))
+               release_io_ports = 1;
+diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
+index ecdcf358ad5e..ffcf553719a3 100644
+--- a/drivers/video/fbdev/imsttfb.c
++++ b/drivers/video/fbdev/imsttfb.c
+@@ -1516,6 +1516,11 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+       info->fix.smem_start = addr;
+       info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
+                                           0x400000 : 0x800000);
++      if (!info->screen_base) {
++              release_mem_region(addr, size);
++              framebuffer_release(info);
++              return -ENOMEM;
++      }
+       info->fix.mmio_start = addr + 0x800000;
+       par->dc_regs = ioremap(addr + 0x800000, 0x1000);
+       par->cmap_regs_phys = addr + 0x840000;
+diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
+index 5ea8909a41f9..b165c46aca74 100644
+--- a/drivers/watchdog/Kconfig
++++ b/drivers/watchdog/Kconfig
+@@ -1967,6 +1967,7 @@ comment "Watchdog Pretimeout Governors"
+ 
+ config WATCHDOG_PRETIMEOUT_GOV
+       bool "Enable watchdog pretimeout governors"
++      depends on WATCHDOG_CORE
+       help
+         The option allows to select watchdog pretimeout governors.
+ 
+diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
+index 2b52514eaa86..7e7bdcbbc741 100644
+--- a/drivers/watchdog/imx2_wdt.c
++++ b/drivers/watchdog/imx2_wdt.c
+@@ -178,8 +178,10 @@ static void __imx2_wdt_set_timeout(struct watchdog_device *wdog,
+ static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
+                               unsigned int new_timeout)
+ {
+-      __imx2_wdt_set_timeout(wdog, new_timeout);
++      unsigned int actual;
+ 
++      actual = min(new_timeout, wdog->max_hw_heartbeat_ms * 1000);
++      __imx2_wdt_set_timeout(wdog, actual);
+       wdog->timeout = new_timeout;
+       return 0;
+ }
+diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
+index 39843fa7e11b..920d350df37b 100644
+--- a/fs/configfs/dir.c
++++ b/fs/configfs/dir.c
+@@ -1755,12 +1755,19 @@ int configfs_register_group(struct config_group *parent_group,
+ 
+       inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
+       ret = create_default_group(parent_group, group);
+-      if (!ret) {
+-              spin_lock(&configfs_dirent_lock);
+-              configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
+-              spin_unlock(&configfs_dirent_lock);
+-      }
++      if (ret)
++              goto err_out;
++
++      spin_lock(&configfs_dirent_lock);
++      configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
++      spin_unlock(&configfs_dirent_lock);
++      inode_unlock(d_inode(parent));
++      return 0;
++err_out:
+       inode_unlock(d_inode(parent));
++      mutex_lock(&subsys->su_mutex);
++      unlink_group(group);
++      mutex_unlock(&subsys->su_mutex);
+       return ret;
+ }
+ EXPORT_SYMBOL(configfs_register_group);
+diff --git a/fs/dax.c b/fs/dax.c
+index 004c8ac1117c..75a289c31c7e 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -908,7 +908,7 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
+                               goto unlock_pmd;
+ 
+                       flush_cache_page(vma, address, pfn);
+-                      pmd = pmdp_huge_clear_flush(vma, address, pmdp);
++                      pmd = pmdp_invalidate(vma, address, pmdp);
+                       pmd = pmd_wrprotect(pmd);
+                       pmd = pmd_mkclean(pmd);
+                       set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index a4b6eacf22ea..44ea7ac69ef4 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -1744,6 +1744,7 @@ enospc:
+       return -ENOSPC;
+ }
+ 
++void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
+                                               struct inode *inode,
+                                               block_t count)
+@@ -1752,13 +1753,21 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
+ 
+       spin_lock(&sbi->stat_lock);
+       f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
+-      f2fs_bug_on(sbi, inode->i_blocks < sectors);
+       sbi->total_valid_block_count -= (block_t)count;
+       if (sbi->reserved_blocks &&
+               sbi->current_reserved_blocks < sbi->reserved_blocks)
+               sbi->current_reserved_blocks = min(sbi->reserved_blocks,
+                                       sbi->current_reserved_blocks + count);
+       spin_unlock(&sbi->stat_lock);
++      if (unlikely(inode->i_blocks < sectors)) {
++              f2fs_msg(sbi->sb, KERN_WARNING,
++                      "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
++                      inode->i_ino,
++                      (unsigned long long)inode->i_blocks,
++                      (unsigned long long)sectors);
++              set_sbi_flag(sbi, SBI_NEED_FSCK);
++              return;
++      }
+       f2fs_i_blocks_write(inode, count, false, true);
+ }
+ 
+@@ -2488,7 +2497,9 @@ static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
+ 
+ static inline int inline_xattr_size(struct inode *inode)
+ {
+-      return get_inline_xattr_addrs(inode) * sizeof(__le32);
++      if (f2fs_has_inline_xattr(inode))
++              return get_inline_xattr_addrs(inode) * sizeof(__le32);
++      return 0;
+ }
+ 
+ static inline int f2fs_has_inline_data(struct inode *inode)
+@@ -2727,7 +2738,6 @@ static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
+ 
+ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+                                       block_t blkaddr, int type);
+-void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+ static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
+                                       block_t blkaddr, int type)
+ {
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index dd608b819a3c..0f31df01e36c 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -179,8 +179,8 @@ bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
+ 
+       if (provided != calculated)
+               f2fs_msg(sbi->sb, KERN_WARNING,
+-                      "checksum invalid, ino = %x, %x vs. %x",
+-                      ino_of_node(page), provided, calculated);
++                      "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
++                      page->index, ino_of_node(page), provided, calculated);
+ 
+       return provided == calculated;
+ }
+@@ -476,6 +476,7 @@ make_now:
+       return inode;
+ 
+ bad_inode:
++      f2fs_inode_synced(inode);
+       iget_failed(inode);
+       trace_f2fs_iget_exit(inode, ret);
+       return ERR_PTR(ret);
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 19a0d83aae65..e2d9edad758c 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -1180,8 +1180,14 @@ int f2fs_remove_inode_page(struct inode *inode)
+               f2fs_put_dnode(&dn);
+               return -EIO;
+       }
+-      f2fs_bug_on(F2FS_I_SB(inode),
+-                      inode->i_blocks != 0 && inode->i_blocks != 8);
++
++      if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
++              f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
++                      "Inconsistent i_blocks, ino:%lu, iblocks:%llu",
++                      inode->i_ino,
++                      (unsigned long long)inode->i_blocks);
++              set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
++      }
+ 
+       /* will put inode & node pages */
+       err = truncate_node(&dn);
+@@ -1276,9 +1282,10 @@ static int read_node_page(struct page *page, int op_flags)
+       int err;
+ 
+       if (PageUptodate(page)) {
+-#ifdef CONFIG_F2FS_CHECK_FS
+-              f2fs_bug_on(sbi, !f2fs_inode_chksum_verify(sbi, page));
+-#endif
++              if (!f2fs_inode_chksum_verify(sbi, page)) {
++                      ClearPageUptodate(page);
++                      return -EBADMSG;
++              }
+               return LOCKED_PAGE;
+       }
+ 
+@@ -2073,6 +2080,9 @@ static bool add_free_nid(struct f2fs_sb_info *sbi,
+       if (unlikely(nid == 0))
+               return false;
+ 
++      if (unlikely(f2fs_check_nid_range(sbi, nid)))
++              return false;
++
+       i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
+       i->nid = nid;
+       i->state = FREE_NID;
+diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
+index ae0e5f2e67b4..bf5c5f4fa77e 100644
+--- a/fs/f2fs/recovery.c
++++ b/fs/f2fs/recovery.c
+@@ -485,7 +485,15 @@ retry_dn:
+               goto err;
+ 
+       f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
+-      f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
++
++      if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
++              f2fs_msg(sbi->sb, KERN_WARNING,
++                      "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
++                      inode->i_ino, ofs_of_node(dn.node_page),
++                      ofs_of_node(page));
++              err = -EFAULT;
++              goto err;
++      }
+ 
+       for (; start < end; start++, dn.ofs_in_node++) {
+               block_t src, dest;
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 03fa2c4d3d79..8fc3edb6760c 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -3069,13 +3069,18 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
+ {
+       int err;
+       struct f2fs_sb_info *sbi = fio->sbi;
++      unsigned int segno;
+ 
+       fio->new_blkaddr = fio->old_blkaddr;
+       /* i/o temperature is needed for passing down write hints */
+       __get_segment_type(fio);
+ 
+-      f2fs_bug_on(sbi, !IS_DATASEG(get_seg_entry(sbi,
+-                      GET_SEGNO(sbi, fio->new_blkaddr))->type));
++      segno = GET_SEGNO(sbi, fio->new_blkaddr);
++
++      if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
++              set_sbi_flag(sbi, SBI_NEED_FSCK);
++              return -EFAULT;
++      }
+ 
+       stat_inc_inplace_blocks(fio->sbi);
+ 
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index b3d9e317ff0c..5079532cb176 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -660,7 +660,6 @@ static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr)
+ static inline int check_block_count(struct f2fs_sb_info *sbi,
+               int segno, struct f2fs_sit_entry *raw_sit)
+ {
+-#ifdef CONFIG_F2FS_CHECK_FS
+       bool is_valid  = test_bit_le(0, raw_sit->valid_map) ? true : false;
+       int valid_blocks = 0;
+       int cur_pos = 0, next_pos;
+@@ -687,7 +686,7 @@ static inline int check_block_count(struct f2fs_sb_info *sbi,
+               set_sbi_flag(sbi, SBI_NEED_FSCK);
+               return -EINVAL;
+       }
+-#endif
++
+       /* check segment usage, and check boundary of a given segment number */
+       if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
+                                       || segno > TOTAL_SEGS(sbi) - 1)) {
+diff --git a/fs/fat/file.c b/fs/fat/file.c
+index 4f3d72fb1e60..f86ea08bd6ce 100644
+--- a/fs/fat/file.c
++++ b/fs/fat/file.c
+@@ -193,12 +193,17 @@ static int fat_file_release(struct inode *inode, struct file *filp)
+ int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
+ {
+       struct inode *inode = filp->f_mapping->host;
+-      int res, err;
++      int err;
++
++      err = __generic_file_fsync(filp, start, end, datasync);
++      if (err)
++              return err;
+ 
+-      res = generic_file_fsync(filp, start, end, datasync);
+       err = sync_mapping_buffers(MSDOS_SB(inode->i_sb)->fat_inode->i_mapping);
++      if (err)
++              return err;
+ 
+-      return res ? res : err;
++      return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ }
+ 
+ 
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 249de20f752a..6ee471b72a34 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1681,7 +1681,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
+       offset = outarg->offset & ~PAGE_MASK;
+       file_size = i_size_read(inode);
+ 
+-      num = outarg->size;
++      num = min(outarg->size, fc->max_write);
+       if (outarg->offset > file_size)
+               num = 0;
+       else if (outarg->offset + num > file_size)
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 418fa9c78186..db0beefe65ec 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2413,8 +2413,10 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
+       __be32 status;
+       int err;
+       struct nfs4_acl *acl = NULL;
++#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+       void *context = NULL;
+       int contextlen;
++#endif
+       bool contextsupport = false;
+       struct nfsd4_compoundres *resp = rqstp->rq_resp;
+       u32 minorversion = resp->cstate.minorversion;
+@@ -2899,12 +2901,14 @@ out_acl:
+                       *p++ = cpu_to_be32(NFS4_CHANGE_TYPE_IS_TIME_METADATA);
+       }
+ 
++#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+       if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
+               status = nfsd4_encode_security_label(xdr, rqstp, context,
+                                                               contextlen);
+               if (status)
+                       goto out;
+       }
++#endif
+ 
+       attrlen = htonl(xdr->buf->len - attrlen_offset - 4);
+       write_bytes_to_xdr_buf(xdr->buf, attrlen_offset, &attrlen, 4);
+diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
+index a7e107309f76..db351247892d 100644
+--- a/fs/nfsd/vfs.h
++++ b/fs/nfsd/vfs.h
+@@ -120,8 +120,11 @@ void              nfsd_put_raparams(struct file *file, struct raparms *ra);
+ 
+ static inline int fh_want_write(struct svc_fh *fh)
+ {
+-      int ret = mnt_want_write(fh->fh_export->ex_path.mnt);
++      int ret;
+ 
++      if (fh->fh_want_write)
++              return 0;
++      ret = mnt_want_write(fh->fh_export->ex_path.mnt);
+       if (!ret)
+               fh->fh_want_write = true;
+       return ret;
+diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
+index 0c810f20f778..00338b828f76 100644
+--- a/fs/overlayfs/file.c
++++ b/fs/overlayfs/file.c
+@@ -11,6 +11,7 @@
+ #include <linux/mount.h>
+ #include <linux/xattr.h>
+ #include <linux/uio.h>
++#include <linux/uaccess.h>
+ #include "overlayfs.h"
+ 
+ static char ovl_whatisit(struct inode *inode, struct inode *realinode)
+@@ -29,10 +30,11 @@ static struct file *ovl_open_realfile(const struct file *file,
+       struct inode *inode = file_inode(file);
+       struct file *realfile;
+       const struct cred *old_cred;
++      int flags = file->f_flags | O_NOATIME | FMODE_NONOTIFY;
+ 
+       old_cred = ovl_override_creds(inode->i_sb);
+-      realfile = open_with_fake_path(&file->f_path, file->f_flags | O_NOATIME,
+-                                     realinode, current_cred());
++      realfile = open_with_fake_path(&file->f_path, flags, realinode,
++                                     current_cred());
+       revert_creds(old_cred);
+ 
+       pr_debug("open(%p[%pD2/%c], 0%o) -> (%p, 0%o)\n",
+@@ -50,7 +52,7 @@ static int ovl_change_flags(struct file *file, unsigned int flags)
+       int err;
+ 
+       /* No atime modificaton on underlying */
+-      flags |= O_NOATIME;
++      flags |= O_NOATIME | FMODE_NONOTIFY;
+ 
+       /* If some flag changed that cannot be changed then something's amiss */
+       if (WARN_ON((file->f_flags ^ flags) & ~OVL_SETFL_MASK))
+@@ -144,11 +146,47 @@ static int ovl_release(struct inode *inode, struct file *file)
+ 
+ static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
+ {
+-      struct inode *realinode = ovl_inode_real(file_inode(file));
++      struct inode *inode = file_inode(file);
++      struct fd real;
++      const struct cred *old_cred;
++      ssize_t ret;
++
++      /*
++       * The two special cases below do not need to involve real fs,
++       * so we can optimizing concurrent callers.
++       */
++      if (offset == 0) {
++              if (whence == SEEK_CUR)
++                      return file->f_pos;
++
++              if (whence == SEEK_SET)
++                      return vfs_setpos(file, 0, 0);
++      }
++
++      ret = ovl_real_fdget(file, &real);
++      if (ret)
++              return ret;
++
++      /*
++       * Overlay file f_pos is the master copy that is preserved
++       * through copy up and modified on read/write, but only real
++       * fs knows how to SEEK_HOLE/SEEK_DATA and real fs may impose
++       * limitations that are more strict than ->s_maxbytes for specific
++       * files, so we use the real file to perform seeks.
++       */
++      inode_lock(inode);
++      real.file->f_pos = file->f_pos;
++
++      old_cred = ovl_override_creds(inode->i_sb);
++      ret = vfs_llseek(real.file, offset, whence);
++      revert_creds(old_cred);
++
++      file->f_pos = real.file->f_pos;
++      inode_unlock(inode);
++
++      fdput(real);
+ 
+-      return generic_file_llseek_size(file, offset, whence,
+-                                      realinode->i_sb->s_maxbytes,
+-                                      i_size_read(realinode));
++      return ret;
+ }
+ 
+ static void ovl_file_accessed(struct file *file)
+@@ -371,10 +409,68 @@ static long ovl_real_ioctl(struct file *file, unsigned int cmd,
+       return ret;
+ }
+ 
+-static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++static unsigned int ovl_get_inode_flags(struct inode *inode)
++{
++      unsigned int flags = READ_ONCE(inode->i_flags);
++      unsigned int ovl_iflags = 0;
++
++      if (flags & S_SYNC)
++              ovl_iflags |= FS_SYNC_FL;
++      if (flags & S_APPEND)
++              ovl_iflags |= FS_APPEND_FL;
++      if (flags & S_IMMUTABLE)
++              ovl_iflags |= FS_IMMUTABLE_FL;
++      if (flags & S_NOATIME)
++              ovl_iflags |= FS_NOATIME_FL;
++
++      return ovl_iflags;
++}
++
++static long ovl_ioctl_set_flags(struct file *file, unsigned long arg)
+ {
+       long ret;
+       struct inode *inode = file_inode(file);
++      unsigned int flags;
++      unsigned int old_flags;
++
++      if (!inode_owner_or_capable(inode))
++              return -EACCES;
++
++      if (get_user(flags, (int __user *) arg))
++              return -EFAULT;
++
++      ret = mnt_want_write_file(file);
++      if (ret)
++              return ret;
++
++      inode_lock(inode);
++
++      /* Check the capability before cred override */
++      ret = -EPERM;
++      old_flags = ovl_get_inode_flags(inode);
++      if (((flags ^ old_flags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) &&
++          !capable(CAP_LINUX_IMMUTABLE))
++              goto unlock;
++
++      ret = ovl_maybe_copy_up(file_dentry(file), O_WRONLY);
++      if (ret)
++              goto unlock;
++
++      ret = ovl_real_ioctl(file, FS_IOC_SETFLAGS, arg);
++
++      ovl_copyflags(ovl_inode_real(inode), inode);
++unlock:
++      inode_unlock(inode);
++
++      mnt_drop_write_file(file);
++
++      return ret;
++
++}
++
++static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++      long ret;
+ 
+       switch (cmd) {
+       case FS_IOC_GETFLAGS:
+@@ -382,23 +478,7 @@ static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+               break;
+ 
+       case FS_IOC_SETFLAGS:
+-              if (!inode_owner_or_capable(inode))
+-                      return -EACCES;
+-
+-              ret = mnt_want_write_file(file);
+-              if (ret)
+-                      return ret;
+-
+-              ret = ovl_maybe_copy_up(file_dentry(file), O_WRONLY);
+-              if (!ret) {
+-                      ret = ovl_real_ioctl(file, cmd, arg);
+-
+-                      inode_lock(inode);
+-                      ovl_copyflags(ovl_inode_real(inode), inode);
+-                      inode_unlock(inode);
+-              }
+-
+-              mnt_drop_write_file(file);
++              ret = ovl_ioctl_set_flags(file, arg);
+               break;
+ 
+       default:
+diff --git a/include/linux/pwm.h b/include/linux/pwm.h
+index 56518adc31dd..bd7d611d63e9 100644
+--- a/include/linux/pwm.h
++++ b/include/linux/pwm.h
+@@ -639,7 +639,6 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
+ #ifdef CONFIG_PWM_SYSFS
+ void pwmchip_sysfs_export(struct pwm_chip *chip);
+ void pwmchip_sysfs_unexport(struct pwm_chip *chip);
+-void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
+ #else
+ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
+ {
+@@ -648,10 +647,6 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
+ static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
+ {
+ }
+-
+-static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+-{
+-}
+ #endif /* CONFIG_PWM_SYSFS */
+ 
+ #endif /* __LINUX_PWM_H */
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index cc2d0c3b475b..1dfb75057580 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -182,9 +182,6 @@ struct adv_info {
+ 
+ #define HCI_MAX_SHORT_NAME_LENGTH     10
+ 
+-/* Min encryption key size to match with SMP */
+-#define HCI_MIN_ENC_KEY_SIZE          7
+-
+ /* Default LE RPA expiry time, 15 minutes */
+ #define HCI_DEFAULT_RPA_TIMEOUT               (15 * 60)
+ 
+diff --git a/init/initramfs.c b/init/initramfs.c
+index f6f4a1e4cd54..cd5fb00fcb54 100644
+--- a/init/initramfs.c
++++ b/init/initramfs.c
+@@ -612,13 +612,12 @@ static int __init populate_rootfs(void)
+               printk(KERN_INFO "Trying to unpack rootfs image as initramfs...\n");
+               err = unpack_to_rootfs((char *)initrd_start,
+                       initrd_end - initrd_start);
+-              if (!err) {
+-                      free_initrd();
++              if (!err)
+                       goto done;
+-              } else {
+-                      clean_rootfs();
+-                      unpack_to_rootfs(__initramfs_start, __initramfs_size);
+-              }
++
++              clean_rootfs();
++              unpack_to_rootfs(__initramfs_start, __initramfs_size);
++
+               printk(KERN_INFO "rootfs image is not initramfs (%s)"
+                               "; looks like an initrd\n", err);
+               fd = ksys_open("/initrd.image",
+@@ -632,7 +631,6 @@ static int __init populate_rootfs(void)
+                                      written, initrd_end - initrd_start);
+ 
+                       ksys_close(fd);
+-                      free_initrd();
+               }
+       done:
+               /* empty statement */;
+@@ -642,9 +640,9 @@ static int __init populate_rootfs(void)
+                       initrd_end - initrd_start);
+               if (err)
+                       printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
+-              free_initrd();
+ #endif
+       }
++      free_initrd();
+       flush_delayed_fput();
+       /*
+        * Try loading default modules from initramfs.  This gives
+diff --git a/ipc/mqueue.c b/ipc/mqueue.c
+index c0d58f390c3b..bce7af1546d9 100644
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -391,7 +391,8 @@ static void mqueue_evict_inode(struct inode *inode)
+       struct user_struct *user;
+       unsigned long mq_bytes, mq_treesize;
+       struct ipc_namespace *ipc_ns;
+-      struct msg_msg *msg;
++      struct msg_msg *msg, *nmsg;
++      LIST_HEAD(tmp_msg);
+ 
+       clear_inode(inode);
+ 
+@@ -402,10 +403,15 @@ static void mqueue_evict_inode(struct inode *inode)
+       info = MQUEUE_I(inode);
+       spin_lock(&info->lock);
+       while ((msg = msg_get(info)) != NULL)
+-              free_msg(msg);
++              list_add_tail(&msg->m_list, &tmp_msg);
+       kfree(info->node_cache);
+       spin_unlock(&info->lock);
+ 
++      list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
++              list_del(&msg->m_list);
++              free_msg(msg);
++      }
++
+       /* Total amount of bytes accounted for the mqueue */
+       mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
+               min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
+diff --git a/ipc/msgutil.c b/ipc/msgutil.c
+index 84598025a6ad..e65593742e2b 100644
+--- a/ipc/msgutil.c
++++ b/ipc/msgutil.c
+@@ -18,6 +18,7 @@
+ #include <linux/utsname.h>
+ #include <linux/proc_ns.h>
+ #include <linux/uaccess.h>
++#include <linux/sched.h>
+ 
+ #include "util.h"
+ 
+@@ -64,6 +65,9 @@ static struct msg_msg *alloc_msg(size_t len)
+       pseg = &msg->next;
+       while (len > 0) {
+               struct msg_msgseg *seg;
++
++              cond_resched();
++
+               alen = min(len, DATALEN_SEG);
+               seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL_ACCOUNT);
+               if (seg == NULL)
+@@ -176,6 +180,8 @@ void free_msg(struct msg_msg *msg)
+       kfree(msg);
+       while (seg != NULL) {
+               struct msg_msgseg *tmp = seg->next;
++
++              cond_resched();
+               kfree(seg);
+               seg = tmp;
+       }
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index acc2305ad895..d3580a68dbef 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5743,7 +5743,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
+                                                                       insn->dst_reg,
+                                                                       shift);
+                               insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
+-                                                              (1 << size * 8) - 1);
++                                                              (1ULL << size * 8) - 1);
+                       }
+               }
+ 
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 123bd73046ec..096932a45046 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1919,7 +1919,7 @@ static int validate_prctl_map(struct prctl_mm_map *prctl_map)
+       ((unsigned long)prctl_map->__m1 __op                            \
+        (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
+       error  = __prctl_check_order(start_code, <, end_code);
+-      error |= __prctl_check_order(start_data, <, end_data);
++      error |= __prctl_check_order(start_data,<=, end_data);
+       error |= __prctl_check_order(start_brk, <=, brk);
+       error |= __prctl_check_order(arg_start, <=, arg_end);
+       error |= __prctl_check_order(env_start, <=, env_end);
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 9a85c7ae7362..f8576509c7be 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -2791,8 +2791,10 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
+                       if (neg)
+                               continue;
+                       val = convmul * val / convdiv;
+-                      if ((min && val < *min) || (max && val > *max))
+-                              continue;
++                      if ((min && val < *min) || (max && val > *max)) {
++                              err = -EINVAL;
++                              break;
++                      }
+                       *i = val;
+               } else {
+                       val = convdiv * (*i) / convmul;
+diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
+index c5e0cba3b39c..6b23cd584295 100644
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -698,7 +698,7 @@ static inline void process_adjtimex_modes(const struct timex *txc, s32 *time_tai
+               time_constant = max(time_constant, 0l);
+       }
+ 
+-      if (txc->modes & ADJ_TAI && txc->constant > 0)
++      if (txc->modes & ADJ_TAI && txc->constant >= 0)
+               *time_tai = txc->constant;
+ 
+       if (txc->modes & ADJ_OFFSET)
+diff --git a/mm/Kconfig b/mm/Kconfig
+index de64ea658716..b457e94ae618 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -700,12 +700,12 @@ config DEV_PAGEMAP_OPS
+ 
+ config HMM
+       bool
++      select MMU_NOTIFIER
+       select MIGRATE_VMA_HELPER
+ 
+ config HMM_MIRROR
+       bool "HMM mirror CPU page table into a device page table"
+       depends on ARCH_HAS_HMM
+-      select MMU_NOTIFIER
+       select HMM
+       help
+         Select HMM_MIRROR if you want to mirror range of the CPU page table of a
+diff --git a/mm/cma.c b/mm/cma.c
+index bfe9f5397165..476dfe13a701 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -106,8 +106,10 @@ static int __init cma_activate_area(struct cma *cma)
+ 
+       cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ 
+-      if (!cma->bitmap)
++      if (!cma->bitmap) {
++              cma->count = 0;
+               return -ENOMEM;
++      }
+ 
+       WARN_ON_ONCE(!pfn_valid(pfn));
+       zone = page_zone(pfn_to_page(pfn));
+@@ -369,23 +371,26 @@ err:
+ #ifdef CONFIG_CMA_DEBUG
+ static void cma_debug_show_areas(struct cma *cma)
+ {
+-      unsigned long next_zero_bit, next_set_bit;
++      unsigned long next_zero_bit, next_set_bit, nr_zero;
+       unsigned long start = 0;
+-      unsigned int nr_zero, nr_total = 0;
++      unsigned long nr_part, nr_total = 0;
++      unsigned long nbits = cma_bitmap_maxno(cma);
+ 
+       mutex_lock(&cma->lock);
+       pr_info("number of available pages: ");
+       for (;;) {
+-              next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
+-              if (next_zero_bit >= cma->count)
++              next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
++              if (next_zero_bit >= nbits)
+                       break;
+-              next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
++              next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
+               nr_zero = next_set_bit - next_zero_bit;
+-              pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
+-              nr_total += nr_zero;
++              nr_part = nr_zero << cma->order_per_bit;
++              pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
++                      next_zero_bit);
++              nr_total += nr_part;
+               start = next_zero_bit + nr_zero;
+       }
+-      pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
++      pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
+       mutex_unlock(&cma->lock);
+ }
+ #else
+diff --git a/mm/cma_debug.c b/mm/cma_debug.c
+index ad6723e9d110..3e0415076cc9 100644
+--- a/mm/cma_debug.c
++++ b/mm/cma_debug.c
+@@ -58,7 +58,7 @@ static int cma_maxchunk_get(void *data, u64 *val)
+       mutex_lock(&cma->lock);
+       for (;;) {
+               start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
+-              if (start >= cma->count)
++              if (start >= bitmap_maxno)
+                       break;
+               end = find_next_bit(cma->bitmap, bitmap_maxno, start);
+               maxchunk = max(end - start, maxchunk);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 0bbb033d7d8c..65179513c2b2 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1256,12 +1256,23 @@ void free_huge_page(struct page *page)
+       ClearPagePrivate(page);
+ 
+       /*
+-       * A return code of zero implies that the subpool will be under its
+-       * minimum size if the reservation is not restored after page is free.
+-       * Therefore, force restore_reserve operation.
++       * If PagePrivate() was set on page, page allocation consumed a
++       * reservation.  If the page was associated with a subpool, there
++       * would have been a page reserved in the subpool before allocation
++       * via hugepage_subpool_get_pages().  Since we are 'restoring' the
++       * reservtion, do not call hugepage_subpool_put_pages() as this will
++       * remove the reserved page from the subpool.
+        */
+-      if (hugepage_subpool_put_pages(spool, 1) == 0)
+-              restore_reserve = true;
++      if (!restore_reserve) {
++              /*
++               * A return code of zero implies that the subpool will be
++               * under its minimum size if the reservation is not restored
++               * after page is free.  Therefore, force restore_reserve
++               * operation.
++               */
++              if (hugepage_subpool_put_pages(spool, 1) == 0)
++                      restore_reserve = true;
++      }
+ 
+       spin_lock(&hugetlb_lock);
+       clear_page_huge_active(page);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 8e6932a140b8..2d04bd2e1ced 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5937,13 +5937,15 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
+                                       unsigned long *zone_end_pfn,
+                                       unsigned long *ignored)
+ {
++      unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
++      unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
+       /* When hotadd a new node from cpu_up(), the node should be empty */
+       if (!node_start_pfn && !node_end_pfn)
+               return 0;
+ 
+       /* Get the start and end of the zone */
+-      *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
+-      *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
++      *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
++      *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
+       adjust_zone_range_for_zone_movable(nid, zone_type,
+                               node_start_pfn, node_end_pfn,
+                               zone_start_pfn, zone_end_pfn);
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 41e58f3d8fbf..ff76fa0b7528 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -988,7 +988,8 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
+       /*
+        * Search to find a fit.
+        */
+-      end = start + alloc_bits + PCPU_BITMAP_BLOCK_BITS;
++      end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
++                  pcpu_chunk_map_bits(chunk));
+       bit_off = bitmap_find_next_zero_area(chunk->alloc_map, end, start,
+                                            alloc_bits, align_mask);
+       if (bit_off >= end)
+@@ -1721,6 +1722,7 @@ void free_percpu(void __percpu *ptr)
+       struct pcpu_chunk *chunk;
+       unsigned long flags;
+       int off;
++      bool need_balance = false;
+ 
+       if (!ptr)
+               return;
+@@ -1742,7 +1744,7 @@ void free_percpu(void __percpu *ptr)
+ 
+               list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
+                       if (pos != chunk) {
+-                              pcpu_schedule_balance_work();
++                              need_balance = true;
+                               break;
+                       }
+       }
+@@ -1750,6 +1752,9 @@ void free_percpu(void __percpu *ptr)
+       trace_percpu_free_percpu(chunk->base_addr, off, ptr);
+ 
+       spin_unlock_irqrestore(&pcpu_lock, flags);
++
++      if (need_balance)
++              pcpu_schedule_balance_work();
+ }
+ EXPORT_SYMBOL_GPL(free_percpu);
+ 
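The free_percpu() change above only records, while pcpu_lock is held, that a balance pass will be needed, and calls pcpu_schedule_balance_work() after the spinlock has been dropped. A minimal pthread sketch of that "decide under the lock, act after unlocking" pattern; the mutex, counter and deferred action are stand-ins, not the kernel APIs:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int free_chunks;                 /* protected by state_lock */

static void schedule_balance_work(void)
{
        /* Placeholder for queueing deferred work; must not run under state_lock. */
        printf("balance work scheduled\n");
}

static void free_object(void)
{
        bool need_balance = false;

        pthread_mutex_lock(&state_lock);
        free_chunks++;
        if (free_chunks > 1)            /* more than one empty chunk kept around */
                need_balance = true;
        pthread_mutex_unlock(&state_lock);

        /* The decision was made under the lock; the action happens outside it. */
        if (need_balance)
                schedule_balance_work();
}

int main(void)
{
        free_object();          /* first free: nothing to schedule */
        free_object();          /* second free: balance work is triggered */
        return 0;
}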
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 85b7f9423352..f048c2651954 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -926,7 +926,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
+                               continue;
+ 
+                       flush_cache_page(vma, address, page_to_pfn(page));
+-                      entry = pmdp_huge_clear_flush(vma, address, pmd);
++                      entry = pmdp_invalidate(vma, address, pmd);
+                       entry = pmd_wrprotect(entry);
+                       entry = pmd_mkclean(entry);
+                       set_pmd_at(vma->vm_mm, address, pmd, entry);
+diff --git a/mm/slab.c b/mm/slab.c
+index 018d32496e8d..46f21e73db2f 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -4326,8 +4326,12 @@ static int leaks_show(struct seq_file *m, void *p)
+        * whole processing.
+        */
+       do {
+-              set_store_user_clean(cachep);
+               drain_cpu_caches(cachep);
++              /*
++               * drain_cpu_caches() could make kmemleak_object and
++               * debug_objects_cache dirty, so reset afterwards.
++               */
++              set_store_user_clean(cachep);
+ 
+               x[1] = 0;
+ 
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 3cf0764d5793..bd4978ce8c45 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -1276,14 +1276,6 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
+           !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
+               return 0;
+ 
+-      /* The minimum encryption key size needs to be enforced by the
+-       * host stack before establishing any L2CAP connections. The
+-       * specification in theory allows a minimum of 1, but to align
+-       * BR/EDR and LE transports, a minimum of 7 is chosen.
+-       */
+-      if (conn->enc_key_size < HCI_MIN_ENC_KEY_SIZE)
+-              return 0;
+-
+       return 1;
+ }
+ 
+diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
+index 1601275efe2d..4c2ef42e189c 100644
+--- a/net/netfilter/nf_conntrack_h323_asn1.c
++++ b/net/netfilter/nf_conntrack_h323_asn1.c
+@@ -172,7 +172,7 @@ static int nf_h323_error_boundary(struct bitstr *bs, size_t bytes, size_t bits)
+       if (bits % BITS_PER_BYTE > 0)
+               bytes++;
+ 
+-      if (*bs->cur + bytes > *bs->end)
++      if (bs->cur + bytes > bs->end)
+               return 1;
+ 
+       return 0;
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index e1537ace2b90..5df7486bb416 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -185,14 +185,25 @@ static const struct rhashtable_params nf_flow_offload_rhash_params = {
+ 
+ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
+ {
+-      flow->timeout = (u32)jiffies;
++      int err;
+ 
+-      rhashtable_insert_fast(&flow_table->rhashtable,
+-                             &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
+-                             nf_flow_offload_rhash_params);
+-      rhashtable_insert_fast(&flow_table->rhashtable,
+-                             &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
+-                             nf_flow_offload_rhash_params);
++      err = rhashtable_insert_fast(&flow_table->rhashtable,
++                                   &flow->tuplehash[0].node,
++                                   nf_flow_offload_rhash_params);
++      if (err < 0)
++              return err;
++
++      err = rhashtable_insert_fast(&flow_table->rhashtable,
++                                   &flow->tuplehash[1].node,
++                                   nf_flow_offload_rhash_params);
++      if (err < 0) {
++              rhashtable_remove_fast(&flow_table->rhashtable,
++                                     &flow->tuplehash[0].node,
++                                     nf_flow_offload_rhash_params);
++              return err;
++      }
++
++      flow->timeout = (u32)jiffies;
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(flow_offload_add);
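flow_offload_add() now checks the result of both rhashtable insertions and removes the original-direction entry when the reply-direction insertion fails, so an error can no longer leave a flow half-linked in the table. A generic sketch of that insert-then-roll-back pattern using toy fixed-size tables rather than rhashtable; illustrative only:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define TABLE_SLOTS 4

struct toy_table {
        const void *slot[TABLE_SLOTS];
};

static int toy_insert(struct toy_table *t, const void *obj)
{
        for (int i = 0; i < TABLE_SLOTS; i++) {
                if (!t->slot[i]) {
                        t->slot[i] = obj;
                        return 0;
                }
        }
        return -ENOMEM;                 /* table full */
}

static void toy_remove(struct toy_table *t, const void *obj)
{
        for (int i = 0; i < TABLE_SLOTS; i++)
                if (t->slot[i] == obj)
                        t->slot[i] = NULL;
}

static int add_both_directions(struct toy_table *orig, struct toy_table *reply,
                               const void *flow)
{
        int err;

        err = toy_insert(orig, flow);
        if (err < 0)
                return err;

        err = toy_insert(reply, flow);
        if (err < 0) {
                toy_remove(orig, flow); /* undo the first insertion before failing */
                return err;
        }
        return 0;
}

int main(void)
{
        struct toy_table orig = { { NULL } }, reply = { { NULL } };
        int flow = 0;

        printf("add: %d\n", add_both_directions(&orig, &reply, &flow));
        return 0;
}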
+diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
+index 15ed91309992..129e9ec99ec9 100644
+--- a/net/netfilter/nf_flow_table_ip.c
++++ b/net/netfilter/nf_flow_table_ip.c
+@@ -181,6 +181,9 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
+           iph->protocol != IPPROTO_UDP)
+               return -1;
+ 
++      if (iph->ttl <= 1)
++              return -1;
++
+       thoff = iph->ihl * 4;
+       if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
+               return -1;
+@@ -412,6 +415,9 @@ static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
+           ip6h->nexthdr != IPPROTO_UDP)
+               return -1;
+ 
++      if (ip6h->hop_limit <= 1)
++              return -1;
++
+       thoff = sizeof(*ip6h);
+       if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
+               return -1;
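The added checks keep packets whose ttl or hop_limit is already 1 or less out of the flowtable fast path, so they fall back to the regular forwarding code, which is where TTL expiry is handled. A small illustration of that eligibility test on a plain struct instead of an sk_buff; the field names and protocol numbers are for the sketch only:

#include <stdbool.h>
#include <stdio.h>

struct pkt {
        unsigned char ttl;
        unsigned char proto;    /* 6 = TCP, 17 = UDP */
};

static bool fastpath_eligible(const struct pkt *p)
{
        if (p->proto != 6 && p->proto != 17)
                return false;   /* only TCP and UDP flows are offloaded */
        if (p->ttl <= 1)
                return false;   /* about to expire: leave it to the slow path */
        return true;
}

int main(void)
{
        struct pkt ok = { .ttl = 64, .proto = 6 };
        struct pkt expiring = { .ttl = 1, .proto = 6 };

        printf("ok=%d expiring=%d\n",
               fastpath_eligible(&ok), fastpath_eligible(&expiring));
        return 0;
}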
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index ebfcfe1dcbdb..29ff59dd99ac 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -1142,6 +1142,9 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
+       u64 pkts, bytes;
+       int cpu;
+ 
++      if (!stats)
++              return 0;
++
+       memset(&total, 0, sizeof(total));
+       for_each_possible_cpu(cpu) {
+               cpu_stats = per_cpu_ptr(stats, cpu);
+@@ -1199,6 +1202,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
+       if (nft_is_base_chain(chain)) {
+               const struct nft_base_chain *basechain = nft_base_chain(chain);
+               const struct nf_hook_ops *ops = &basechain->ops;
++              struct nft_stats __percpu *stats;
+               struct nlattr *nest;
+ 
+               nest = nla_nest_start(skb, NFTA_CHAIN_HOOK);
+@@ -1220,8 +1224,9 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
+               if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
+                       goto nla_put_failure;
+ 
+-              if (rcu_access_pointer(basechain->stats) &&
+-                  nft_dump_stats(skb, rcu_dereference(basechain->stats)))
++              stats = rcu_dereference_check(basechain->stats,
++                                            lockdep_commit_lock_is_held(net));
++              if (nft_dump_stats(skb, stats))
+                       goto nla_put_failure;
+       }
+ 
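With the early return added above, nft_dump_stats() tolerates a NULL stats pointer, so the caller can read basechain->stats once through rcu_dereference_check() and pass the result straight down instead of testing and re-reading the RCU pointer separately. A small sketch of a NULL-tolerant dump helper using plain pointers, without RCU or netlink:

#include <stddef.h>
#include <stdio.h>

struct chain_stats {
        unsigned long pkts;
        unsigned long bytes;
};

static int dump_stats(const struct chain_stats *stats)
{
        if (!stats)
                return 0;       /* a chain without counters is not an error */
        printf("pkts=%lu bytes=%lu\n", stats->pkts, stats->bytes);
        return 0;
}

int main(void)
{
        struct chain_stats s = { .pkts = 5, .bytes = 320 };

        dump_stats(&s);
        dump_stats(NULL);       /* quietly skipped */
        return 0;
}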
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
+index 436cc14cfc59..7f85af4c40ff 100644
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -113,6 +113,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
+       if (ret < 0)
+               goto err_flow_add;
+ 
++      dst_release(route.tuple[!dir].dst);
+       return;
+ 
+ err_flow_add:
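The added dst_release() drops the route reference on the successful return path as well, pairing the reference taken when the route was looked up for the new flow entry. A toy refcount sketch of keeping such get/put pairs balanced on every exit path; the helpers are illustrative and are not the kernel dst API:

#include <stdio.h>

struct route_ref {
        int refcount;
};

static void route_get(struct route_ref *r) { r->refcount++; }
static void route_put(struct route_ref *r) { r->refcount--; }

static int add_offload_entry(struct route_ref *r, int fail)
{
        route_get(r);           /* temporary reference held during setup */

        if (fail)
                goto err_out;

        route_put(r);           /* the success path must release it too */
        return 0;

err_out:
        route_put(r);
        return -1;
}

int main(void)
{
        struct route_ref r = { .refcount = 1 };

        add_offload_entry(&r, 0);
        add_offload_entry(&r, 1);
        printf("refcount after both calls: %d\n", r.refcount);  /* still 1 */
        return 0;
}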
+diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
+index 24d90abfc64d..da31aa8e216e 100644
+--- a/sound/core/seq/seq_ports.c
++++ b/sound/core/seq/seq_ports.c
+@@ -550,10 +550,10 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+               list_del_init(list);
+       grp->exclusive = 0;
+       write_unlock_irq(&grp->list_lock);
+-      up_write(&grp->list_mutex);
+ 
+       if (!empty)
+               unsubscribe_port(client, port, grp, &subs->info, ack);
++      up_write(&grp->list_mutex);
+ }
+ 
+ /* connect two ports */
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 9bc8a7cb40ea..45bf89ed31de 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1883,9 +1883,6 @@ static int azx_first_init(struct azx *chip)
+                       chip->msi = 0;
+       }
+ 
+-      if (azx_acquire_irq(chip, 0) < 0)
+-              return -EBUSY;
+-
+       pci_set_master(pci);
+       synchronize_irq(bus->irq);
+ 
+@@ -2000,6 +1997,9 @@ static int azx_first_init(struct azx *chip)
+               return -ENODEV;
+       }
+ 
++      if (azx_acquire_irq(chip, 0) < 0)
++              return -EBUSY;
++
+       strcpy(card->driver, "HDA-Intel");
+       strlcpy(card->shortname, driver_short_names[chip->driver_type],
+               sizeof(card->shortname));
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 46be34576620..02a47e365e52 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -28,6 +28,8 @@
+ #include <linux/hashtable.h>
+ #include <linux/kernel.h>
+ 
++#define FAKE_JUMP_OFFSET -1
++
+ struct alternative {
+       struct list_head list;
+       struct instruction *insn;
+@@ -501,7 +503,7 @@ static int add_jump_destinations(struct objtool_file *file)
+                   insn->type != INSN_JUMP_UNCONDITIONAL)
+                       continue;
+ 
+-              if (insn->ignore)
++              if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET)
+                       continue;
+ 
+               rela = find_rela_by_dest_range(insn->sec, insn->offset,
+@@ -670,10 +672,10 @@ static int handle_group_alt(struct objtool_file *file,
+               clear_insn_state(&fake_jump->state);
+ 
+               fake_jump->sec = special_alt->new_sec;
+-              fake_jump->offset = -1;
++              fake_jump->offset = FAKE_JUMP_OFFSET;
+               fake_jump->type = INSN_JUMP_UNCONDITIONAL;
+               fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
+-              fake_jump->ignore = true;
++              fake_jump->func = orig_insn->func;
+       }
+ 
+       if (!special_alt->new_len) {
