commit:     bdcd3bfdc48daf8627143f390bbc6bc05265e2b6
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jun 15 15:05:32 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jun 15 15:05:32 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bdcd3bfd
Linux patch 4.14.126 0000_README | 4 + 1125_linux-4.14.126.patch | 2105 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 2109 insertions(+) diff --git a/0000_README b/0000_README index bc18bb1..f9fbb2a 100644 --- a/0000_README +++ b/0000_README @@ -543,6 +543,10 @@ Patch: 1124_4.14.125.patch From: http://www.kernel.org Desc: Linux 4.14.125 +Patch: 1125_4.14.126.patch +From: http://www.kernel.org +Desc: Linux 4.14.126 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1125_linux-4.14.126.patch b/1125_linux-4.14.126.patch new file mode 100644 index 0000000..c192bde --- /dev/null +++ b/1125_linux-4.14.126.patch @@ -0,0 +1,2105 @@ +diff --git a/Makefile b/Makefile +index 9182c0b13988..631f8a8e28e0 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 14 +-SUBLEVEL = 125 ++SUBLEVEL = 126 + EXTRAVERSION = + NAME = Petit Gorille + +diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts +index ee1bb9b8b366..38538211a967 100644 +--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts ++++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts +@@ -109,6 +109,7 @@ + regulator-name = "PVDD_APIO_1V8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; ++ regulator-always-on; + }; + + ldo3_reg: LDO3 { +@@ -147,6 +148,7 @@ + regulator-name = "PVDD_ABB_1V8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; ++ regulator-always-on; + }; + + ldo9_reg: LDO9 { +diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi +index 3747d80104f4..63e1d2fe2e19 100644 +--- a/arch/arm/boot/dts/imx50.dtsi ++++ b/arch/arm/boot/dts/imx50.dtsi +@@ -441,7 +441,7 @@ + reg = <0x63fb0000 0x4000>; + interrupts = <6>; + clocks = <&clks IMX5_CLK_SDMA_GATE>, +- <&clks IMX5_CLK_SDMA_GATE>; ++ <&clks IMX5_CLK_AHB>; + clock-names = "ipg", "ahb"; + fsl,sdma-ram-script-name = "imx/sdma/sdma-imx50.bin"; + }; +diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi +index 1ee1d542d9ad..29c965126817 100644 +--- a/arch/arm/boot/dts/imx51.dtsi ++++ b/arch/arm/boot/dts/imx51.dtsi +@@ -476,7 +476,7 @@ + reg = <0x83fb0000 0x4000>; + interrupts = <6>; + clocks = <&clks IMX5_CLK_SDMA_GATE>, +- <&clks IMX5_CLK_SDMA_GATE>; ++ <&clks IMX5_CLK_AHB>; + clock-names = "ipg", "ahb"; + #dma-cells = <3>; + fsl,sdma-ram-script-name = "imx/sdma/sdma-imx51.bin"; +diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi +index 2e516f4985e4..ddc3ce67c29a 100644 +--- a/arch/arm/boot/dts/imx53.dtsi ++++ b/arch/arm/boot/dts/imx53.dtsi +@@ -676,7 +676,7 @@ + reg = <0x63fb0000 0x4000>; + interrupts = <6>; + clocks = <&clks IMX5_CLK_SDMA_GATE>, +- <&clks IMX5_CLK_SDMA_GATE>; ++ <&clks IMX5_CLK_AHB>; + clock-names = "ipg", "ahb"; + #dma-cells = <3>; + fsl,sdma-ram-script-name = "imx/sdma/sdma-imx53.bin"; +diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi +index 8884b4a3cafb..0fedb0c24eca 100644 +--- a/arch/arm/boot/dts/imx6qdl.dtsi ++++ b/arch/arm/boot/dts/imx6qdl.dtsi +@@ -909,7 +909,7 @@ + compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma"; + reg = <0x020ec000 0x4000>; + interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>; +- clocks = <&clks IMX6QDL_CLK_SDMA>, ++ clocks = <&clks IMX6QDL_CLK_IPG>, + <&clks IMX6QDL_CLK_SDMA>; + clock-names = "ipg", "ahb"; + #dma-cells = <3>; +diff --git a/arch/arm/boot/dts/imx6sl.dtsi 
b/arch/arm/boot/dts/imx6sl.dtsi +index 3f76f980947e..bd9308b222ba 100644 +--- a/arch/arm/boot/dts/imx6sl.dtsi ++++ b/arch/arm/boot/dts/imx6sl.dtsi +@@ -718,7 +718,7 @@ + reg = <0x020ec000 0x4000>; + interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clks IMX6SL_CLK_SDMA>, +- <&clks IMX6SL_CLK_SDMA>; ++ <&clks IMX6SL_CLK_AHB>; + clock-names = "ipg", "ahb"; + #dma-cells = <3>; + /* imx6sl reuses imx6q sdma firmware */ +diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi +index d64438bfa68b..61ad4e296257 100644 +--- a/arch/arm/boot/dts/imx6sx.dtsi ++++ b/arch/arm/boot/dts/imx6sx.dtsi +@@ -766,7 +766,7 @@ + compatible = "fsl,imx6sx-sdma", "fsl,imx6q-sdma"; + reg = <0x020ec000 0x4000>; + interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>; +- clocks = <&clks IMX6SX_CLK_SDMA>, ++ clocks = <&clks IMX6SX_CLK_IPG>, + <&clks IMX6SX_CLK_SDMA>; + clock-names = "ipg", "ahb"; + #dma-cells = <3>; +diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi +index f11a241a340d..036aeba4f02c 100644 +--- a/arch/arm/boot/dts/imx6ul.dtsi ++++ b/arch/arm/boot/dts/imx6ul.dtsi +@@ -680,7 +680,7 @@ + "fsl,imx35-sdma"; + reg = <0x020ec000 0x4000>; + interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>; +- clocks = <&clks IMX6UL_CLK_SDMA>, ++ clocks = <&clks IMX6UL_CLK_IPG>, + <&clks IMX6UL_CLK_SDMA>; + clock-names = "ipg", "ahb"; + #dma-cells = <3>; +diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi +index 82ad26e766eb..bf15efbe8a71 100644 +--- a/arch/arm/boot/dts/imx7s.dtsi ++++ b/arch/arm/boot/dts/imx7s.dtsi +@@ -997,8 +997,8 @@ + compatible = "fsl,imx7d-sdma", "fsl,imx35-sdma"; + reg = <0x30bd0000 0x10000>; + interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>; +- clocks = <&clks IMX7D_SDMA_CORE_CLK>, +- <&clks IMX7D_AHB_CHANNEL_ROOT_CLK>; ++ clocks = <&clks IMX7D_IPG_ROOT_CLK>, ++ <&clks IMX7D_SDMA_CORE_CLK>; + clock-names = "ipg", "ahb"; + #dma-cells = <3>; + fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin"; +diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h +index cba23eaa6072..7a88f160b1fb 100644 +--- a/arch/arm/include/asm/hardirq.h ++++ b/arch/arm/include/asm/hardirq.h +@@ -6,6 +6,7 @@ + #include <linux/threads.h> + #include <asm/irq.h> + ++/* number of IPIS _not_ including IPI_CPU_BACKTRACE */ + #define NR_IPI 7 + + typedef struct { +diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c +index 844bb2f1ddef..dc06483c2603 100644 +--- a/arch/arm/kernel/smp.c ++++ b/arch/arm/kernel/smp.c +@@ -76,6 +76,10 @@ enum ipi_msg_type { + IPI_CPU_STOP, + IPI_IRQ_WORK, + IPI_COMPLETION, ++ /* ++ * CPU_BACKTRACE is special and not included in NR_IPI ++ * or tracable with trace_ipi_* ++ */ + IPI_CPU_BACKTRACE, + /* + * SGI8-15 can be reserved by secure firmware, and thus may +@@ -801,7 +805,7 @@ core_initcall(register_cpufreq_notifier); + + static void raise_nmi(cpumask_t *mask) + { +- smp_cross_call(mask, IPI_CPU_BACKTRACE); ++ __smp_cross_call(mask, IPI_CPU_BACKTRACE); + } + + void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) +diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c +index 9be92073f847..1c17d991bcde 100644 +--- a/arch/arm/mach-exynos/suspend.c ++++ b/arch/arm/mach-exynos/suspend.c +@@ -441,8 +441,27 @@ early_wakeup: + + static void exynos5420_prepare_pm_resume(void) + { ++ unsigned int mpidr, cluster; ++ ++ mpidr = read_cpuid_mpidr(); ++ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); ++ + if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM)) + WARN_ON(mcpm_cpu_powered_up()); ++ ++ if 
(IS_ENABLED(CONFIG_HW_PERF_EVENTS) && cluster != 0) { ++ /* ++ * When system is resumed on the LITTLE/KFC core (cluster 1), ++ * the DSCR is not properly updated until the power is turned ++ * on also for the cluster 0. Enable it for a while to ++ * propagate the SPNIDEN and SPIDEN signals from Secure JTAG ++ * block and avoid undefined instruction issue on CP14 reset. ++ */ ++ pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN, ++ EXYNOS_COMMON_CONFIGURATION(0)); ++ pmu_raw_writel(0, ++ EXYNOS_COMMON_CONFIGURATION(0)); ++ } + } + + static void exynos5420_pm_resume(void) +diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c +index 7f69d17de354..9b21ae892009 100644 +--- a/arch/um/kernel/time.c ++++ b/arch/um/kernel/time.c +@@ -56,7 +56,7 @@ static int itimer_one_shot(struct clock_event_device *evt) + static struct clock_event_device timer_clockevent = { + .name = "posix-timer", + .rating = 250, +- .cpumask = cpu_all_mask, ++ .cpumask = cpu_possible_mask, + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .set_state_shutdown = itimer_shutdown, +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c +index 0b93f5519dda..d44bb077c6cf 100644 +--- a/arch/x86/events/intel/core.c ++++ b/arch/x86/events/intel/core.c +@@ -3051,7 +3051,7 @@ static int intel_pmu_hw_config(struct perf_event *event) + return ret; + + if (event->attr.precise_ip) { +- if (!(event->attr.freq || event->attr.wakeup_events)) { ++ if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) { + event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; + if (!(event->attr.sample_type & + ~intel_pmu_free_running_flags(event))) +diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c +index 0452629148be..c77f565a04f2 100644 +--- a/arch/x86/pci/irq.c ++++ b/arch/x86/pci/irq.c +@@ -1118,6 +1118,8 @@ static const struct dmi_system_id pciirq_dmi_table[] __initconst = { + + void __init pcibios_irq_init(void) + { ++ struct irq_routing_table *rtable = NULL; ++ + DBG(KERN_DEBUG "PCI: IRQ init\n"); + + if (raw_pci_ops == NULL) +@@ -1128,8 +1130,10 @@ void __init pcibios_irq_init(void) + pirq_table = pirq_find_routing_table(); + + #ifdef CONFIG_PCI_BIOS +- if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) ++ if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) { + pirq_table = pcibios_get_irq_routing_table(); ++ rtable = pirq_table; ++ } + #endif + if (pirq_table) { + pirq_peer_trick(); +@@ -1144,8 +1148,10 @@ void __init pcibios_irq_init(void) + * If we're using the I/O APIC, avoid using the PCI IRQ + * routing table + */ +- if (io_apic_assign_pci_irqs) ++ if (io_apic_assign_pci_irqs) { ++ kfree(rtable); + pirq_table = NULL; ++ } + } + + x86_init.pci.fixup_irqs(); +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c +index 3b44bd28fc45..7d45ac451745 100644 +--- a/block/bfq-iosched.c ++++ b/block/bfq-iosched.c +@@ -2282,6 +2282,8 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd) + if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && + bfq_symmetric_scenario(bfqd)) + sl = min_t(u64, sl, BFQ_MIN_TT); ++ else if (bfqq->wr_coeff > 1) ++ sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC); + + bfqd->last_idling_start = ktime_get(); + hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl), +diff --git a/block/blk-core.c b/block/blk-core.c +index 0b14aebfd1a8..4e04c79aa2c2 100644 +--- a/block/blk-core.c ++++ b/block/blk-core.c +@@ -339,7 +339,6 @@ void blk_sync_queue(struct request_queue *q) + struct blk_mq_hw_ctx *hctx; + int i; + +- cancel_delayed_work_sync(&q->requeue_work); + queue_for_each_hw_ctx(q, hctx, i) + 
cancel_delayed_work_sync(&hctx->run_work); + } else { +diff --git a/block/blk-mq.c b/block/blk-mq.c +index eac444804736..55139d2fca3e 100644 +--- a/block/blk-mq.c ++++ b/block/blk-mq.c +@@ -2294,6 +2294,8 @@ void blk_mq_release(struct request_queue *q) + struct blk_mq_hw_ctx *hctx; + unsigned int i; + ++ cancel_delayed_work_sync(&q->requeue_work); ++ + /* hctx kobj stays in hctx */ + queue_for_each_hw_ctx(q, hctx, i) { + if (!hctx) +diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c +index 64191694ff6e..9cfdbea493bb 100644 +--- a/drivers/clk/rockchip/clk-rk3288.c ++++ b/drivers/clk/rockchip/clk-rk3288.c +@@ -835,6 +835,9 @@ static const int rk3288_saved_cru_reg_ids[] = { + RK3288_CLKSEL_CON(10), + RK3288_CLKSEL_CON(33), + RK3288_CLKSEL_CON(37), ++ ++ /* We turn aclk_dmac1 on for suspend; this will restore it */ ++ RK3288_CLKGATE_CON(10), + }; + + static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)]; +@@ -850,6 +853,14 @@ static int rk3288_clk_suspend(void) + readl_relaxed(rk3288_cru_base + reg_id); + } + ++ /* ++ * Going into deep sleep (specifically setting PMU_CLR_DMA in ++ * RK3288_PMU_PWRMODE_CON1) appears to fail unless ++ * "aclk_dmac1" is on. ++ */ ++ writel_relaxed(1 << (12 + 16), ++ rk3288_cru_base + RK3288_CLKGATE_CON(10)); ++ + /* + * Switch PLLs other than DPLL (for SDRAM) to slow mode to + * avoid crashes on resume. The Mask ROM on the system will +diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c +index 1953e57505f4..f17a4c7a1781 100644 +--- a/drivers/dma/idma64.c ++++ b/drivers/dma/idma64.c +@@ -589,7 +589,7 @@ static int idma64_probe(struct idma64_chip *chip) + idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + +- idma64->dma.dev = chip->dev; ++ idma64->dma.dev = chip->sysdev; + + dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK); + +@@ -629,6 +629,7 @@ static int idma64_platform_probe(struct platform_device *pdev) + { + struct idma64_chip *chip; + struct device *dev = &pdev->dev; ++ struct device *sysdev = dev->parent; + struct resource *mem; + int ret; + +@@ -645,11 +646,12 @@ static int idma64_platform_probe(struct platform_device *pdev) + if (IS_ERR(chip->regs)) + return PTR_ERR(chip->regs); + +- ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); ++ ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64)); + if (ret) + return ret; + + chip->dev = dev; ++ chip->sysdev = sysdev; + + ret = idma64_probe(chip); + if (ret) +diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h +index 6b816878e5e7..baa32e1425de 100644 +--- a/drivers/dma/idma64.h ++++ b/drivers/dma/idma64.h +@@ -216,12 +216,14 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value) + /** + * struct idma64_chip - representation of iDMA 64-bit controller hardware + * @dev: struct device of the DMA controller ++ * @sysdev: struct device of the physical device that does DMA + * @irq: irq line + * @regs: memory mapped I/O space + * @idma64: struct idma64 that is filed by idma64_probe() + */ + struct idma64_chip { + struct device *dev; ++ struct device *sysdev; + int irq; + void __iomem *regs; + struct idma64 *idma64; +diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig +index 96afb2aeed18..aaaa8ce8d3fd 100644 +--- a/drivers/edac/Kconfig ++++ b/drivers/edac/Kconfig +@@ -246,8 +246,8 @@ config EDAC_PND2 + micro-server but may appear on others in the future. 
+ + config EDAC_MPC85XX +- tristate "Freescale MPC83xx / MPC85xx" +- depends on FSL_SOC ++ bool "Freescale MPC83xx / MPC85xx" ++ depends on FSL_SOC && EDAC=y + help + Support for error detection and correction on the Freescale + MPC8349, MPC8560, MPC8540, MPC8548, T4240 +diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c +index 148e81eea35a..83c6843db50c 100644 +--- a/drivers/gpio/gpio-omap.c ++++ b/drivers/gpio/gpio-omap.c +@@ -296,6 +296,22 @@ static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset) + } + } + ++/* ++ * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain. ++ * See TRM section for GPIO for "Wake-Up Generation" for the list of GPIOs ++ * in wakeup domain. If bank->non_wakeup_gpios is not configured, assume none ++ * are capable waking up the system from off mode. ++ */ ++static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask) ++{ ++ u32 no_wake = bank->non_wakeup_gpios; ++ ++ if (no_wake) ++ return !!(~no_wake & gpio_mask); ++ ++ return false; ++} ++ + static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio, + unsigned trigger) + { +@@ -327,13 +343,7 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio, + } + + /* This part needs to be executed always for OMAP{34xx, 44xx} */ +- if (!bank->regs->irqctrl) { +- /* On omap24xx proceed only when valid GPIO bit is set */ +- if (bank->non_wakeup_gpios) { +- if (!(bank->non_wakeup_gpios & gpio_bit)) +- goto exit; +- } +- ++ if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) { + /* + * Log the edge gpio and manually trigger the IRQ + * after resume if the input level changes +@@ -346,7 +356,6 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio, + bank->enabled_non_wakeup_gpios &= ~gpio_bit; + } + +-exit: + bank->level_mask = + readl_relaxed(bank->base + bank->regs->leveldetect0) | + readl_relaxed(bank->base + bank->regs->leveldetect1); +diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c +index 1309b444720e..3210fba16a9b 100644 +--- a/drivers/gpio/gpio-vf610.c ++++ b/drivers/gpio/gpio-vf610.c +@@ -37,6 +37,7 @@ struct fsl_gpio_soc_data { + + struct vf610_gpio_port { + struct gpio_chip gc; ++ struct irq_chip ic; + void __iomem *base; + void __iomem *gpio_base; + const struct fsl_gpio_soc_data *sdata; +@@ -66,8 +67,6 @@ struct vf610_gpio_port { + #define PORT_INT_EITHER_EDGE 0xb + #define PORT_INT_LOGIC_ONE 0xc + +-static struct irq_chip vf610_gpio_irq_chip; +- + static const struct fsl_gpio_soc_data imx_data = { + .have_paddr = true, + }; +@@ -243,15 +242,6 @@ static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable) + return 0; + } + +-static struct irq_chip vf610_gpio_irq_chip = { +- .name = "gpio-vf610", +- .irq_ack = vf610_gpio_irq_ack, +- .irq_mask = vf610_gpio_irq_mask, +- .irq_unmask = vf610_gpio_irq_unmask, +- .irq_set_type = vf610_gpio_irq_set_type, +- .irq_set_wake = vf610_gpio_irq_set_wake, +-}; +- + static int vf610_gpio_probe(struct platform_device *pdev) + { + const struct of_device_id *of_id = of_match_device(vf610_gpio_dt_ids, +@@ -261,6 +251,7 @@ static int vf610_gpio_probe(struct platform_device *pdev) + struct vf610_gpio_port *port; + struct resource *iores; + struct gpio_chip *gc; ++ struct irq_chip *ic; + int i; + int ret; + +@@ -297,6 +288,14 @@ static int vf610_gpio_probe(struct platform_device *pdev) + gc->direction_output = vf610_gpio_direction_output; + gc->set = vf610_gpio_set; + ++ ic = &port->ic; ++ ic->name = 
"gpio-vf610"; ++ ic->irq_ack = vf610_gpio_irq_ack; ++ ic->irq_mask = vf610_gpio_irq_mask; ++ ic->irq_unmask = vf610_gpio_irq_unmask; ++ ic->irq_set_type = vf610_gpio_irq_set_type; ++ ic->irq_set_wake = vf610_gpio_irq_set_wake; ++ + ret = gpiochip_add_data(gc, port); + if (ret < 0) + return ret; +@@ -308,14 +307,13 @@ static int vf610_gpio_probe(struct platform_device *pdev) + /* Clear the interrupt status register for all GPIO's */ + vf610_gpio_writel(~0, port->base + PORT_ISFR); + +- ret = gpiochip_irqchip_add(gc, &vf610_gpio_irq_chip, 0, +- handle_edge_irq, IRQ_TYPE_NONE); ++ ret = gpiochip_irqchip_add(gc, ic, 0, handle_edge_irq, IRQ_TYPE_NONE); + if (ret) { + dev_err(dev, "failed to add irqchip\n"); + gpiochip_remove(gc); + return ret; + } +- gpiochip_set_chained_irqchip(gc, &vf610_gpio_irq_chip, port->irq, ++ gpiochip_set_chained_irqchip(gc, ic, port->irq, + vf610_gpio_irq_handler); + + return 0; +diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +index f5091827628a..3c94d838863e 100644 +--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c ++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +@@ -735,11 +735,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511, + vsync_polarity = 1; + } + +- if (mode->vrefresh <= 24000) ++ if (drm_mode_vrefresh(mode) <= 24) + low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ; +- else if (mode->vrefresh <= 25000) ++ else if (drm_mode_vrefresh(mode) <= 25) + low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ; +- else if (mode->vrefresh <= 30000) ++ else if (drm_mode_vrefresh(mode) <= 30) + low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ; + else + low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE; +diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c +index d05ed0521e20..331478bd2ff8 100644 +--- a/drivers/gpu/drm/drm_atomic_helper.c ++++ b/drivers/gpu/drm/drm_atomic_helper.c +@@ -1462,6 +1462,8 @@ EXPORT_SYMBOL(drm_atomic_helper_async_check); + * drm_atomic_async_check() succeeds. Async commits are not supposed to swap + * the states like normal sync commits, but just do in-place changes on the + * current state. ++ * ++ * TODO: Implement full swap instead of doing in-place changes. + */ + void drm_atomic_helper_async_commit(struct drm_device *dev, + struct drm_atomic_state *state) +@@ -1472,8 +1474,16 @@ void drm_atomic_helper_async_commit(struct drm_device *dev, + int i; + + for_each_new_plane_in_state(state, plane, plane_state, i) { ++ struct drm_framebuffer *old_fb = plane->state->fb; ++ + funcs = plane->helper_private; + funcs->atomic_async_update(plane, plane_state); ++ ++ /* ++ * Make sure the FBs have been swapped so that cleanups in the ++ * new_state performs a cleanup in the old FB. ++ */ ++ WARN_ON_ONCE(plane_state->fb != old_fb); + } + } + EXPORT_SYMBOL(drm_atomic_helper_async_commit); +diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig +index a0c0630a0857..c02a13406a81 100644 +--- a/drivers/gpu/drm/nouveau/Kconfig ++++ b/drivers/gpu/drm/nouveau/Kconfig +@@ -16,20 +16,9 @@ config DRM_NOUVEAU + select INPUT if ACPI && X86 + select THERMAL if ACPI && X86 + select ACPI_VIDEO if ACPI && X86 +- help +- Choose this option for open-source NVIDIA support. +- +-config NOUVEAU_LEGACY_CTX_SUPPORT +- bool "Nouveau legacy context support" +- depends on DRM_NOUVEAU + select DRM_VM +- default y + help +- There was a version of the nouveau DDX that relied on legacy +- ctx ioctls not erroring out. 
But that was back in time a long +- ways, so offer a way to disable it now. For uapi compat with +- old nouveau ddx this should be on by default, but modern distros +- should consider turning it off. ++ Choose this option for open-source NVIDIA support. + + config NOUVEAU_PLATFORM_DRIVER + bool "Nouveau (NVIDIA) SoC GPUs" +diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c +index 70a8d0b0c4f1..7d3c7bb0ebfa 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_drm.c ++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c +@@ -967,11 +967,8 @@ nouveau_driver_fops = { + static struct drm_driver + driver_stub = { + .driver_features = +- DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER +-#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT) +- | DRIVER_KMS_LEGACY_CONTEXT +-#endif +- , ++ DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | ++ DRIVER_KMS_LEGACY_CONTEXT, + + .load = nouveau_drm_load, + .unload = nouveau_drm_unload, +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c +index 6160a6158cf2..5e51a5c1eb01 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c +@@ -364,8 +364,15 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps) + * and it's better to have a failed modeset than that. + */ + for (cfg = nvkm_dp_rates; cfg->rate; cfg++) { +- if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) +- failsafe = cfg; ++ if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) { ++ /* Try to respect sink limits too when selecting ++ * lowest link configuration. ++ */ ++ if (!failsafe || ++ (cfg->nr <= sink_nr && cfg->bw <= sink_bw)) ++ failsafe = cfg; ++ } ++ + if (failsafe && cfg[1].rate < dataKBps) + break; + } +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index fe935293fa7b..baa4c58e2736 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -4019,9 +4019,7 @@ static void __init init_no_remapping_devices(void) + + /* This IOMMU has *only* gfx devices. 
Either bypass it or + set the gfx_mapped flag, as appropriate */ +- if (dmar_map_gfx) { +- intel_iommu_gfx_mapped = 1; +- } else { ++ if (!dmar_map_gfx) { + drhd->ignored = 1; + for_each_active_dev_scope(drhd->devices, + drhd->devices_cnt, i, dev) +@@ -4807,6 +4805,9 @@ int __init intel_iommu_init(void) + goto out_free_reserved_range; + } + ++ if (dmar_map_gfx) ++ intel_iommu_gfx_mapped = 1; ++ + init_no_remapping_devices(); + + ret = init_dmars(); +diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c +index 40e8d9b59d07..b5c4f8f974aa 100644 +--- a/drivers/mfd/intel-lpss.c ++++ b/drivers/mfd/intel-lpss.c +@@ -273,6 +273,9 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss) + { + u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN; + ++ /* Set the device in reset state */ ++ writel(0, lpss->priv + LPSS_PRIV_RESETS); ++ + intel_lpss_deassert_reset(lpss); + + intel_lpss_set_remap_addr(lpss); +diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c +index 3bd75061f777..f78be039e463 100644 +--- a/drivers/mfd/tps65912-spi.c ++++ b/drivers/mfd/tps65912-spi.c +@@ -27,6 +27,7 @@ static const struct of_device_id tps65912_spi_of_match_table[] = { + { .compatible = "ti,tps65912", }, + { /* sentinel */ } + }; ++MODULE_DEVICE_TABLE(of, tps65912_spi_of_match_table); + + static int tps65912_spi_probe(struct spi_device *spi) + { +diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c +index dd19f17a1b63..2b8c479dbfa6 100644 +--- a/drivers/mfd/twl6040.c ++++ b/drivers/mfd/twl6040.c +@@ -322,8 +322,19 @@ int twl6040_power(struct twl6040 *twl6040, int on) + } + } + ++ /* ++ * Register access can produce errors after power-up unless we ++ * wait at least 8ms based on measurements on duovero. ++ */ ++ usleep_range(10000, 12000); ++ + /* Sync with the HW */ +- regcache_sync(twl6040->regmap); ++ ret = regcache_sync(twl6040->regmap); ++ if (ret) { ++ dev_err(twl6040->dev, "Failed to sync with the HW: %i\n", ++ ret); ++ goto out; ++ } + + /* Default PLL configuration after power up */ + twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL; +diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c +index e089bb6dde3a..9849bf183299 100644 +--- a/drivers/misc/pci_endpoint_test.c ++++ b/drivers/misc/pci_endpoint_test.c +@@ -479,6 +479,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, + data = (struct pci_endpoint_test_data *)ent->driver_data; + if (data) { + test_reg_bar = data->test_reg_bar; ++ test->test_reg_bar = test_reg_bar; + test->alignment = data->alignment; + no_msi = data->no_msi; + } +diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c +index f1f54a818489..77f18729ee96 100644 +--- a/drivers/mmc/host/mmci.c ++++ b/drivers/mmc/host/mmci.c +@@ -1320,9 +1320,10 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) + } + + /* +- * Don't poll for busy completion in irq context. ++ * Busy detection has been handled by mmci_cmd_irq() above. ++ * Clear the status bit to prevent polling in IRQ context. + */ +- if (host->variant->busy_detect && host->busy_status) ++ if (host->variant->busy_detect_flag) + status &= ~host->variant->busy_detect_flag; + + ret = 1; +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index 06355ca832db..cd11cced3678 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -2105,8 +2105,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) + * must flush all entered requests to their failed completion to avoid + * deadlocking blk-mq hot-cpu notifier. 
+ */ +- if (shutdown) ++ if (shutdown) { + nvme_start_queues(&dev->ctrl); ++ if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) ++ blk_mq_unquiesce_queue(dev->ctrl.admin_q); ++ } + mutex_unlock(&dev->shutdown_lock); + } + +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c +index 635886e4835c..dba3f4d0a63d 100644 +--- a/drivers/nvmem/core.c ++++ b/drivers/nvmem/core.c +@@ -954,7 +954,7 @@ static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, + void *buf) + { + u8 *p, *b; +- int i, bit_offset = cell->bit_offset; ++ int i, extra, bit_offset = cell->bit_offset; + + p = b = buf; + if (bit_offset) { +@@ -969,11 +969,16 @@ static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, + p = b; + *b++ >>= bit_offset; + } +- +- /* result fits in less bytes */ +- if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE)) +- *p-- = 0; ++ } else { ++ /* point to the msb */ ++ p += cell->bytes - 1; + } ++ ++ /* result fits in less bytes */ ++ extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE); ++ while (--extra >= 0) ++ *p-- = 0; ++ + /* clear msb bits if any leftover in the last byte */ + *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0); + } +diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c +index 39405598b22d..9bc52e4cf52a 100644 +--- a/drivers/pci/dwc/pci-keystone.c ++++ b/drivers/pci/dwc/pci-keystone.c +@@ -240,6 +240,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) + ks_dw_pcie_enable_error_irq(ks_pcie); + } + ++#ifdef CONFIG_ARM + /* + * When a PCI device does not exist during config cycles, keystone host gets a + * bus error instead of returning 0xffffffff. This handler always returns 0 +@@ -259,6 +260,7 @@ static int keystone_pcie_fault(unsigned long addr, unsigned int fsr, + + return 0; + } ++#endif + + static int __init ks_pcie_host_init(struct pcie_port *pp) + { +@@ -282,12 +284,14 @@ static int __init ks_pcie_host_init(struct pcie_port *pp) + val |= BIT(12); + writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL); + ++#ifdef CONFIG_ARM + /* + * PCIe access errors that result into OCP errors are caught by ARM as + * "External aborts" + */ + hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0, + "Asynchronous external abort"); ++#endif + + return 0; + } +diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c +index 41edce16a07c..2b0a1f3b8265 100644 +--- a/drivers/pci/host/pcie-rcar.c ++++ b/drivers/pci/host/pcie-rcar.c +@@ -849,7 +849,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) + { + struct device *dev = pcie->dev; + struct rcar_msi *msi = &pcie->msi; +- unsigned long base; ++ phys_addr_t base; + int err, i; + + mutex_init(&msi->lock); +@@ -888,10 +888,14 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) + + /* setup MSI data target */ + msi->pages = __get_free_pages(GFP_KERNEL, 0); ++ if (!msi->pages) { ++ err = -ENOMEM; ++ goto err; ++ } + base = virt_to_phys((void *)msi->pages); + +- rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR); +- rcar_pci_write_reg(pcie, 0, PCIEMSIAUR); ++ rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR); ++ rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR); + + /* enable all MSI interrupts */ + rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER); +diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c +index 29f024f0ed7f..a8a44afa81ec 100644 +--- a/drivers/pci/host/pcie-xilinx.c ++++ b/drivers/pci/host/pcie-xilinx.c +@@ -338,14 +338,19 @@ static const 
struct irq_domain_ops msi_domain_ops = { + * xilinx_pcie_enable_msi - Enable MSI support + * @port: PCIe port information + */ +-static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) ++static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) + { + phys_addr_t msg_addr; + + port->msi_pages = __get_free_pages(GFP_KERNEL, 0); ++ if (!port->msi_pages) ++ return -ENOMEM; ++ + msg_addr = virt_to_phys((void *)port->msi_pages); + pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1); + pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); ++ ++ return 0; + } + + /* INTx Functions */ +@@ -500,6 +505,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) + struct device *dev = port->dev; + struct device_node *node = dev->of_node; + struct device_node *pcie_intc_node; ++ int ret; + + /* Setup INTx */ + pcie_intc_node = of_get_next_child(node, NULL); +@@ -528,7 +534,9 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) + return -ENODEV; + } + +- xilinx_pcie_enable_msi(port); ++ ret = xilinx_pcie_enable_msi(port); ++ if (ret) ++ return ret; + } + + return 0; +diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c +index a3449d717a99..f8f26684c67d 100644 +--- a/drivers/pci/hotplug/rpadlpar_core.c ++++ b/drivers/pci/hotplug/rpadlpar_core.c +@@ -55,6 +55,7 @@ static struct device_node *find_vio_slot_node(char *drc_name) + if ((rc == 0) && (!strcmp(drc_name, name))) + break; + } ++ of_node_put(parent); + + return dn; + } +@@ -78,6 +79,7 @@ static struct device_node *find_php_slot_pci_node(char *drc_name, + return np; + } + ++/* Returns a device_node with its reference count incremented */ + static struct device_node *find_dlpar_node(char *drc_name, int *node_type) + { + struct device_node *dn; +@@ -313,6 +315,7 @@ int dlpar_add_slot(char *drc_name) + rc = dlpar_add_phb(drc_name, dn); + break; + } ++ of_node_put(dn); + + printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name); + exit: +@@ -446,6 +449,7 @@ int dlpar_remove_slot(char *drc_name) + rc = dlpar_remove_pci_slot(drc_name, dn); + break; + } ++ of_node_put(dn); + vm_unmap_aliases(); + + printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name); +diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c +index 2ac4a7178470..dbad14716da4 100644 +--- a/drivers/platform/chrome/cros_ec_proto.c ++++ b/drivers/platform/chrome/cros_ec_proto.c +@@ -67,6 +67,17 @@ static int send_command(struct cros_ec_device *ec_dev, + else + xfer_fxn = ec_dev->cmd_xfer; + ++ if (!xfer_fxn) { ++ /* ++ * This error can happen if a communication error happened and ++ * the EC is trying to use protocol v2, on an underlying ++ * communication mechanism that does not support v2. 
++ */ ++ dev_err_once(ec_dev->dev, ++ "missing EC transfer API, cannot send command\n"); ++ return -EIO; ++ } ++ + ret = (*xfer_fxn)(ec_dev, msg); + if (msg->result == EC_RES_IN_PROGRESS) { + int i; +diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c +index e03fa31446ca..c9b3ef333743 100644 +--- a/drivers/platform/x86/intel_pmc_ipc.c ++++ b/drivers/platform/x86/intel_pmc_ipc.c +@@ -747,13 +747,17 @@ static int ipc_create_pmc_devices(void) + if (ret) { + dev_err(ipcdev.dev, "Failed to add punit platform device\n"); + platform_device_unregister(ipcdev.tco_dev); ++ return ret; + } + + if (!ipcdev.telem_res_inval) { + ret = ipc_create_telemetry_device(); +- if (ret) ++ if (ret) { + dev_warn(ipcdev.dev, + "Failed to add telemetry platform device\n"); ++ platform_device_unregister(ipcdev.punit_dev); ++ platform_device_unregister(ipcdev.tco_dev); ++ } + } + + return ret; +diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c +index b91b1d2999dc..d19307f791c6 100644 +--- a/drivers/power/supply/max14656_charger_detector.c ++++ b/drivers/power/supply/max14656_charger_detector.c +@@ -280,6 +280,13 @@ static int max14656_probe(struct i2c_client *client, + + INIT_DELAYED_WORK(&chip->irq_work, max14656_irq_worker); + ++ chip->detect_psy = devm_power_supply_register(dev, ++ &chip->psy_desc, &psy_cfg); ++ if (IS_ERR(chip->detect_psy)) { ++ dev_err(dev, "power_supply_register failed\n"); ++ return -EINVAL; ++ } ++ + ret = devm_request_irq(dev, chip->irq, max14656_irq, + IRQF_TRIGGER_FALLING, + MAX14656_NAME, chip); +@@ -289,13 +296,6 @@ static int max14656_probe(struct i2c_client *client, + } + enable_irq_wake(chip->irq); + +- chip->detect_psy = devm_power_supply_register(dev, +- &chip->psy_desc, &psy_cfg); +- if (IS_ERR(chip->detect_psy)) { +- dev_err(dev, "power_supply_register failed\n"); +- return -EINVAL; +- } +- + schedule_delayed_work(&chip->irq_work, msecs_to_jiffies(2000)); + + return 0; +diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c +index 1581f6ab1b1f..c45e5719ba17 100644 +--- a/drivers/pwm/core.c ++++ b/drivers/pwm/core.c +@@ -311,10 +311,12 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip, + if (IS_ENABLED(CONFIG_OF)) + of_pwmchip_add(chip); + +- pwmchip_sysfs_export(chip); +- + out: + mutex_unlock(&pwm_lock); ++ ++ if (!ret) ++ pwmchip_sysfs_export(chip); ++ + return ret; + } + EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity); +@@ -348,7 +350,7 @@ int pwmchip_remove(struct pwm_chip *chip) + unsigned int i; + int ret = 0; + +- pwmchip_sysfs_unexport_children(chip); ++ pwmchip_sysfs_unexport(chip); + + mutex_lock(&pwm_lock); + +@@ -368,8 +370,6 @@ int pwmchip_remove(struct pwm_chip *chip) + + free_pwms(chip); + +- pwmchip_sysfs_unexport(chip); +- + out: + mutex_unlock(&pwm_lock); + return ret; +diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c +index 3540d00425d0..9b79cbc7a715 100644 +--- a/drivers/pwm/pwm-meson.c ++++ b/drivers/pwm/pwm-meson.c +@@ -111,6 +111,10 @@ struct meson_pwm { + const struct meson_pwm_data *data; + void __iomem *base; + u8 inverter_mask; ++ /* ++ * Protects register (write) access to the REG_MISC_AB register ++ * that is shared between the two PWMs. 
++ */ + spinlock_t lock; + }; + +@@ -235,6 +239,7 @@ static void meson_pwm_enable(struct meson_pwm *meson, + { + u32 value, clk_shift, clk_enable, enable; + unsigned int offset; ++ unsigned long flags; + + switch (id) { + case 0: +@@ -255,6 +260,8 @@ static void meson_pwm_enable(struct meson_pwm *meson, + return; + } + ++ spin_lock_irqsave(&meson->lock, flags); ++ + value = readl(meson->base + REG_MISC_AB); + value &= ~(MISC_CLK_DIV_MASK << clk_shift); + value |= channel->pre_div << clk_shift; +@@ -267,11 +274,14 @@ static void meson_pwm_enable(struct meson_pwm *meson, + value = readl(meson->base + REG_MISC_AB); + value |= enable; + writel(value, meson->base + REG_MISC_AB); ++ ++ spin_unlock_irqrestore(&meson->lock, flags); + } + + static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id) + { + u32 value, enable; ++ unsigned long flags; + + switch (id) { + case 0: +@@ -286,9 +296,13 @@ static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id) + return; + } + ++ spin_lock_irqsave(&meson->lock, flags); ++ + value = readl(meson->base + REG_MISC_AB); + value &= ~enable; + writel(value, meson->base + REG_MISC_AB); ++ ++ spin_unlock_irqrestore(&meson->lock, flags); + } + + static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, +@@ -296,19 +310,16 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + { + struct meson_pwm_channel *channel = pwm_get_chip_data(pwm); + struct meson_pwm *meson = to_meson_pwm(chip); +- unsigned long flags; + int err = 0; + + if (!state) + return -EINVAL; + +- spin_lock_irqsave(&meson->lock, flags); +- + if (!state->enabled) { + meson_pwm_disable(meson, pwm->hwpwm); + channel->state.enabled = false; + +- goto unlock; ++ return 0; + } + + if (state->period != channel->state.period || +@@ -329,7 +340,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + err = meson_pwm_calc(meson, channel, pwm->hwpwm, + state->duty_cycle, state->period); + if (err < 0) +- goto unlock; ++ return err; + + channel->state.polarity = state->polarity; + channel->state.period = state->period; +@@ -341,9 +352,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + channel->state.enabled = true; + } + +-unlock: +- spin_unlock_irqrestore(&meson->lock, flags); +- return err; ++ return 0; + } + + static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, +diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c +index f7b8a86fa5c5..ad4a40c0f27c 100644 +--- a/drivers/pwm/pwm-tiehrpwm.c ++++ b/drivers/pwm/pwm-tiehrpwm.c +@@ -382,6 +382,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) + } + + /* Update shadow register first before modifying active register */ ++ ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK, ++ AQSFRC_RLDCSF_ZRO); + ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val); + /* + * Changes to immediate action on Action Qualifier. 
This puts +diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c +index a813239300c3..0850b11dfd83 100644 +--- a/drivers/pwm/sysfs.c ++++ b/drivers/pwm/sysfs.c +@@ -397,19 +397,6 @@ void pwmchip_sysfs_export(struct pwm_chip *chip) + } + + void pwmchip_sysfs_unexport(struct pwm_chip *chip) +-{ +- struct device *parent; +- +- parent = class_find_device(&pwm_class, NULL, chip, +- pwmchip_sysfs_match); +- if (parent) { +- /* for class_find_device() */ +- put_device(parent); +- device_unregister(parent); +- } +-} +- +-void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) + { + struct device *parent; + unsigned int i; +@@ -427,6 +414,7 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) + } + + put_device(parent); ++ device_unregister(parent); + } + + static int __init pwm_sysfs_init(void) +diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c +index bad0e0ea4f30..ef989a15aefc 100644 +--- a/drivers/rapidio/rio_cm.c ++++ b/drivers/rapidio/rio_cm.c +@@ -2145,6 +2145,14 @@ static int riocm_add_mport(struct device *dev, + mutex_init(&cm->rx_lock); + riocm_rx_fill(cm, RIOCM_RX_RING_SIZE); + cm->rx_wq = create_workqueue(DRV_NAME "/rxq"); ++ if (!cm->rx_wq) { ++ riocm_error("failed to allocate IBMBOX_%d on %s", ++ cmbox, mport->name); ++ rio_release_outb_mbox(mport, cmbox); ++ kfree(cm); ++ return -ENOMEM; ++ } ++ + INIT_WORK(&cm->rx_work, rio_ibmsg_handler); + + cm->tx_slot = 0; +diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c +index e3df1e96b141..d7a31bf3c9d6 100644 +--- a/drivers/soc/mediatek/mtk-pmic-wrap.c ++++ b/drivers/soc/mediatek/mtk-pmic-wrap.c +@@ -778,7 +778,7 @@ static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp) + static int pwrap_init_cipher(struct pmic_wrapper *wrp) + { + int ret; +- u32 rdata; ++ u32 rdata = 0; + + pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST); + pwrap_writel(wrp, 0x0, PWRAP_CIPHER_SWRST); +diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c +index 15e71fd6c513..0931ddb0b384 100644 +--- a/drivers/soc/rockchip/grf.c ++++ b/drivers/soc/rockchip/grf.c +@@ -44,9 +44,11 @@ static const struct rockchip_grf_info rk3036_grf __initconst = { + }; + + #define RK3288_GRF_SOC_CON0 0x244 ++#define RK3288_GRF_SOC_CON2 0x24c + + static const struct rockchip_grf_value rk3288_defaults[] __initconst = { + { "jtag switching", RK3288_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 12) }, ++ { "pwm select", RK3288_GRF_SOC_CON2, HIWORD_UPDATE(1, 1, 0) }, + }; + + static const struct rockchip_grf_info rk3288_grf __initconst = { +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c +index efdae686a761..9bf3e5f945c7 100644 +--- a/drivers/spi/spi-pxa2xx.c ++++ b/drivers/spi/spi-pxa2xx.c +@@ -1505,12 +1505,7 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { + + static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param) + { +- struct device *dev = param; +- +- if (dev != chan->device->dev->parent) +- return false; +- +- return true; ++ return param == chan->device->dev; + } + + static struct pxa2xx_spi_master * +diff --git a/drivers/staging/typec/fusb302/fusb302.c b/drivers/staging/typec/fusb302/fusb302.c +index 7d9f25db1add..794dbc34973f 100644 +--- a/drivers/staging/typec/fusb302/fusb302.c ++++ b/drivers/staging/typec/fusb302/fusb302.c +@@ -671,6 +671,8 @@ static int fusb302_set_toggling(struct fusb302_chip *chip, + return ret; + chip->intr_togdone = false; + } else { ++ /* Datasheet says vconn MUST be off when toggling */ ++ WARN(chip->vconn_on, "Vconn is on during 
toggle start"); + /* unmask TOGDONE interrupt */ + ret = fusb302_i2c_clear_bits(chip, FUSB_REG_MASKA, + FUSB_REG_MASKA_TOGDONE); +diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c +index 3f9fe6aa51cc..ebbe1ec7b9e8 100644 +--- a/drivers/thermal/qcom/tsens.c ++++ b/drivers/thermal/qcom/tsens.c +@@ -162,7 +162,8 @@ static int tsens_probe(struct platform_device *pdev) + if (tmdev->ops->calibrate) { + ret = tmdev->ops->calibrate(tmdev); + if (ret < 0) { +- dev_err(dev, "tsens calibration failed\n"); ++ if (ret != -EPROBE_DEFER) ++ dev_err(dev, "tsens calibration failed\n"); + return ret; + } + } +diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c +index 203aca44a2bb..0afdda2db3a0 100644 +--- a/drivers/thermal/rcar_gen3_thermal.c ++++ b/drivers/thermal/rcar_gen3_thermal.c +@@ -342,6 +342,9 @@ MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids); + static int rcar_gen3_thermal_remove(struct platform_device *pdev) + { + struct device *dev = &pdev->dev; ++ struct rcar_gen3_thermal_priv *priv = dev_get_drvdata(dev); ++ ++ rcar_thermal_irq_set(priv, false); + + pm_runtime_put(dev); + pm_runtime_disable(dev); +diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c +index 27c5b2b46b8d..4ca46aa64699 100644 +--- a/drivers/tty/serial/8250/8250_dw.c ++++ b/drivers/tty/serial/8250/8250_dw.c +@@ -315,7 +315,7 @@ static bool dw8250_fallback_dma_filter(struct dma_chan *chan, void *param) + + static bool dw8250_idma_filter(struct dma_chan *chan, void *param) + { +- return param == chan->device->dev->parent; ++ return param == chan->device->dev; + } + + static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data) +@@ -356,7 +356,7 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data) + } + } + +- /* Platforms with iDMA */ ++ /* Platforms with iDMA 64-bit */ + if (platform_get_resource_byname(to_platform_device(p->dev), + IORESOURCE_MEM, "lpss_priv")) { + data->dma.rx_param = p->dev->parent; +diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c +index f5a86f651f38..0d73d913c18b 100644 +--- a/drivers/vfio/vfio.c ++++ b/drivers/vfio/vfio.c +@@ -34,6 +34,7 @@ + #include <linux/uaccess.h> + #include <linux/vfio.h> + #include <linux/wait.h> ++#include <linux/sched/signal.h> + + #define DRIVER_VERSION "0.3" + #define DRIVER_AUTHOR "Alex Williamson <[email protected]>" +@@ -909,30 +910,17 @@ void *vfio_device_data(struct vfio_device *device) + } + EXPORT_SYMBOL_GPL(vfio_device_data); + +-/* Given a referenced group, check if it contains the device */ +-static bool vfio_dev_present(struct vfio_group *group, struct device *dev) +-{ +- struct vfio_device *device; +- +- device = vfio_group_get_device(group, dev); +- if (!device) +- return false; +- +- vfio_device_put(device); +- return true; +-} +- + /* + * Decrement the device reference count and wait for the device to be + * removed. Open file descriptors for the device... */ + void *vfio_del_group_dev(struct device *dev) + { ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); + struct vfio_device *device = dev_get_drvdata(dev); + struct vfio_group *group = device->group; + void *device_data = device->device_data; + struct vfio_unbound_dev *unbound; + unsigned int i = 0; +- long ret; + bool interrupted = false; + + /* +@@ -969,6 +957,8 @@ void *vfio_del_group_dev(struct device *dev) + * interval with counter to allow the driver to take escalating + * measures to release the device if it has the ability to do so. 
+ */ ++ add_wait_queue(&vfio.release_q, &wait); ++ + do { + device = vfio_group_get_device(group, dev); + if (!device) +@@ -980,12 +970,10 @@ void *vfio_del_group_dev(struct device *dev) + vfio_device_put(device); + + if (interrupted) { +- ret = wait_event_timeout(vfio.release_q, +- !vfio_dev_present(group, dev), HZ * 10); ++ wait_woken(&wait, TASK_UNINTERRUPTIBLE, HZ * 10); + } else { +- ret = wait_event_interruptible_timeout(vfio.release_q, +- !vfio_dev_present(group, dev), HZ * 10); +- if (ret == -ERESTARTSYS) { ++ wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10); ++ if (signal_pending(current)) { + interrupted = true; + dev_warn(dev, + "Device is currently in use, task" +@@ -994,8 +982,10 @@ void *vfio_del_group_dev(struct device *dev) + current->comm, task_pid_nr(current)); + } + } +- } while (ret <= 0); + ++ } while (1); ++ ++ remove_wait_queue(&vfio.release_q, &wait); + /* + * In order to support multiple devices per group, devices can be + * plucked from the group while other devices in the group are still +diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c +index 463028543173..59e1cae57948 100644 +--- a/drivers/video/fbdev/hgafb.c ++++ b/drivers/video/fbdev/hgafb.c +@@ -285,6 +285,8 @@ static int hga_card_detect(void) + hga_vram_len = 0x08000; + + hga_vram = ioremap(0xb0000, hga_vram_len); ++ if (!hga_vram) ++ goto error; + + if (request_region(0x3b0, 12, "hgafb")) + release_io_ports = 1; +diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c +index ecdcf358ad5e..ffcf553719a3 100644 +--- a/drivers/video/fbdev/imsttfb.c ++++ b/drivers/video/fbdev/imsttfb.c +@@ -1516,6 +1516,11 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + info->fix.smem_start = addr; + info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ? + 0x400000 : 0x800000); ++ if (!info->screen_base) { ++ release_mem_region(addr, size); ++ framebuffer_release(info); ++ return -ENOMEM; ++ } + info->fix.mmio_start = addr + 0x800000; + par->dc_regs = ioremap(addr + 0x800000, 0x1000); + par->cmap_regs_phys = addr + 0x840000; +diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig +index 3ece1335ba84..f55328a31629 100644 +--- a/drivers/watchdog/Kconfig ++++ b/drivers/watchdog/Kconfig +@@ -1941,6 +1941,7 @@ comment "Watchdog Pretimeout Governors" + + config WATCHDOG_PRETIMEOUT_GOV + bool "Enable watchdog pretimeout governors" ++ depends on WATCHDOG_CORE + help + The option allows to select watchdog pretimeout governors. 
+ +diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c +index 518dfa1047cb..5098982e1a58 100644 +--- a/drivers/watchdog/imx2_wdt.c ++++ b/drivers/watchdog/imx2_wdt.c +@@ -181,8 +181,10 @@ static void __imx2_wdt_set_timeout(struct watchdog_device *wdog, + static int imx2_wdt_set_timeout(struct watchdog_device *wdog, + unsigned int new_timeout) + { +- __imx2_wdt_set_timeout(wdog, new_timeout); ++ unsigned int actual; + ++ actual = min(new_timeout, wdog->max_hw_heartbeat_ms * 1000); ++ __imx2_wdt_set_timeout(wdog, actual); + wdog->timeout = new_timeout; + return 0; + } +diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c +index d2a1a79fa324..d7955dc56737 100644 +--- a/fs/configfs/dir.c ++++ b/fs/configfs/dir.c +@@ -1755,12 +1755,19 @@ int configfs_register_group(struct config_group *parent_group, + + inode_lock_nested(d_inode(parent), I_MUTEX_PARENT); + ret = create_default_group(parent_group, group); +- if (!ret) { +- spin_lock(&configfs_dirent_lock); +- configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata); +- spin_unlock(&configfs_dirent_lock); +- } ++ if (ret) ++ goto err_out; ++ ++ spin_lock(&configfs_dirent_lock); ++ configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata); ++ spin_unlock(&configfs_dirent_lock); ++ inode_unlock(d_inode(parent)); ++ return 0; ++err_out: + inode_unlock(d_inode(parent)); ++ mutex_lock(&subsys->su_mutex); ++ unlink_group(group); ++ mutex_unlock(&subsys->su_mutex); + return ret; + } + EXPORT_SYMBOL(configfs_register_group); +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h +index 406d93b51a0b..6caae471c1a4 100644 +--- a/fs/f2fs/f2fs.h ++++ b/fs/f2fs/f2fs.h +@@ -1550,6 +1550,7 @@ enospc: + return -ENOSPC; + } + ++void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...); + static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, + struct inode *inode, + block_t count) +@@ -1558,9 +1559,17 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, + + spin_lock(&sbi->stat_lock); + f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count); +- f2fs_bug_on(sbi, inode->i_blocks < sectors); + sbi->total_valid_block_count -= (block_t)count; + spin_unlock(&sbi->stat_lock); ++ if (unlikely(inode->i_blocks < sectors)) { ++ f2fs_msg(sbi->sb, KERN_WARNING, ++ "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", ++ inode->i_ino, ++ (unsigned long long)inode->i_blocks, ++ (unsigned long long)sectors); ++ set_sbi_flag(sbi, SBI_NEED_FSCK); ++ return; ++ } + f2fs_i_blocks_write(inode, count, false, true); + } + +@@ -2382,7 +2391,6 @@ static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi, + + bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, + block_t blkaddr, int type); +-void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...); + static inline void verify_blkaddr(struct f2fs_sb_info *sbi, + block_t blkaddr, int type) + { +diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c +index 50818b519df8..e02ed16bc35c 100644 +--- a/fs/f2fs/inode.c ++++ b/fs/f2fs/inode.c +@@ -397,6 +397,7 @@ make_now: + return inode; + + bad_inode: ++ f2fs_inode_synced(inode); + iget_failed(inode); + trace_f2fs_iget_exit(inode, ret); + return ERR_PTR(ret); +diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c +index 6ea445377767..65a82c5bafcb 100644 +--- a/fs/f2fs/recovery.c ++++ b/fs/f2fs/recovery.c +@@ -445,7 +445,15 @@ retry_dn: + + get_node_info(sbi, dn.nid, &ni); + f2fs_bug_on(sbi, ni.ino != ino_of_node(page)); +- f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page)); ++ ++ if 
(ofs_of_node(dn.node_page) != ofs_of_node(page)) { ++ f2fs_msg(sbi->sb, KERN_WARNING, ++ "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u", ++ inode->i_ino, ofs_of_node(dn.node_page), ++ ofs_of_node(page)); ++ err = -EFAULT; ++ goto err; ++ } + + for (; start < end; start++, dn.ofs_in_node++) { + block_t src, dest; +diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h +index 47348d98165b..e3d8826c5113 100644 +--- a/fs/f2fs/segment.h ++++ b/fs/f2fs/segment.h +@@ -641,7 +641,6 @@ static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr) + static inline int check_block_count(struct f2fs_sb_info *sbi, + int segno, struct f2fs_sit_entry *raw_sit) + { +-#ifdef CONFIG_F2FS_CHECK_FS + bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false; + int valid_blocks = 0; + int cur_pos = 0, next_pos; +@@ -668,7 +667,7 @@ static inline int check_block_count(struct f2fs_sb_info *sbi, + set_sbi_flag(sbi, SBI_NEED_FSCK); + return -EINVAL; + } +-#endif ++ + /* check segment usage, and check boundary of a given segment number */ + if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg + || segno > TOTAL_SEGS(sbi) - 1)) { +diff --git a/fs/fat/file.c b/fs/fat/file.c +index 4724cc9ad650..62581de09bf1 100644 +--- a/fs/fat/file.c ++++ b/fs/fat/file.c +@@ -160,12 +160,17 @@ static int fat_file_release(struct inode *inode, struct file *filp) + int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) + { + struct inode *inode = filp->f_mapping->host; +- int res, err; ++ int err; ++ ++ err = __generic_file_fsync(filp, start, end, datasync); ++ if (err) ++ return err; + +- res = generic_file_fsync(filp, start, end, datasync); + err = sync_mapping_buffers(MSDOS_SB(inode->i_sb)->fat_inode->i_mapping); ++ if (err) ++ return err; + +- return res ? res : err; ++ return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); + } + + +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c +index c934fab44452..f580695b7bb9 100644 +--- a/fs/fuse/dev.c ++++ b/fs/fuse/dev.c +@@ -1678,7 +1678,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, + offset = outarg->offset & ~PAGE_MASK; + file_size = i_size_read(inode); + +- num = outarg->size; ++ num = min(outarg->size, fc->max_write); + if (outarg->offset > file_size) + num = 0; + else if (outarg->offset + num > file_size) +diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h +index be6d8e00453f..85f544007311 100644 +--- a/fs/nfsd/vfs.h ++++ b/fs/nfsd/vfs.h +@@ -117,8 +117,11 @@ void nfsd_put_raparams(struct file *file, struct raparms *ra); + + static inline int fh_want_write(struct svc_fh *fh) + { +- int ret = mnt_want_write(fh->fh_export->ex_path.mnt); ++ int ret; + ++ if (fh->fh_want_write) ++ return 0; ++ ret = mnt_want_write(fh->fh_export->ex_path.mnt); + if (!ret) + fh->fh_want_write = true; + return ret; +diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h +index c55cf3ff6847..2f6d0f3ae985 100644 +--- a/include/drm/drm_modeset_helper_vtables.h ++++ b/include/drm/drm_modeset_helper_vtables.h +@@ -1159,6 +1159,14 @@ struct drm_plane_helper_funcs { + * current one with the new plane configurations in the new + * plane_state. + * ++ * Drivers should also swap the framebuffers between current plane ++ * state (&drm_plane.state) and new_state. ++ * This is required since cleanup for async commits is performed on ++ * the new state, rather than old state like for traditional commits. 
++ * Since we want to give up the reference on the current (old) fb ++ * instead of our brand new one, swap them in the driver during the ++ * async commit. ++ * + * FIXME: + * - It only works for single plane updates + * - Async Pageflips are not supported yet +diff --git a/include/linux/pwm.h b/include/linux/pwm.h +index 56518adc31dd..bd7d611d63e9 100644 +--- a/include/linux/pwm.h ++++ b/include/linux/pwm.h +@@ -639,7 +639,6 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num) + #ifdef CONFIG_PWM_SYSFS + void pwmchip_sysfs_export(struct pwm_chip *chip); + void pwmchip_sysfs_unexport(struct pwm_chip *chip); +-void pwmchip_sysfs_unexport_children(struct pwm_chip *chip); + #else + static inline void pwmchip_sysfs_export(struct pwm_chip *chip) + { +@@ -648,10 +647,6 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip) + static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip) + { + } +- +-static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) +-{ +-} + #endif /* CONFIG_PWM_SYSFS */ + + #endif /* __LINUX_PWM_H */ +diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h +index 6cd4d5b48239..b619a190ff12 100644 +--- a/include/net/bluetooth/hci_core.h ++++ b/include/net/bluetooth/hci_core.h +@@ -178,9 +178,6 @@ struct adv_info { + + #define HCI_MAX_SHORT_NAME_LENGTH 10 + +-/* Min encryption key size to match with SMP */ +-#define HCI_MIN_ENC_KEY_SIZE 7 +- + /* Default LE RPA expiry time, 15 minutes */ + #define HCI_DEFAULT_RPA_TIMEOUT (15 * 60) + +diff --git a/ipc/mqueue.c b/ipc/mqueue.c +index d24025626310..5c0ae912f2f2 100644 +--- a/ipc/mqueue.c ++++ b/ipc/mqueue.c +@@ -374,7 +374,8 @@ static void mqueue_evict_inode(struct inode *inode) + struct user_struct *user; + unsigned long mq_bytes, mq_treesize; + struct ipc_namespace *ipc_ns; +- struct msg_msg *msg; ++ struct msg_msg *msg, *nmsg; ++ LIST_HEAD(tmp_msg); + + clear_inode(inode); + +@@ -385,10 +386,15 @@ static void mqueue_evict_inode(struct inode *inode) + info = MQUEUE_I(inode); + spin_lock(&info->lock); + while ((msg = msg_get(info)) != NULL) +- free_msg(msg); ++ list_add_tail(&msg->m_list, &tmp_msg); + kfree(info->node_cache); + spin_unlock(&info->lock); + ++ list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) { ++ list_del(&msg->m_list); ++ free_msg(msg); ++ } ++ + /* Total amount of bytes accounted for the mqueue */ + mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * +diff --git a/ipc/msgutil.c b/ipc/msgutil.c +index 84598025a6ad..e65593742e2b 100644 +--- a/ipc/msgutil.c ++++ b/ipc/msgutil.c +@@ -18,6 +18,7 @@ + #include <linux/utsname.h> + #include <linux/proc_ns.h> + #include <linux/uaccess.h> ++#include <linux/sched.h> + + #include "util.h" + +@@ -64,6 +65,9 @@ static struct msg_msg *alloc_msg(size_t len) + pseg = &msg->next; + while (len > 0) { + struct msg_msgseg *seg; ++ ++ cond_resched(); ++ + alen = min(len, DATALEN_SEG); + seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL_ACCOUNT); + if (seg == NULL) +@@ -176,6 +180,8 @@ void free_msg(struct msg_msg *msg) + kfree(msg); + while (seg != NULL) { + struct msg_msgseg *tmp = seg->next; ++ ++ cond_resched(); + kfree(seg); + seg = tmp; + } +diff --git a/kernel/sys.c b/kernel/sys.c +index e25ec93aea22..ab96b9882347 100644 +--- a/kernel/sys.c ++++ b/kernel/sys.c +@@ -1861,7 +1861,7 @@ static int validate_prctl_map(struct prctl_mm_map *prctl_map) + ((unsigned long)prctl_map->__m1 __op \ + (unsigned long)prctl_map->__m2) ? 
0 : -EINVAL + error = __prctl_check_order(start_code, <, end_code); +- error |= __prctl_check_order(start_data, <, end_data); ++ error |= __prctl_check_order(start_data,<=, end_data); + error |= __prctl_check_order(start_brk, <=, brk); + error |= __prctl_check_order(arg_start, <=, arg_end); + error |= __prctl_check_order(env_start, <=, env_end); +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index f13601a616ad..cfc2c0d1369a 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -2732,8 +2732,10 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int + if (neg) + continue; + val = convmul * val / convdiv; +- if ((min && val < *min) || (max && val > *max)) +- continue; ++ if ((min && val < *min) || (max && val > *max)) { ++ err = -EINVAL; ++ break; ++ } + *i = val; + } else { + val = convdiv * (*i) / convmul; +diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c +index 99e03bec68e4..4bb9b66338be 100644 +--- a/kernel/time/ntp.c ++++ b/kernel/time/ntp.c +@@ -640,7 +640,7 @@ static inline void process_adjtimex_modes(struct timex *txc, + time_constant = max(time_constant, 0l); + } + +- if (txc->modes & ADJ_TAI && txc->constant > 0) ++ if (txc->modes & ADJ_TAI && txc->constant >= 0) + *time_tai = txc->constant; + + if (txc->modes & ADJ_OFFSET) +diff --git a/mm/Kconfig b/mm/Kconfig +index 59efbd3337e0..cc4d633947bf 100644 +--- a/mm/Kconfig ++++ b/mm/Kconfig +@@ -708,12 +708,12 @@ config MIGRATE_VMA_HELPER + + config HMM + bool ++ select MMU_NOTIFIER + select MIGRATE_VMA_HELPER + + config HMM_MIRROR + bool "HMM mirror CPU page table into a device page table" + depends on ARCH_HAS_HMM +- select MMU_NOTIFIER + select HMM + help + Select HMM_MIRROR if you want to mirror range of the CPU page table of a +diff --git a/mm/cma.c b/mm/cma.c +index 5749c9b3b5d0..56761e40d191 100644 +--- a/mm/cma.c ++++ b/mm/cma.c +@@ -105,8 +105,10 @@ static int __init cma_activate_area(struct cma *cma) + + cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL); + +- if (!cma->bitmap) ++ if (!cma->bitmap) { ++ cma->count = 0; + return -ENOMEM; ++ } + + WARN_ON_ONCE(!pfn_valid(pfn)); + zone = page_zone(pfn_to_page(pfn)); +@@ -364,23 +366,26 @@ err: + #ifdef CONFIG_CMA_DEBUG + static void cma_debug_show_areas(struct cma *cma) + { +- unsigned long next_zero_bit, next_set_bit; ++ unsigned long next_zero_bit, next_set_bit, nr_zero; + unsigned long start = 0; +- unsigned int nr_zero, nr_total = 0; ++ unsigned long nr_part, nr_total = 0; ++ unsigned long nbits = cma_bitmap_maxno(cma); + + mutex_lock(&cma->lock); + pr_info("number of available pages: "); + for (;;) { +- next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start); +- if (next_zero_bit >= cma->count) ++ next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start); ++ if (next_zero_bit >= nbits) + break; +- next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit); ++ next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit); + nr_zero = next_set_bit - next_zero_bit; +- pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit); +- nr_total += nr_zero; ++ nr_part = nr_zero << cma->order_per_bit; ++ pr_cont("%s%lu@%lu", nr_total ? 
"+" : "", nr_part, ++ next_zero_bit); ++ nr_total += nr_part; + start = next_zero_bit + nr_zero; + } +- pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count); ++ pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count); + mutex_unlock(&cma->lock); + } + #else +diff --git a/mm/cma_debug.c b/mm/cma_debug.c +index 275df8b5b22e..c47ea3cfee79 100644 +--- a/mm/cma_debug.c ++++ b/mm/cma_debug.c +@@ -58,7 +58,7 @@ static int cma_maxchunk_get(void *data, u64 *val) + mutex_lock(&cma->lock); + for (;;) { + start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end); +- if (start >= cma->count) ++ if (start >= bitmap_maxno) + break; + end = find_next_bit(cma->bitmap, bitmap_maxno, start); + maxchunk = max(end - start, maxchunk); +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 741bdde54954..8ca0075a5464 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -1271,12 +1271,23 @@ void free_huge_page(struct page *page) + ClearPagePrivate(page); + + /* +- * A return code of zero implies that the subpool will be under its +- * minimum size if the reservation is not restored after page is free. +- * Therefore, force restore_reserve operation. ++ * If PagePrivate() was set on page, page allocation consumed a ++ * reservation. If the page was associated with a subpool, there ++ * would have been a page reserved in the subpool before allocation ++ * via hugepage_subpool_get_pages(). Since we are 'restoring' the ++ * reservtion, do not call hugepage_subpool_put_pages() as this will ++ * remove the reserved page from the subpool. + */ +- if (hugepage_subpool_put_pages(spool, 1) == 0) +- restore_reserve = true; ++ if (!restore_reserve) { ++ /* ++ * A return code of zero implies that the subpool will be ++ * under its minimum size if the reservation is not restored ++ * after page is free. Therefore, force restore_reserve ++ * operation. ++ */ ++ if (hugepage_subpool_put_pages(spool, 1) == 0) ++ restore_reserve = true; ++ } + + spin_lock(&hugetlb_lock); + clear_page_huge_active(page); +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 923deb33bf34..6f71518a4558 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -5727,13 +5727,15 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid, + unsigned long *zone_end_pfn, + unsigned long *ignored) + { ++ unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; ++ unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; + /* When hotadd a new node from cpu_up(), the node should be empty */ + if (!node_start_pfn && !node_end_pfn) + return 0; + + /* Get the start and end of the zone */ +- *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; +- *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; ++ *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); ++ *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); + adjust_zone_range_for_zone_movable(nid, zone_type, + node_start_pfn, node_end_pfn, + zone_start_pfn, zone_end_pfn); +diff --git a/mm/percpu.c b/mm/percpu.c +index 0c06e2f549a7..9beb84800d8d 100644 +--- a/mm/percpu.c ++++ b/mm/percpu.c +@@ -984,7 +984,8 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits, + /* + * Search to find a fit. 
+ */ +- end = start + alloc_bits + PCPU_BITMAP_BLOCK_BITS; ++ end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS, ++ pcpu_chunk_map_bits(chunk)); + bit_off = bitmap_find_next_zero_area(chunk->alloc_map, end, start, + alloc_bits, align_mask); + if (bit_off >= end) +@@ -1702,6 +1703,7 @@ void free_percpu(void __percpu *ptr) + struct pcpu_chunk *chunk; + unsigned long flags; + int off; ++ bool need_balance = false; + + if (!ptr) + return; +@@ -1723,7 +1725,7 @@ void free_percpu(void __percpu *ptr) + + list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) + if (pos != chunk) { +- pcpu_schedule_balance_work(); ++ need_balance = true; + break; + } + } +@@ -1731,6 +1733,9 @@ void free_percpu(void __percpu *ptr) + trace_percpu_free_percpu(chunk->base_addr, off, ptr); + + spin_unlock_irqrestore(&pcpu_lock, flags); ++ ++ if (need_balance) ++ pcpu_schedule_balance_work(); + } + EXPORT_SYMBOL_GPL(free_percpu); + +diff --git a/mm/slab.c b/mm/slab.c +index 843ecea9e336..a04aeae42306 100644 +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -4320,8 +4320,12 @@ static int leaks_show(struct seq_file *m, void *p) + * whole processing. + */ + do { +- set_store_user_clean(cachep); + drain_cpu_caches(cachep); ++ /* ++ * drain_cpu_caches() could make kmemleak_object and ++ * debug_objects_cache dirty, so reset afterwards. ++ */ ++ set_store_user_clean(cachep); + + x[1] = 0; + +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c +index fe4fb0c1fa61..cc061495f653 100644 +--- a/net/bluetooth/hci_conn.c ++++ b/net/bluetooth/hci_conn.c +@@ -1165,14 +1165,6 @@ int hci_conn_check_link_mode(struct hci_conn *conn) + !test_bit(HCI_CONN_ENCRYPT, &conn->flags)) + return 0; + +- /* The minimum encryption key size needs to be enforced by the +- * host stack before establishing any L2CAP connections. The +- * specification in theory allows a minimum of 1, but to align +- * BR/EDR and LE transports, a minimum of 7 is chosen. 
+- */ +- if (conn->enc_key_size < HCI_MIN_ENC_KEY_SIZE) +- return 0; +- + return 1; + } + +diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c +index d21ece9f8d73..d3fc73ac230b 100644 +--- a/sound/core/seq/seq_ports.c ++++ b/sound/core/seq/seq_ports.c +@@ -550,10 +550,10 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client, + list_del_init(list); + grp->exclusive = 0; + write_unlock_irq(&grp->list_lock); +- up_write(&grp->list_mutex); + + if (!empty) + unsubscribe_port(client, port, grp, &subs->info, ack); ++ up_write(&grp->list_mutex); + } + + /* connect two ports */ +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index afa591cf840a..65fb1e7edb9c 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -1839,9 +1839,6 @@ static int azx_first_init(struct azx *chip) + chip->msi = 0; + } + +- if (azx_acquire_irq(chip, 0) < 0) +- return -EBUSY; +- + pci_set_master(pci); + synchronize_irq(bus->irq); + +@@ -1956,6 +1953,9 @@ static int azx_first_init(struct azx *chip) + return -ENODEV; + } + ++ if (azx_acquire_irq(chip, 0) < 0) ++ return -EBUSY; ++ + strcpy(card->driver, "HDA-Intel"); + strlcpy(card->shortname, driver_short_names[chip->driver_type], + sizeof(card->shortname)); +diff --git a/tools/objtool/check.c b/tools/objtool/check.c +index ae3446768181..95326c6a7a24 100644 +--- a/tools/objtool/check.c ++++ b/tools/objtool/check.c +@@ -28,6 +28,8 @@ + #include <linux/hashtable.h> + #include <linux/kernel.h> + ++#define FAKE_JUMP_OFFSET -1 ++ + struct alternative { + struct list_head list; + struct instruction *insn; +@@ -498,7 +500,7 @@ static int add_jump_destinations(struct objtool_file *file) + insn->type != INSN_JUMP_UNCONDITIONAL) + continue; + +- if (insn->ignore) ++ if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET) + continue; + + rela = find_rela_by_dest_range(insn->sec, insn->offset, +@@ -645,10 +647,10 @@ static int handle_group_alt(struct objtool_file *file, + clear_insn_state(&fake_jump->state); + + fake_jump->sec = special_alt->new_sec; +- fake_jump->offset = -1; ++ fake_jump->offset = FAKE_JUMP_OFFSET; + fake_jump->type = INSN_JUMP_UNCONDITIONAL; + fake_jump->jump_dest = list_next_entry(last_orig_insn, list); +- fake_jump->ignore = true; ++ fake_jump->func = orig_insn->func; + } + + if (!special_alt->new_len) {
