commit:     21dcc2a4615d3241e7e3a5d50cc2676b2f2cb3f7
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul 10 11:03:59 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul 10 11:03:59 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=21dcc2a4
Linux patch 4.14.133

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1132_linux-4.14.133.patch | 1428 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1432 insertions(+)

diff --git a/0000_README b/0000_README
index f51c89c..dd56b3e 100644
--- a/0000_README
+++ b/0000_README
@@ -571,6 +571,10 @@ Patch:  1131_linux-4.14.132.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.14.132
 
+Patch:  1132_linux-4.14.133.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.14.133
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
diff --git a/1132_linux-4.14.133.patch b/1132_linux-4.14.133.patch
new file mode 100644
index 0000000..4d51a54
--- /dev/null
+++ b/1132_linux-4.14.133.patch
@@ -0,0 +1,1428 @@
+diff --git a/Makefile b/Makefile
+index 23b2916ef0ff..c36e64bd9ae7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 132
++SUBLEVEL = 133
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
+index bcd7c9fc5d0f..869ac2d421c8 100644
+--- a/arch/arc/kernel/traps.c
++++ b/arch/arc/kernel/traps.c
+@@ -155,3 +155,11 @@ void do_insterror_or_kprobe(unsigned long address, struct pt_regs *regs)
+ 
+ 	insterror_is_error(address, regs);
+ }
++
++/*
++ * abort() call generated by older gcc for __builtin_trap()
++ */
++void abort(void)
++{
++	__asm__ __volatile__("trap_s 5\n");
++}
+diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
+index f469e0435903..73ecccc514e1 100644
+--- a/arch/arm64/kernel/module.c
++++ b/arch/arm64/kernel/module.c
+@@ -32,6 +32,7 @@
+ 
+ void *module_alloc(unsigned long size)
+ {
++	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
+ 	gfp_t gfp_mask = GFP_KERNEL;
+ 	void *p;
+ 
+@@ -39,9 +40,12 @@ void *module_alloc(unsigned long size)
+ 	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
+ 		gfp_mask |= __GFP_NOWARN;
+ 
++	if (IS_ENABLED(CONFIG_KASAN))
++		/* don't exceed the static module region - see below */
++		module_alloc_end = MODULES_END;
++
+ 	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
+-				module_alloc_base + MODULES_VSIZE,
+-				gfp_mask, PAGE_KERNEL_EXEC, 0,
++				module_alloc_end, gfp_mask, PAGE_KERNEL_EXEC, 0,
+ 				NUMA_NO_NODE, __builtin_return_address(0));
+ 
+ 	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
+diff --git a/arch/mips/include/asm/netlogic/xlr/fmn.h b/arch/mips/include/asm/netlogic/xlr/fmn.h
+index 5604db3d1836..d79c68fa78d9 100644
+--- a/arch/mips/include/asm/netlogic/xlr/fmn.h
++++ b/arch/mips/include/asm/netlogic/xlr/fmn.h
+@@ -301,8 +301,6 @@ static inline int nlm_fmn_send(unsigned int size, unsigned int code,
+ 	for (i = 0; i < 8; i++) {
+ 		nlm_msgsnd(dest);
+ 		status = nlm_read_c2_status0();
+-		if ((status & 0x2) == 1)
+-			pr_info("Send pending fail!\n");
+ 		if ((status & 0x4) == 0)
+ 			return 0;
+ 	}
+diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
+index 91ad023ead8c..60c58005fd20 100644
+--- a/arch/mips/mm/mmap.c
++++ b/arch/mips/mm/mmap.c
+@@ -203,7 +203,7 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
+ 
+ int __virt_addr_valid(const volatile void *kaddr)
+ {
+-	unsigned long vaddr = (unsigned long)vaddr;
++	unsigned long vaddr = (unsigned long)kaddr;
+ 
+ 	if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
+ 		return 0;
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index 79b9f2ad3ff5..c2a6869418f7 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -388,6 +388,7 @@ static struct work_registers build_get_work_registers(u32 **p)
+ static void build_restore_work_registers(u32 **p)
+ {
+ 	if (scratch_reg >= 0) {
++		uasm_i_ehb(p);
+ 		UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
+ 		return;
+ 	}
+@@ -671,10 +672,12 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
+ 			uasm_i_mtc0(p, 0, C0_PAGEMASK);
+ 			uasm_il_b(p, r, lid);
+ 		}
+-		if (scratch_reg >= 0)
++		if (scratch_reg >= 0) {
++			uasm_i_ehb(p);
+ 			UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
+-		else
++		} else {
+ 			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
++		}
+ 	} else {
+ 		/* Reset default page size */
+ 		if (PM_DEFAULT_MASK >> 16) {
+@@ -939,10 +942,12 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+ 		uasm_i_jr(p, ptr);
+ 
+ 		if (mode == refill_scratch) {
+-			if (scratch_reg >= 0)
++			if (scratch_reg >= 0) {
++				uasm_i_ehb(p);
+ 				UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
+-			else
++			} else {
+ 				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
++			}
+ 		} else {
+ 			uasm_i_nop(p);
+ 		}
+@@ -1259,6 +1264,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
+ 	UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
+ 
+ 	if (c0_scratch_reg >= 0) {
++		uasm_i_ehb(p);
+ 		UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
+ 		build_tlb_write_entry(p, l, r, tlb_random);
+ 		uasm_l_leave(l, *p);
+@@ -1615,15 +1621,17 @@ static void build_setup_pgd(void)
+ 		uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
+ 		uasm_l_tlbl_goaround1(&l, p);
+ 		UASM_i_SLL(&p, a0, a0, 11);
+-		uasm_i_jr(&p, 31);
+ 		UASM_i_MTC0(&p, a0, C0_CONTEXT);
++		uasm_i_jr(&p, 31);
++		uasm_i_ehb(&p);
+ 	} else {
+ 		/* PGD in c0_KScratch */
+-		uasm_i_jr(&p, 31);
+ 		if (cpu_has_ldpte)
+ 			UASM_i_MTC0(&p, a0, C0_PWBASE);
+ 		else
+ 			UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
++		uasm_i_jr(&p, 31);
++		uasm_i_ehb(&p);
+ 	}
+ #else
+ #ifdef CONFIG_SMP
+@@ -1637,13 +1645,16 @@ static void build_setup_pgd(void)
+ 	UASM_i_LA_mostly(&p, a2, pgdc);
+ 	UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
+ #endif /* SMP */
+-	uasm_i_jr(&p, 31);
+ 
+ 	/* if pgd_reg is allocated, save PGD also to scratch register */
+-	if (pgd_reg != -1)
++	if (pgd_reg != -1) {
+ 		UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
+-	else
++		uasm_i_jr(&p, 31);
++		uasm_i_ehb(&p);
++	} else {
++		uasm_i_jr(&p, 31);
+ 		uasm_i_nop(&p);
++	}
+ #endif
+ 	if (p >= tlbmiss_handler_setup_pgd_end)
+ 		panic("tlbmiss_handler_setup_pgd space exceeded");
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index c020ba4b7eb6..ccc2b9d2956a 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -22,6 +22,7 @@
+ #include <linux/init.h>
+ #include <linux/list.h>
+ #include <linux/module.h>
++#include <linux/memory.h>
+ 
+ #include <trace/syscall.h>
+ 
+@@ -36,6 +37,7 @@
+ 
+ int ftrace_arch_code_modify_prepare(void)
+ {
++	mutex_lock(&text_mutex);
+ 	set_kernel_text_rw();
+ 	set_all_modules_text_rw();
+ 	return 0;
+@@ -45,6 +47,7 @@ int ftrace_arch_code_modify_post_process(void)
+ {
+ 	set_all_modules_text_ro();
+ 	set_kernel_text_ro();
++	mutex_unlock(&text_mutex);
+ 	return 0;
+ }
+ 
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 053e4937af0c..3aa0b8123c91 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2161,7 +2161,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
+ 	struct kvm_lapic *apic = vcpu->arch.apic;
+ 	u32 ppr;
+ 
+-	if (!apic_enabled(apic))
++	if (!kvm_apic_hw_enabled(apic))
+ 		return -1;
+ 
+ 	__apic_update_ppr(apic, &ppr);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 858dd0d89b02..a8526042d176 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1392,7 +1392,7 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
+ 		vcpu->arch.tsc_always_catchup = 1;
+ 		return 0;
+ 	} else {
+-		WARN(1, "user requested TSC rate below hardware speed\n");
++		pr_warn_ratelimited("user requested TSC rate below hardware speed\n");
+ 		return -1;
+ 	}
+ }
+@@ -1402,8 +1402,8 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
+ 			user_tsc_khz, tsc_khz);
+ 
+ 	if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
+-		WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
+-			  user_tsc_khz);
++		pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
++				    user_tsc_khz);
+ 		return -1;
+ 	}
+ 
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index 248f6ba41688..4cc1871646a8 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -585,6 +585,7 @@ static void cryptd_skcipher_free(struct skcipher_instance *inst)
+ 	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
+ 
+ 	crypto_drop_skcipher(&ctx->spawn);
++	kfree(inst);
+ }
+ 
+ static int cryptd_create_skcipher(struct crypto_template *tmpl,
+diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
+index b5758768920b..2b8fb8f1391e 100644
+--- a/crypto/crypto_user.c
++++ b/crypto/crypto_user.c
+@@ -55,6 +55,9 @@ static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
+ 	list_for_each_entry(q, &crypto_alg_list, cra_list) {
+ 		int match = 0;
+ 
++		if (crypto_is_larval(q))
++			continue;
++
+ 		if ((q->cra_flags ^ p->cru_type) & p->cru_mask)
+ 			continue;
+ 
+diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
+index a67ec1bdc4e0..b8e7c2d8915e 100644
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -632,7 +632,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
+ 	spin_lock_irqsave(&sdma->channel_0_lock, flags);
+ 
+ 	bd0->mode.command = C0_SETPM;
+-	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
++	bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
+ 	bd0->mode.count = size / 2;
+ 	bd0->buffer_addr = buf_phys;
+ 	bd0->ext_buffer_addr = address;
+@@ -909,7 +909,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
+ 	context->gReg[7] = sdmac->watermark_level;
+ 
+ 	bd0->mode.command = C0_SETDM;
+-	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
++	bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
+ 	bd0->mode.count = sizeof(*context) / 4;
+ 	bd0->buffer_addr = sdma->context_phys;
+ 	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index b2eecfc9042e..53186c5e1066 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1534,25 +1534,6 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
+ 	mutex_unlock(&adev->srbm_mutex);
+ 
+ 	gfx_v9_0_init_compute_vmid(adev);
+-
+-	mutex_lock(&adev->grbm_idx_mutex);
+-	/*
+-	 * making sure that the following register writes will be broadcasted
+-	 * to all the shaders
+-	 */
+-	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+-
+-	WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
+-		   (adev->gfx.config.sc_prim_fifo_size_frontend <<
+-			PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
+-		   (adev->gfx.config.sc_prim_fifo_size_backend <<
+-			PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
+-		   (adev->gfx.config.sc_hiz_tile_fifo_size <<
+-			PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
+-		   (adev->gfx.config.sc_earlyz_tile_fifo_size <<
+-			PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
+-	mutex_unlock(&adev->grbm_idx_mutex);
+-
+ }
+ 
+ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
+index 92c1f8e166dc..0bdbbd4027fe 100644
+--- a/drivers/gpu/drm/i915/intel_csr.c
++++ b/drivers/gpu/drm/i915/intel_csr.c
+@@ -277,10 +277,17 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
+ 	uint32_t i;
+ 	uint32_t *dmc_payload;
+ 	uint32_t required_version;
++	size_t fsize;
+ 
+ 	if (!fw)
+ 		return NULL;
+ 
++	fsize = sizeof(struct intel_css_header) +
++		sizeof(struct intel_package_header) +
++		sizeof(struct intel_dmc_header);
++	if (fsize > fw->size)
++		goto error_truncated;
++
+ 	/* Extract CSS Header information*/
+ 	css_header = (struct intel_css_header *)fw->data;
+ 	if (sizeof(struct intel_css_header) !=
+@@ -350,6 +357,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
+ 		return NULL;
+ 	}
+ 	readcount += dmc_offset;
++	fsize += dmc_offset;
++	if (fsize > fw->size)
++		goto error_truncated;
+ 
+ 	/* Extract dmc_header information. */
+ 	dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
+@@ -380,6 +390,10 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
+ 
+ 	/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
+ 	nbytes = dmc_header->fw_size * 4;
++	fsize += nbytes;
++	if (fsize > fw->size)
++		goto error_truncated;
++
+ 	if (nbytes > CSR_MAX_FW_SIZE) {
+ 		DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
+ 		return NULL;
+@@ -393,6 +407,10 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
+ 	}
+ 
+ 	return memcpy(dmc_payload, &fw->data[readcount], nbytes);
++
++error_truncated:
++	DRM_ERROR("Truncated DMC firmware, rejecting.\n");
++	return NULL;
+ }
+ 
+ static void csr_load_work_fn(struct work_struct *work)
+diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
+index 957fbf8c55eb..12dd261fc308 100644
+--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
++++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
+@@ -99,14 +99,14 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
+ 	ipu_dc_disable(ipu);
+ 	ipu_prg_disable(ipu);
+ 
++	drm_crtc_vblank_off(crtc);
++
+ 	spin_lock_irq(&crtc->dev->event_lock);
+-	if (crtc->state->event) {
++	if (crtc->state->event && !crtc->state->active) {
+ 		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ 		crtc->state->event = NULL;
+ 	}
+ 	spin_unlock_irq(&crtc->dev->event_lock);
+-
+-	drm_crtc_vblank_off(crtc);
+ }
+ 
+ static void imx_drm_crtc_reset(struct drm_crtc *crtc)
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index a2ca90fc403c..cada1c75c41c 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -270,6 +270,7 @@ err_config_cleanup:
+ static void mtk_drm_kms_deinit(struct drm_device *drm)
+ {
+ 	drm_kms_helper_poll_fini(drm);
++	drm_atomic_helper_shutdown(drm);
+ 
+ 	component_unbind_all(drm->dev, drm);
+ 	drm_mode_config_cleanup(drm);
+diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
+index 7e5e24c2152a..c1b8caad65e6 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
+@@ -631,6 +631,15 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
+ 	if (--dsi->refcount != 0)
+ 		return;
+ 
++	/*
++	 * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
++	 * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
++	 * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
++	 * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
++	 * after dsi is fully set.
++	 */
++	mtk_dsi_stop(dsi);
++
+ 	if (!mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500)) {
+ 		if (dsi->panel) {
+ 			if (drm_panel_unprepare(dsi->panel)) {
+@@ -697,7 +706,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
+ 		}
+ 	}
+ 
+-	mtk_dsi_stop(dsi);
+ 	mtk_dsi_poweroff(dsi);
+ 
+ 	dsi->enabled = false;
+@@ -851,6 +859,8 @@ static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi)
+ 	/* Skip connector cleanup if creation was delegated to the bridge */
+ 	if (dsi->conn.dev)
+ 		drm_connector_cleanup(&dsi->conn);
++	if (dsi->panel)
++		drm_panel_detach(dsi->panel);
+ }
+ 
+ static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
+diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
+index 4f3de2a8c4df..9aced80f31a2 100644
+--- a/drivers/platform/x86/mlx-platform.c
++++ b/drivers/platform/x86/mlx-platform.c
+@@ -318,7 +318,7 @@ static int __init mlxplat_init(void)
+ 
+ 	for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ 		priv->pdev_mux[i] = platform_device_register_resndata(
+-						&mlxplat_dev->dev,
++						&priv->pdev_i2c->dev,
+ 						"i2c-mux-reg", i, NULL,
+ 						0, &mlxplat_mux_data[i],
+ 						sizeof(mlxplat_mux_data[i]));
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 5b4b7f9be2d7..6d520e8945f7 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -4800,7 +4800,7 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
+ 		curr_sg->reserved[0] = 0;
+ 		curr_sg->reserved[1] = 0;
+ 		curr_sg->reserved[2] = 0;
+-		curr_sg->chain_indicator = 0x80;
++		curr_sg->chain_indicator = IOACCEL2_CHAIN;
+ 
+ 		curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
+ 	}
+@@ -4817,6 +4817,11 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
+ 		curr_sg++;
+ 	}
+ 
++	/*
++	 * Set the last s/g element bit
++	 */
++	(curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;
++
+ 	switch (cmd->sc_data_direction) {
+ 	case DMA_TO_DEVICE:
+ 		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
+index 078afe448115..ecf15344b55d 100644
+--- a/drivers/scsi/hpsa_cmd.h
++++ b/drivers/scsi/hpsa_cmd.h
+@@ -516,6 +516,7 @@ struct ioaccel2_sg_element {
+ 	u8 reserved[3];
+ 	u8 chain_indicator;
+ #define IOACCEL2_CHAIN 0x80
++#define IOACCEL2_LAST_SG 0x40
+ };
+ 
+ /*
+diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
+index 3aa9e6e3dac8..4ef54436b9d4 100644
+--- a/drivers/spi/spi-bitbang.c
++++ b/drivers/spi/spi-bitbang.c
+@@ -392,7 +392,7 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
+ 	if (ret)
+ 		spi_master_put(master);
+ 
+-	return 0;
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(spi_bitbang_start);
+ 
+diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
+index 070733ca94d5..32943afacffd 100644
+--- a/drivers/tty/rocket.c
++++ b/drivers/tty/rocket.c
+@@ -279,7 +279,7 @@ MODULE_PARM_DESC(pc104_3, "set interface types for ISA(PC104) board #3 (e.g. pc1
+ module_param_array(pc104_4, ulong, NULL, 0);
+ MODULE_PARM_DESC(pc104_4, "set interface types for ISA(PC104) board #4 (e.g. pc104_4=232,232,485,485,...");
+ 
+-static int rp_init(void);
++static int __init rp_init(void);
+ static void rp_cleanup_module(void);
+ 
+ module_init(rp_init);
+diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
+index e0c1b0099265..089f39de6897 100644
+--- a/drivers/usb/gadget/udc/fusb300_udc.c
++++ b/drivers/usb/gadget/udc/fusb300_udc.c
+@@ -1345,12 +1345,15 @@ static const struct usb_gadget_ops fusb300_gadget_ops = {
+ static int fusb300_remove(struct platform_device *pdev)
+ {
+ 	struct fusb300 *fusb300 = platform_get_drvdata(pdev);
++	int i;
+ 
+ 	usb_del_gadget_udc(&fusb300->gadget);
+ 	iounmap(fusb300->reg);
+ 	free_irq(platform_get_irq(pdev, 0), fusb300);
+ 
+ 	fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
++	for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
++		kfree(fusb300->ep[i]);
+ 	kfree(fusb300);
+ 
+ 	return 0;
+@@ -1494,6 +1497,8 @@ clean_up:
+ 		if (fusb300->ep0_req)
+ 			fusb300_free_request(&fusb300->ep[0]->ep,
+ 				fusb300->ep0_req);
++		for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
++			kfree(fusb300->ep[i]);
+ 		kfree(fusb300);
+ 	}
+ 	if (reg)
+diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
+index 8f32b5ee7734..6df1aded4503 100644
+--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
++++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
+@@ -935,8 +935,7 @@ static struct lpc32xx_usbd_dd_gad *udc_dd_alloc(struct lpc32xx_udc *udc)
+ 	dma_addr_t dma;
+ 	struct lpc32xx_usbd_dd_gad *dd;
+ 
+-	dd = (struct lpc32xx_usbd_dd_gad *) dma_pool_alloc(
+-			udc->dd_cache, (GFP_KERNEL | GFP_DMA), &dma);
++	dd = dma_pool_alloc(udc->dd_cache, GFP_ATOMIC | GFP_DMA, &dma);
+ 	if (dd)
+ 		dd->this_dma = dma;
+ 
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 4eba9ee179e3..b40e8ded49c6 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -44,6 +44,12 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
+  * Using this limit prevents one virtqueue from starving others. */
+ #define VHOST_NET_WEIGHT 0x80000
+ 
++/* Max number of packets transferred before requeueing the job.
++ * Using this limit prevents one virtqueue from starving others with small
++ * pkts.
++ */
++#define VHOST_NET_PKT_WEIGHT 256
++
+ /* MAX number of TX used buffers for outstanding zerocopy */
+ #define VHOST_MAX_PEND 128
+ #define VHOST_GOODCOPY_LEN 256
+@@ -461,6 +467,7 @@ static void handle_tx(struct vhost_net *net)
+ 	struct socket *sock;
+ 	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
+ 	bool zcopy, zcopy_used;
++	int sent_pkts = 0;
+ 
+ 	mutex_lock(&vq->mutex);
+ 	sock = vq->private_data;
+@@ -475,7 +482,7 @@ static void handle_tx(struct vhost_net *net)
+ 	hdr_size = nvq->vhost_hlen;
+ 	zcopy = nvq->ubufs;
+ 
+-	for (;;) {
++	do {
+ 		/* Release DMAs done buffers first */
+ 		if (zcopy)
+ 			vhost_zerocopy_signal_used(net, vq);
+@@ -543,7 +550,6 @@ static void handle_tx(struct vhost_net *net)
+ 			msg.msg_control = NULL;
+ 			ubufs = NULL;
+ 		}
+-
+ 		total_len += len;
+ 		if (total_len < VHOST_NET_WEIGHT &&
+ 		    !vhost_vq_avail_empty(&net->dev, vq) &&
+@@ -572,11 +578,7 @@ static void handle_tx(struct vhost_net *net)
+ 		else
+ 			vhost_zerocopy_signal_used(net, vq);
+ 		vhost_net_tx_packet(net);
+-		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
+-			vhost_poll_queue(&vq->poll);
+-			break;
+-		}
+-	}
++	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
+ out:
+ 	mutex_unlock(&vq->mutex);
+ }
+@@ -754,6 +756,7 @@ static void handle_rx(struct vhost_net *net)
+ 	struct socket *sock;
+ 	struct iov_iter fixup;
+ 	__virtio16 num_buffers;
++	int recv_pkts = 0;
+ 
+ 	mutex_lock_nested(&vq->mutex, 0);
+ 	sock = vq->private_data;
+@@ -773,7 +776,11 @@ static void handle_rx(struct vhost_net *net)
+ 		vq->log : NULL;
+ 	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
+ 
+-	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
++	do {
++		sock_len = vhost_net_rx_peek_head_len(net, sock->sk);
++
++		if (!sock_len)
++			break;
+ 		sock_len += sock_hlen;
+ 		vhost_len = sock_len + vhost_hlen;
+ 		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
+@@ -854,11 +861,8 @@ static void handle_rx(struct vhost_net *net)
+ 			vhost_log_write(vq, vq_log, log, vhost_len,
+ 					vq->iov, in);
+ 		total_len += vhost_len;
+-		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
+-			vhost_poll_queue(&vq->poll);
+-			goto out;
+-		}
+-	}
++	} while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));
++
+ 	vhost_net_enable_vq(net, vq);
+ out:
+ 	mutex_unlock(&vq->mutex);
+@@ -936,7 +940,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
+ 		n->vqs[i].sock_hlen = 0;
+ 		vhost_net_buf_init(&n->vqs[i].rxq);
+ 	}
+-	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
++	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
++		       VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT);
+ 
+ 	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
+ 	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index 35ebf06d9ecb..cb4ab5b955ad 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -58,6 +58,12 @@
+ #define VHOST_SCSI_PREALLOC_UPAGES 2048
+ #define VHOST_SCSI_PREALLOC_PROT_SGLS 512
+ 
++/* Max number of requests before requeueing the job.
++ * Using this limit prevents one virtqueue from starving others with
++ * request.
++ */
++#define VHOST_SCSI_WEIGHT 256
++
+ struct vhost_scsi_inflight {
+ 	/* Wait for the flush operation to finish */
+ 	struct completion comp;
+@@ -840,7 +846,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+ 	u64 tag;
+ 	u32 exp_data_len, data_direction;
+ 	unsigned int out = 0, in = 0;
+-	int head, ret, prot_bytes;
++	int head, ret, prot_bytes, c = 0;
+ 	size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
+ 	size_t out_size, in_size;
+ 	u16 lun;
+@@ -859,7 +865,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+ 
+ 	vhost_disable_notify(&vs->dev, vq);
+ 
+-	for (;;) {
++	do {
+ 		head = vhost_get_vq_desc(vq, vq->iov,
+ 					 ARRAY_SIZE(vq->iov), &out, &in,
+ 					 NULL, NULL);
+@@ -1074,7 +1080,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+ 		 */
+ 		INIT_WORK(&cmd->work, vhost_scsi_submission_work);
+ 		queue_work(vhost_scsi_workqueue, &cmd->work);
+-	}
++	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
+ out:
+ 	mutex_unlock(&vq->mutex);
+ }
+@@ -1427,7 +1433,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
+ 		vqs[i] = &vs->vqs[i].vq;
+ 		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
+ 	}
+-	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
++	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, VHOST_SCSI_WEIGHT, 0);
+ 
+ 	vhost_scsi_init_inflight(vs, NULL);
+ 
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 0e93ac888a5f..88fa81c482e8 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -412,8 +412,24 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
+ 		vhost_vq_free_iovecs(dev->vqs[i]);
+ }
+ 
++bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
++			  int pkts, int total_len)
++{
++	struct vhost_dev *dev = vq->dev;
++
++	if ((dev->byte_weight && total_len >= dev->byte_weight) ||
++	    pkts >= dev->weight) {
++		vhost_poll_queue(&vq->poll);
++		return true;
++	}
++
++	return false;
++}
++EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
++
+ void vhost_dev_init(struct vhost_dev *dev,
+-		    struct vhost_virtqueue **vqs, int nvqs)
++		    struct vhost_virtqueue **vqs, int nvqs,
++		    int weight, int byte_weight)
+ {
+ 	struct vhost_virtqueue *vq;
+ 	int i;
+@@ -427,6 +443,8 @@ void vhost_dev_init(struct vhost_dev *dev,
+ 	dev->iotlb = NULL;
+ 	dev->mm = NULL;
+ 	dev->worker = NULL;
++	dev->weight = weight;
++	dev->byte_weight = byte_weight;
+ 	init_llist_head(&dev->work_list);
+ 	init_waitqueue_head(&dev->wait);
+ 	INIT_LIST_HEAD(&dev->read_list);
+diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
+index 75d21d4a8354..950c5c4e4ee3 100644
+--- a/drivers/vhost/vhost.h
++++ b/drivers/vhost/vhost.h
+@@ -173,9 +173,13 @@ struct vhost_dev {
+ 	struct list_head read_list;
+ 	struct list_head pending_list;
+ 	wait_queue_head_t wait;
++	int weight;
++	int byte_weight;
+ };
+ 
+-void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
++bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
++void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
++		    int nvqs, int weight, int byte_weight);
+ long vhost_dev_set_owner(struct vhost_dev *dev);
+ bool vhost_dev_has_owner(struct vhost_dev *dev);
+ long vhost_dev_check_owner(struct vhost_dev *);
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
+index d0cf3d5aa570..5b9db5deffbb 100644
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -21,6 +21,14 @@
+ #include "vhost.h"
+ 
+ #define VHOST_VSOCK_DEFAULT_HOST_CID 2
++/* Max number of bytes transferred before requeueing the job.
++ * Using this limit prevents one virtqueue from starving others. */
++#define VHOST_VSOCK_WEIGHT 0x80000
++/* Max number of packets transferred before requeueing the job.
++ * Using this limit prevents one virtqueue from starving others with
++ * small pkts.
++ */
++#define VHOST_VSOCK_PKT_WEIGHT 256
+ 
+ enum {
+ 	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
+@@ -78,6 +86,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ 			    struct vhost_virtqueue *vq)
+ {
+ 	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
++	int pkts = 0, total_len = 0;
+ 	bool added = false;
+ 	bool restart_tx = false;
+ 
+@@ -89,7 +98,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ 	/* Avoid further vmexits, we're already processing the virtqueue */
+ 	vhost_disable_notify(&vsock->dev, vq);
+ 
+-	for (;;) {
++	do {
+ 		struct virtio_vsock_pkt *pkt;
+ 		struct iov_iter iov_iter;
+ 		unsigned out, in;
+@@ -174,8 +183,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ 		 */
+ 		virtio_transport_deliver_tap_pkt(pkt);
+ 
++		total_len += pkt->len;
+ 		virtio_transport_free_pkt(pkt);
+-	}
++	} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
+ 	if (added)
+ 		vhost_signal(&vsock->dev, vq);
+ 
+@@ -350,7 +360,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
+ 	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
+ 						 dev);
+ 	struct virtio_vsock_pkt *pkt;
+-	int head;
++	int head, pkts = 0, total_len = 0;
+ 	unsigned int out, in;
+ 	bool added = false;
+ 
+@@ -360,7 +370,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
+ 		goto out;
+ 
+ 	vhost_disable_notify(&vsock->dev, vq);
+-	for (;;) {
++	do {
+ 		u32 len;
+ 
+ 		if (!vhost_vsock_more_replies(vsock)) {
+@@ -401,9 +411,11 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
+ 		else
+ 			virtio_transport_free_pkt(pkt);
+ 
+-		vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
++		len += sizeof(pkt->hdr);
++		vhost_add_used(vq, head, len);
++		total_len += len;
+ 		added = true;
+-	}
++	} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
+ 
+ no_more_replies:
+ 	if (added)
+@@ -531,7 +543,8 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
+ 	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
+ 	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
+ 
+-	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));
++	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
++		       VHOST_VSOCK_PKT_WEIGHT, VHOST_VSOCK_WEIGHT);
+ 
+ 	file->private_data = vsock;
+ 	spin_lock_init(&vsock->send_pkt_list_lock);
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index d826fbaf7d50..e4d5e6eae409 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1290,8 +1290,6 @@ again:
+ 			ret = -EIO;
+ 			goto out;
+ 		}
+-		btrfs_tree_read_lock(eb);
+-		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ 		if (!path->skip_locking) {
+ 			btrfs_tree_read_lock(eb);
+ 			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index f86457713e60..f1e9dd246ab0 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -512,18 +512,27 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
+ 	}
+ 	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+ 
+-	trans = btrfs_start_transaction(root, 0);
+-	if (IS_ERR(trans)) {
+-		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+-		return PTR_ERR(trans);
++	while (1) {
++		trans = btrfs_start_transaction(root, 0);
++		if (IS_ERR(trans)) {
++			mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
++			return PTR_ERR(trans);
++		}
++		ret = btrfs_commit_transaction(trans);
++		WARN_ON(ret);
++		mutex_lock(&uuid_mutex);
++		/* keep away write_all_supers() during the finishing procedure */
++		mutex_lock(&fs_info->fs_devices->device_list_mutex);
++		mutex_lock(&fs_info->chunk_mutex);
++		if (src_device->has_pending_chunks) {
++			mutex_unlock(&root->fs_info->chunk_mutex);
++			mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
++			mutex_unlock(&uuid_mutex);
++		} else {
++			break;
++		}
+ 	}
+-	ret = btrfs_commit_transaction(trans);
+-	WARN_ON(ret);
+ 
+-	mutex_lock(&uuid_mutex);
+-	/* keep away write_all_supers() during the finishing procedure */
+-	mutex_lock(&fs_info->fs_devices->device_list_mutex);
+-	mutex_lock(&fs_info->chunk_mutex);
+ 	btrfs_dev_replace_lock(dev_replace, 1);
+ 	dev_replace->replace_state =
+ 		scrub_ret ? BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 38ed8e259e00..85294fef1051 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -4851,6 +4851,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+ 	for (i = 0; i < map->num_stripes; i++) {
+ 		num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
+ 		btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
++		map->stripes[i].dev->has_pending_chunks = true;
+ 	}
+ 
+ 	atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space);
+@@ -7310,6 +7311,7 @@ void btrfs_update_commit_device_bytes_used(struct btrfs_fs_info *fs_info,
+ 		for (i = 0; i < map->num_stripes; i++) {
+ 			dev = map->stripes[i].dev;
+ 			dev->commit_bytes_used = dev->bytes_used;
++			dev->has_pending_chunks = false;
+ 		}
+ 	}
+ 	mutex_unlock(&fs_info->chunk_mutex);
+diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
+index 76fb6e84f201..f6ae6cdf233d 100644
+--- a/fs/btrfs/volumes.h
++++ b/fs/btrfs/volumes.h
+@@ -61,6 +61,11 @@ struct btrfs_device {
+ 
+ 	spinlock_t io_lock ____cacheline_aligned;
+ 	int running_pending;
++	/* When true means this device has pending chunk alloc in
++	 * current transaction. Protected by chunk_mutex.
++	 */
++	bool has_pending_chunks;
++
+ 	/* regular prio bios */
+ 	struct btrfs_pending_bios pending_bios;
+ 	/* sync bios */
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 4657e2924ecb..0a0e1aa11f5e 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -2436,10 +2436,23 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
+ 	spin_unlock_irqrestore(&callback_lock, flags);
+ }
+ 
++/**
++ * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
++ * @tsk: pointer to task_struct with which the scheduler is struggling
++ *
++ * Description: In the case that the scheduler cannot find an allowed cpu in
++ * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
++ * mode however, this value is the same as task_cs(tsk)->effective_cpus,
++ * which will not contain a sane cpumask during cases such as cpu hotplugging.
++ * This is the absolute last resort for the scheduler and it is only used if
++ * _every_ other avenue has been traveled.
++ **/
++
+ void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
+ {
+ 	rcu_read_lock();
+-	do_set_cpus_allowed(tsk, task_cs(tsk)->effective_cpus);
++	do_set_cpus_allowed(tsk, is_in_v2_mode() ?
++		task_cs(tsk)->cpus_allowed : cpu_possible_mask);
+ 	rcu_read_unlock();
+ 
+ 	/*
+diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
+index 7c51f065b212..88754e9790f9 100644
+--- a/kernel/livepatch/core.c
++++ b/kernel/livepatch/core.c
+@@ -30,6 +30,7 @@
+ #include <linux/elf.h>
+ #include <linux/moduleloader.h>
+ #include <linux/completion.h>
++#include <linux/memory.h>
+ #include <asm/cacheflush.h>
+ #include "core.h"
+ #include "patch.h"
+@@ -635,16 +636,21 @@ static int klp_init_object_loaded(struct klp_patch *patch,
+ 	struct klp_func *func;
+ 	int ret;
+ 
++	mutex_lock(&text_mutex);
++
+ 	module_disable_ro(patch->mod);
+ 	ret = klp_write_object_relocations(patch->mod, obj);
+ 	if (ret) {
+ 		module_enable_ro(patch->mod, true);
++		mutex_unlock(&text_mutex);
+ 		return ret;
+ 	}
+ 
+ 	arch_klp_init_object_loaded(patch, obj);
+ 	module_enable_ro(patch->mod, true);
+ 
++	mutex_unlock(&text_mutex);
++
+ 	klp_for_each_func(obj, func) {
+ 		ret = klp_find_object_symbol(obj->name, func->old_name,
+ 					     func->old_sympos,
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index a75959dc149e..09fb3f58a838 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -78,9 +78,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
+  */
+ static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
+ {
+-	rcu_read_lock();
+-	__ptrace_link(child, new_parent, __task_cred(new_parent));
+-	rcu_read_unlock();
++	__ptrace_link(child, new_parent, current_cred());
+ }
+ 
+ /**
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 3e92852c8b23..c4a0ad18c859 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -4280,10 +4280,13 @@ void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
+ 	struct ftrace_func_entry *entry;
+ 	struct ftrace_func_map *map;
+ 	struct hlist_head *hhd;
+-	int size = 1 << mapper->hash.size_bits;
+-	int i;
++	int size, i;
++
++	if (!mapper)
++		return;
+ 
+ 	if (free_func && mapper->hash.count) {
++		size = 1 << mapper->hash.size_bits;
+ 		for (i = 0; i < size; i++) {
+ 			hhd = &mapper->hash.buckets[i];
+ 			hlist_for_each_entry(entry, hhd, hlist) {
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 76267d82f157..91227e339ef6 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -6394,11 +6394,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 			break;
+ 		}
+ #endif
+-		if (!tr->allocated_snapshot) {
++		if (tr->allocated_snapshot)
++			ret = resize_buffer_duplicate_size(&tr->max_buffer,
++					&tr->trace_buffer, iter->cpu_file);
++		else
+ 			ret = tracing_alloc_snapshot_instance(tr);
+-			if (ret < 0)
+-				break;
+-		}
++		if (ret < 0)
++			break;
+ 		local_irq_disable();
+ 		/* Now, we're going to swap */
+ 		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
+diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
+index 468fb7cd1221..edf345b7f06b 100644
+--- a/lib/mpi/mpi-pow.c
++++ b/lib/mpi/mpi-pow.c
+@@ -37,6 +37,7 @@
+ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
+ {
+ 	mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL;
++	struct karatsuba_ctx karactx = {};
+ 	mpi_ptr_t xp_marker = NULL;
+ 	mpi_ptr_t tspace = NULL;
+ 	mpi_ptr_t rp, ep, mp, bp;
+@@ -164,13 +165,11 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
+ 		int c;
+ 		mpi_limb_t e;
+ 		mpi_limb_t carry_limb;
+-		struct karatsuba_ctx karactx;
+ 
+ 		xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1));
+ 		if (!xp)
+ 			goto enomem;
+ 
+-		memset(&karactx, 0, sizeof karactx);
+ 		negative_result = (ep[0] & 1) && base->sign;
+ 
+ 		i = esize - 1;
+@@ -295,8 +294,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
+ 		if (mod_shift_cnt)
+ 			mpihelp_rshift(rp, rp, rsize, mod_shift_cnt);
+ 		MPN_NORMALIZE(rp, rsize);
+-
+-		mpihelp_release_karatsuba_ctx(&karactx);
+ 	}
+ 
+ 	if (negative_result && rsize) {
+@@ -313,6 +310,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
+ leave:
+ 	rc = 0;
+ enomem:
++	mpihelp_release_karatsuba_ctx(&karactx);
+ 	if (assign_rp)
+ 		mpi_assign_limb_space(res, rp, size);
+ 	if (mp_marker)
+diff --git a/mm/mlock.c b/mm/mlock.c
+index 46af369c13e5..1f9ee86672e8 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -629,11 +629,11 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
+  * is also counted.
+  * Return value: previously mlocked page counts
+  */
+-static int count_mm_mlocked_page_nr(struct mm_struct *mm,
++static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
+ 		unsigned long start, size_t len)
+ {
+ 	struct vm_area_struct *vma;
+-	int count = 0;
++	unsigned long count = 0;
+ 
+ 	if (mm == NULL)
+ 		mm = current->mm;
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 414c5cf87234..290078e8d4b1 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -3439,19 +3439,18 @@ out:
+ }
+ 
+ /*
+- * pgdat->kswapd_classzone_idx is the highest zone index that a recent
+- * allocation request woke kswapd for. When kswapd has not woken recently,
+- * the value is MAX_NR_ZONES which is not a valid index. This compares a
+- * given classzone and returns it or the highest classzone index kswapd
+- * was recently woke for.
++ * The pgdat->kswapd_classzone_idx is used to pass the highest zone index to be
++ * reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is not
++ * a valid index then either kswapd runs for first time or kswapd couldn't sleep
++ * after previous reclaim attempt (node is still unbalanced). In that case
++ * return the zone index of the previous kswapd reclaim cycle.
+  */
+ static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
+-					   enum zone_type classzone_idx)
++					   enum zone_type prev_classzone_idx)
+ {
+ 	if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
+-		return classzone_idx;
+-
+-	return max(pgdat->kswapd_classzone_idx, classzone_idx);
++		return prev_classzone_idx;
++	return pgdat->kswapd_classzone_idx;
+ }
+ 
+ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
+@@ -3592,7 +3591,7 @@ kswapd_try_sleep:
+ 
+ 		/* Read the new order and classzone_idx */
+ 		alloc_order = reclaim_order = pgdat->kswapd_order;
+-		classzone_idx = kswapd_classzone_idx(pgdat, 0);
++		classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
+ 		pgdat->kswapd_order = 0;
+ 		pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
+ 
+@@ -3643,8 +3642,12 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
+ 	if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
+ 		return;
+ 	pgdat = zone->zone_pgdat;
+-	pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat,
+-							   classzone_idx);
++
++	if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
++		pgdat->kswapd_classzone_idx = classzone_idx;
++	else
++		pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
++						  classzone_idx);
+ 	pgdat->kswapd_order = max(pgdat->kswapd_order, order);
+ 	if (!waitqueue_active(&pgdat->kswapd_wait))
+ 		return;
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index e9698592cf98..0ee64f67300a 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -1352,7 +1352,7 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
+ 	 * actually encrypted before enforcing a key size.
+ 	 */
+ 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
+-		hcon->enc_key_size > HCI_MIN_ENC_KEY_SIZE);
++		hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
+ }
+ 
+ static void l2cap_do_start(struct l2cap_chan *chan)
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 5caf8e722a11..51e6cf2dc277 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -524,9 +524,14 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id,
+ 	/* Save client advertised inbound read limit for use later in accept. */
+ 	newxprt->sc_ord = param->initiator_depth;
+ 
+-	/* Set the local and remote addresses in the transport */
+ 	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
+ 	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
++	/* The remote port is arbitrary and not under the control of the
++	 * client ULP. Set it to a fixed value so that the DRC continues
++	 * to be effective after a reconnect.
++	 */
++	rpc_set_port((struct sockaddr *)&newxprt->sc_xprt.xpt_remote, 0);
++
+ 	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
+ 	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));
+ 
+diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
+index 98a7d63a723e..c4a9ddb174bc 100755
+--- a/scripts/decode_stacktrace.sh
++++ b/scripts/decode_stacktrace.sh
+@@ -66,7 +66,7 @@ parse_symbol() {
+ 	if [[ "${cache[$module,$address]+isset}" == "isset" ]]; then
+ 		local code=${cache[$module,$address]}
+ 	else
+-		local code=$(addr2line -i -e "$objfile" "$address")
++		local code=$(${CROSS_COMPILE}addr2line -i -e "$objfile" "$address")
+ 		cache[$module,$address]=$code
+ 	fi
+ 
+diff --git a/sound/core/seq/oss/seq_oss_ioctl.c b/sound/core/seq/oss/seq_oss_ioctl.c
+index 5b8520177b0e..7d72e3d48ad5 100644
+--- a/sound/core/seq/oss/seq_oss_ioctl.c
++++ b/sound/core/seq/oss/seq_oss_ioctl.c
+@@ -62,7 +62,7 @@ static int snd_seq_oss_oob_user(struct seq_oss_devinfo *dp, void __user *arg)
+ 	if (copy_from_user(ev, arg, 8))
+ 		return -EFAULT;
+ 	memset(&tmpev, 0, sizeof(tmpev));
+-	snd_seq_oss_fill_addr(dp, &tmpev, dp->addr.port, dp->addr.client);
++	snd_seq_oss_fill_addr(dp, &tmpev, dp->addr.client, dp->addr.port);
+ 	tmpev.time.tick = 0;
+ 	if (! snd_seq_oss_process_event(dp, (union evrec *)ev, &tmpev)) {
+ 		snd_seq_oss_dispatch(dp, &tmpev, 0, 0);
+diff --git a/sound/core/seq/oss/seq_oss_rw.c b/sound/core/seq/oss/seq_oss_rw.c
+index 6a7b6aceeca9..499f3e8f4949 100644
+--- a/sound/core/seq/oss/seq_oss_rw.c
++++ b/sound/core/seq/oss/seq_oss_rw.c
+@@ -174,7 +174,7 @@ insert_queue(struct seq_oss_devinfo *dp, union evrec *rec, struct file *opt)
+ 	memset(&event, 0, sizeof(event));
+ 	/* set dummy -- to be sure */
+ 	event.type = SNDRV_SEQ_EVENT_NOTEOFF;
+-	snd_seq_oss_fill_addr(dp, &event, dp->addr.port, dp->addr.client);
++	snd_seq_oss_fill_addr(dp, &event, dp->addr.client, dp->addr.port);
+ 
+ 	if (snd_seq_oss_process_event(dp, rec, &event))
+ 		return 0; /* invalid event - no need to insert queue */
+diff --git a/sound/firewire/amdtp-am824.c b/sound/firewire/amdtp-am824.c
+index 23ccddb20de1..8fc554d745f6 100644
+--- a/sound/firewire/amdtp-am824.c
++++ b/sound/firewire/amdtp-am824.c
+@@ -321,7 +321,7 @@ static void read_midi_messages(struct amdtp_stream *s,
+ 	u8 *b;
+ 
+ 	for (f = 0; f < frames; f++) {
+-		port = (s->data_block_counter + f) % 8;
++		port = (8 - s->tx_first_dbc + s->data_block_counter + f) % 8;
+ 		b = (u8 *)&buffer[p->midi_position];
+ 
+ 		len = b[0] - 0x80;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 6d06f5d029aa..3552b4b1f902 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6590,6 +6590,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
++	SND_PCI_QUIRK(0x17aa, 0x3111, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ 	SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ 	SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ 	SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
+index 6e8eb1f5a041..bed64723e5d9 100644
+--- a/sound/soc/codecs/cs4265.c
++++ b/sound/soc/codecs/cs4265.c
+@@ -60,7 +60,7 @@ static const struct reg_default cs4265_reg_defaults[] = {
+ static bool cs4265_readable_register(struct device *dev, unsigned int reg)
+ {
+ 	switch (reg) {
+-	case CS4265_CHIP_ID ... CS4265_SPDIF_CTL2:
++	case CS4265_CHIP_ID ... CS4265_MAX_REGISTER:
+ 		return true;
+ 	default:
+ 		return false;
+diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
+index cc66ea5cc776..3fe09828745a 100644
+--- a/sound/soc/codecs/max98090.c
++++ b/sound/soc/codecs/max98090.c
+@@ -1924,6 +1924,21 @@ static int max98090_configure_dmic(struct max98090_priv *max98090,
+ 	return 0;
+ }
+ 
++static int max98090_dai_startup(struct snd_pcm_substream *substream,
++				struct snd_soc_dai *dai)
++{
++	struct snd_soc_component *component = dai->component;
++	struct max98090_priv *max98090 = snd_soc_component_get_drvdata(component);
++	unsigned int fmt = max98090->dai_fmt;
++
++	/* Remove 24-bit format support if it is not in right justified mode. */
++	if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) != SND_SOC_DAIFMT_RIGHT_J) {
++		substream->runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
++		snd_pcm_hw_constraint_msbits(substream->runtime, 0, 16, 16);
++	}
++	return 0;
++}
++
+ static int max98090_dai_hw_params(struct snd_pcm_substream *substream,
+ 				   struct snd_pcm_hw_params *params,
+ 				   struct snd_soc_dai *dai)
+@@ -2331,6 +2346,7 @@ EXPORT_SYMBOL_GPL(max98090_mic_detect);
+ #define MAX98090_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
+ 
+ static const struct snd_soc_dai_ops max98090_dai_ops = {
++	.startup = max98090_dai_startup,
+ 	.set_sysclk = max98090_dai_set_sysclk,
+ 	.set_fmt = max98090_dai_set_fmt,
+ 	.set_tdm_slot = max98090_set_tdm_slot,
+diff --git a/sound/soc/codecs/rt274.c b/sound/soc/codecs/rt274.c
+index cd048df76232..43086ac9ffec 100644
+--- a/sound/soc/codecs/rt274.c
++++ b/sound/soc/codecs/rt274.c
+@@ -398,6 +398,8 @@ static int rt274_mic_detect(struct snd_soc_codec *codec,
+ {
+ 	struct rt274_priv *rt274 = snd_soc_codec_get_drvdata(codec);
+ 
++	rt274->jack = jack;
++
+ 	if (jack == NULL) {
+ 		/* Disable jack detection */
+ 		regmap_update_bits(rt274->regmap, RT274_EAPD_GPIO_IRQ_CTRL,
+@@ -405,7 +407,6 @@ static int rt274_mic_detect(struct snd_soc_codec *codec,
+ 
+ 		return 0;
+ 	}
+-	rt274->jack = jack;
+ 
+ 	regmap_update_bits(rt274->regmap, RT274_EAPD_GPIO_IRQ_CTRL,
+ 			   RT274_IRQ_EN, RT274_IRQ_EN);
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 584b7ffe78f5..052b6294a428 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -2233,7 +2233,8 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
+ 
+ 		if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
+ 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
+-		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
++		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND) &&
++		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
+ 			continue;
+ 
+ 		dev_dbg(be->dev, "ASoC: prepare BE %s\n",
+diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
+index b4af5ce78ecb..da0a2083e12a 100644
+--- a/sound/soc/sunxi/sun4i-i2s.c
++++ b/sound/soc/sunxi/sun4i-i2s.c
+@@ -110,7 +110,7 @@
+ 
+ #define SUN8I_I2S_TX_CHAN_MAP_REG	0x44
+ #define SUN8I_I2S_TX_CHAN_SEL_REG	0x34
+-#define SUN8I_I2S_TX_CHAN_OFFSET_MASK	GENMASK(13, 11)
++#define SUN8I_I2S_TX_CHAN_OFFSET_MASK	GENMASK(13, 12)
+ #define SUN8I_I2S_TX_CHAN_OFFSET(offset)	(offset << 12)
+ #define SUN8I_I2S_TX_CHAN_EN_MASK	GENMASK(11, 4)
+ #define SUN8I_I2S_TX_CHAN_EN(num_chan)	(((1 << num_chan) - 1) << 4)
+@@ -442,6 +442,10 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ 		regmap_update_bits(i2s->regmap, SUN8I_I2S_TX_CHAN_SEL_REG,
+ 				   SUN8I_I2S_TX_CHAN_OFFSET_MASK,
+ 				   SUN8I_I2S_TX_CHAN_OFFSET(offset));
++
++		regmap_update_bits(i2s->regmap, SUN8I_I2S_RX_CHAN_SEL_REG,
++				   SUN8I_I2S_TX_CHAN_OFFSET_MASK,
++				   SUN8I_I2S_TX_CHAN_OFFSET(offset));
+ 	}
+ 
+ 	regmap_field_write(i2s->field_fmt_mode, val);
+diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
+index b3854f8c0c67..f5614507a81c 100644
+--- a/sound/usb/line6/pcm.c
++++ b/sound/usb/line6/pcm.c
+@@ -558,6 +558,11 @@ int line6_init_pcm(struct usb_line6 *line6,
+ 	line6pcm->max_packet_size_out =
+ 		usb_maxpacket(line6->usbdev,
+ 			usb_sndisocpipe(line6->usbdev, ep_write), 1);
++	if (!line6pcm->max_packet_size_in || !line6pcm->max_packet_size_out) {
++		dev_err(line6pcm->line6->ifcdev,
++			"cannot get proper max packet size\n");
++		return -EINVAL;
++	}
+ 
+ 	spin_lock_init(&line6pcm->out.lock);
+ 	spin_lock_init(&line6pcm->in.lock);
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index e1e7ce9ab217..b54f7dab8372 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -754,7 +754,7 @@ static int snd_ni_control_init_val(struct usb_mixer_interface *mixer,
+ 		return err;
+ 	}
+ 
+-	kctl->private_value |= (value << 24);
++	kctl->private_value |= ((unsigned int)value << 24);
+ 	return 0;
+ }
+ 
+@@ -915,7 +915,7 @@ static int snd_ftu_eff_switch_init(struct usb_mixer_interface *mixer,
+ 	if (err < 0)
+ 		return err;
+ 
+-	kctl->private_value |= (unsigned int)value[0] << 24;
+ 	return 0;
+ }
+ 
