commit:     040d55ca4b181afe5b6151820d220af1af18b017
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar 16 13:33:04 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar 16 13:33:04 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=040d55ca
Linux patch 5.10.106 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org> 0000_README | 4 + 1105_linux-5.10.106.patch | 3053 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 3057 insertions(+) diff --git a/0000_README b/0000_README index 6fbdd908..acb932f5 100644 --- a/0000_README +++ b/0000_README @@ -463,6 +463,10 @@ Patch: 1104_linux-5.10.105.patch From: http://www.kernel.org Desc: Linux 5.10.105 +Patch: 1105_linux-5.10.106.patch +From: http://www.kernel.org +Desc: Linux 5.10.106 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1105_linux-5.10.106.patch b/1105_linux-5.10.106.patch new file mode 100644 index 00000000..e5a68d3f --- /dev/null +++ b/1105_linux-5.10.106.patch @@ -0,0 +1,3053 @@ +diff --git a/Makefile b/Makefile +index ea665736db040..7b0dffadf6a89 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 10 +-SUBLEVEL = 105 ++SUBLEVEL = 106 + EXTRAVERSION = + NAME = Dare mighty things + +diff --git a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi +index 910eacc8ad3bd..a362714ae9fc0 100644 +--- a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi ++++ b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi +@@ -118,7 +118,7 @@ + }; + + pinctrl_fwqspid_default: fwqspid_default { +- function = "FWQSPID"; ++ function = "FWSPID"; + groups = "FWQSPID"; + }; + +diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi +index 55ec83bde5a61..e46a3f4ad350a 100644 +--- a/arch/arm/boot/dts/bcm2711.dtsi ++++ b/arch/arm/boot/dts/bcm2711.dtsi +@@ -290,6 +290,7 @@ + + hvs: hvs@7e400000 { + compatible = "brcm,bcm2711-hvs"; ++ reg = <0x7e400000 0x8000>; + interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>; + }; + +diff --git a/arch/arm/include/asm/spectre.h b/arch/arm/include/asm/spectre.h +index d1fa5607d3aa3..85f9e538fb325 100644 +--- a/arch/arm/include/asm/spectre.h ++++ b/arch/arm/include/asm/spectre.h +@@ -25,7 +25,13 @@ enum { + SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8), + }; + ++#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES + void spectre_v2_update_state(unsigned int state, unsigned int methods); ++#else ++static inline void spectre_v2_update_state(unsigned int state, ++ unsigned int methods) ++{} ++#endif + + int spectre_bhb_update_vectors(unsigned int method); + +diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S +index 3cbd35c82a66c..c3ebe3584103b 100644 +--- a/arch/arm/kernel/entry-armv.S ++++ b/arch/arm/kernel/entry-armv.S +@@ -1043,9 +1043,9 @@ vector_bhb_loop8_\name: + + @ bhb workaround + mov r0, #8 +-1: b . + 4 ++3: b . + 4 + subs r0, r0, #1 +- bne 1b ++ bne 3b + dsb + isb + b 2b +diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts +index 2e437f20da39b..00e5dbf4b8236 100644 +--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts ++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts +@@ -18,6 +18,7 @@ + + aliases { + spi0 = &spi0; ++ ethernet0 = ð0; + ethernet1 = ð1; + mmc0 = &sdhci0; + mmc1 = &sdhci1; +@@ -137,7 +138,9 @@ + /* + * U-Boot port for Turris Mox has a bug which always expects that "ranges" DT property + * contains exactly 2 ranges with 3 (child) address cells, 2 (parent) address cells and +- * 2 size cells and also expects that the second range starts at 16 MB offset. 
If these ++ * 2 size cells and also expects that the second range starts at 16 MB offset. Also it ++ * expects that first range uses same address for PCI (child) and CPU (parent) cells (so ++ * no remapping) and that this address is the lowest from all specified ranges. If these + * conditions are not met then U-Boot crashes during loading kernel DTB file. PCIe address + * space is 128 MB long, so the best split between MEM and IO is to use fixed 16 MB window + * for IO and the rest 112 MB (64+32+16) for MEM, despite that maximal IO size is just 64 kB. +@@ -146,6 +149,9 @@ + * https://source.denx.de/u-boot/u-boot/-/commit/cb2ddb291ee6fcbddd6d8f4ff49089dfe580f5d7 + * https://source.denx.de/u-boot/u-boot/-/commit/c64ac3b3185aeb3846297ad7391fc6df8ecd73bf + * https://source.denx.de/u-boot/u-boot/-/commit/4a82fca8e330157081fc132a591ebd99ba02ee33 ++ * Bug related to requirement of same child and parent addresses for first range is fixed ++ * in U-Boot version 2022.04 by following commit: ++ * https://source.denx.de/u-boot/u-boot/-/commit/1fd54253bca7d43d046bba4853fe5fafd034bc17 + */ + #address-cells = <3>; + #size-cells = <2>; +diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +index 2a2015a153627..0f4bcd15d8580 100644 +--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi ++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +@@ -495,7 +495,7 @@ + * (totaling 127 MiB) for MEM. + */ + ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x07f00000 /* Port 0 MEM */ +- 0x81000000 0 0xefff0000 0 0xefff0000 0 0x00010000>; /* Port 0 IO */ ++ 0x81000000 0 0x00000000 0 0xefff0000 0 0x00010000>; /* Port 0 IO */ + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0 0 0 1 &pcie_intc 0>, + <0 0 0 2 &pcie_intc 1>, +diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c +index 104fba889cf76..c3310a68ac463 100644 +--- a/arch/riscv/kernel/module.c ++++ b/arch/riscv/kernel/module.c +@@ -13,6 +13,19 @@ + #include <linux/pgtable.h> + #include <asm/sections.h> + ++/* ++ * The auipc+jalr instruction pair can reach any PC-relative offset ++ * in the range [-2^31 - 2^11, 2^31 - 2^11) ++ */ ++static bool riscv_insn_valid_32bit_offset(ptrdiff_t val) ++{ ++#ifdef CONFIG_32BIT ++ return true; ++#else ++ return (-(1L << 31) - (1L << 11)) <= val && val < ((1L << 31) - (1L << 11)); ++#endif ++} ++ + static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v) + { + if (v != (u32)v) { +@@ -95,7 +108,7 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location, + ptrdiff_t offset = (void *)v - (void *)location; + s32 hi20; + +- if (offset != (s32)offset) { ++ if (!riscv_insn_valid_32bit_offset(offset)) { + pr_err( + "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", + me->name, (long long)v, location); +@@ -197,10 +210,9 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location, + Elf_Addr v) + { + ptrdiff_t offset = (void *)v - (void *)location; +- s32 fill_v = offset; + u32 hi20, lo12; + +- if (offset != fill_v) { ++ if (!riscv_insn_valid_32bit_offset(offset)) { + /* Only emit the plt entry if offset over 32-bit range */ + if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) { + offset = module_emit_plt_entry(me, v); +@@ -224,10 +236,9 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location, + Elf_Addr v) + { + ptrdiff_t offset = (void *)v - (void *)location; +- s32 fill_v = offset; + u32 hi20, lo12; + +- if (offset != fill_v) { ++ if (!riscv_insn_valid_32bit_offset(offset)) { + 
pr_err( + "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", + me->name, (long long)v, location); +diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c +index 629c4994f1654..7f57110f958e1 100644 +--- a/arch/x86/kernel/e820.c ++++ b/arch/x86/kernel/e820.c +@@ -995,8 +995,10 @@ early_param("memmap", parse_memmap_opt); + */ + void __init e820__reserve_setup_data(void) + { ++ struct setup_indirect *indirect; + struct setup_data *data; +- u64 pa_data; ++ u64 pa_data, pa_next; ++ u32 len; + + pa_data = boot_params.hdr.setup_data; + if (!pa_data) +@@ -1004,6 +1006,14 @@ void __init e820__reserve_setup_data(void) + + while (pa_data) { + data = early_memremap(pa_data, sizeof(*data)); ++ if (!data) { ++ pr_warn("e820: failed to memremap setup_data entry\n"); ++ return; ++ } ++ ++ len = sizeof(*data); ++ pa_next = data->next; ++ + e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); + + /* +@@ -1015,18 +1025,27 @@ void __init e820__reserve_setup_data(void) + sizeof(*data) + data->len, + E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); + +- if (data->type == SETUP_INDIRECT && +- ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) { +- e820__range_update(((struct setup_indirect *)data->data)->addr, +- ((struct setup_indirect *)data->data)->len, +- E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); +- e820__range_update_kexec(((struct setup_indirect *)data->data)->addr, +- ((struct setup_indirect *)data->data)->len, +- E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); ++ if (data->type == SETUP_INDIRECT) { ++ len += data->len; ++ early_memunmap(data, sizeof(*data)); ++ data = early_memremap(pa_data, len); ++ if (!data) { ++ pr_warn("e820: failed to memremap indirect setup_data\n"); ++ return; ++ } ++ ++ indirect = (struct setup_indirect *)data->data; ++ ++ if (indirect->type != SETUP_INDIRECT) { ++ e820__range_update(indirect->addr, indirect->len, ++ E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); ++ e820__range_update_kexec(indirect->addr, indirect->len, ++ E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); ++ } + } + +- pa_data = data->next; +- early_memunmap(data, sizeof(*data)); ++ pa_data = pa_next; ++ early_memunmap(data, len); + } + + e820__update_table(e820_table); +diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c +index 64b6da95af984..e2e89bebcbc32 100644 +--- a/arch/x86/kernel/kdebugfs.c ++++ b/arch/x86/kernel/kdebugfs.c +@@ -88,11 +88,13 @@ create_setup_data_node(struct dentry *parent, int no, + + static int __init create_setup_data_nodes(struct dentry *parent) + { ++ struct setup_indirect *indirect; + struct setup_data_node *node; + struct setup_data *data; +- int error; ++ u64 pa_data, pa_next; + struct dentry *d; +- u64 pa_data; ++ int error; ++ u32 len; + int no = 0; + + d = debugfs_create_dir("setup_data", parent); +@@ -112,12 +114,29 @@ static int __init create_setup_data_nodes(struct dentry *parent) + error = -ENOMEM; + goto err_dir; + } +- +- if (data->type == SETUP_INDIRECT && +- ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) { +- node->paddr = ((struct setup_indirect *)data->data)->addr; +- node->type = ((struct setup_indirect *)data->data)->type; +- node->len = ((struct setup_indirect *)data->data)->len; ++ pa_next = data->next; ++ ++ if (data->type == SETUP_INDIRECT) { ++ len = sizeof(*data) + data->len; ++ memunmap(data); ++ data = memremap(pa_data, len, MEMREMAP_WB); ++ if (!data) { ++ kfree(node); ++ error = -ENOMEM; ++ goto err_dir; ++ } ++ ++ indirect = (struct setup_indirect *)data->data; ++ ++ 
if (indirect->type != SETUP_INDIRECT) { ++ node->paddr = indirect->addr; ++ node->type = indirect->type; ++ node->len = indirect->len; ++ } else { ++ node->paddr = pa_data; ++ node->type = data->type; ++ node->len = data->len; ++ } + } else { + node->paddr = pa_data; + node->type = data->type; +@@ -125,7 +144,7 @@ static int __init create_setup_data_nodes(struct dentry *parent) + } + + create_setup_data_node(d, no, node); +- pa_data = data->next; ++ pa_data = pa_next; + + memunmap(data); + no++; +diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c +index d0a19121c6a4f..257892fcefa79 100644 +--- a/arch/x86/kernel/ksysfs.c ++++ b/arch/x86/kernel/ksysfs.c +@@ -91,26 +91,41 @@ static int get_setup_data_paddr(int nr, u64 *paddr) + + static int __init get_setup_data_size(int nr, size_t *size) + { +- int i = 0; ++ u64 pa_data = boot_params.hdr.setup_data, pa_next; ++ struct setup_indirect *indirect; + struct setup_data *data; +- u64 pa_data = boot_params.hdr.setup_data; ++ int i = 0; ++ u32 len; + + while (pa_data) { + data = memremap(pa_data, sizeof(*data), MEMREMAP_WB); + if (!data) + return -ENOMEM; ++ pa_next = data->next; ++ + if (nr == i) { +- if (data->type == SETUP_INDIRECT && +- ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) +- *size = ((struct setup_indirect *)data->data)->len; +- else ++ if (data->type == SETUP_INDIRECT) { ++ len = sizeof(*data) + data->len; ++ memunmap(data); ++ data = memremap(pa_data, len, MEMREMAP_WB); ++ if (!data) ++ return -ENOMEM; ++ ++ indirect = (struct setup_indirect *)data->data; ++ ++ if (indirect->type != SETUP_INDIRECT) ++ *size = indirect->len; ++ else ++ *size = data->len; ++ } else { + *size = data->len; ++ } + + memunmap(data); + return 0; + } + +- pa_data = data->next; ++ pa_data = pa_next; + memunmap(data); + i++; + } +@@ -120,9 +135,11 @@ static int __init get_setup_data_size(int nr, size_t *size) + static ssize_t type_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) + { ++ struct setup_indirect *indirect; ++ struct setup_data *data; + int nr, ret; + u64 paddr; +- struct setup_data *data; ++ u32 len; + + ret = kobj_to_setup_data_nr(kobj, &nr); + if (ret) +@@ -135,10 +152,20 @@ static ssize_t type_show(struct kobject *kobj, + if (!data) + return -ENOMEM; + +- if (data->type == SETUP_INDIRECT) +- ret = sprintf(buf, "0x%x\n", ((struct setup_indirect *)data->data)->type); +- else ++ if (data->type == SETUP_INDIRECT) { ++ len = sizeof(*data) + data->len; ++ memunmap(data); ++ data = memremap(paddr, len, MEMREMAP_WB); ++ if (!data) ++ return -ENOMEM; ++ ++ indirect = (struct setup_indirect *)data->data; ++ ++ ret = sprintf(buf, "0x%x\n", indirect->type); ++ } else { + ret = sprintf(buf, "0x%x\n", data->type); ++ } ++ + memunmap(data); + return ret; + } +@@ -149,9 +176,10 @@ static ssize_t setup_data_data_read(struct file *fp, + char *buf, + loff_t off, size_t count) + { ++ struct setup_indirect *indirect; ++ struct setup_data *data; + int nr, ret = 0; + u64 paddr, len; +- struct setup_data *data; + void *p; + + ret = kobj_to_setup_data_nr(kobj, &nr); +@@ -165,10 +193,27 @@ static ssize_t setup_data_data_read(struct file *fp, + if (!data) + return -ENOMEM; + +- if (data->type == SETUP_INDIRECT && +- ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) { +- paddr = ((struct setup_indirect *)data->data)->addr; +- len = ((struct setup_indirect *)data->data)->len; ++ if (data->type == SETUP_INDIRECT) { ++ len = sizeof(*data) + data->len; ++ memunmap(data); ++ data = memremap(paddr, len, MEMREMAP_WB); 
++ if (!data) ++ return -ENOMEM; ++ ++ indirect = (struct setup_indirect *)data->data; ++ ++ if (indirect->type != SETUP_INDIRECT) { ++ paddr = indirect->addr; ++ len = indirect->len; ++ } else { ++ /* ++ * Even though this is technically undefined, return ++ * the data as though it is a normal setup_data struct. ++ * This will at least allow it to be inspected. ++ */ ++ paddr += sizeof(*data); ++ len = data->len; ++ } + } else { + paddr += sizeof(*data); + len = data->len; +diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c +index 28c89fce0dab8..065152d9265e4 100644 +--- a/arch/x86/kernel/setup.c ++++ b/arch/x86/kernel/setup.c +@@ -371,21 +371,41 @@ static void __init parse_setup_data(void) + + static void __init memblock_x86_reserve_range_setup_data(void) + { ++ struct setup_indirect *indirect; + struct setup_data *data; +- u64 pa_data; ++ u64 pa_data, pa_next; ++ u32 len; + + pa_data = boot_params.hdr.setup_data; + while (pa_data) { + data = early_memremap(pa_data, sizeof(*data)); ++ if (!data) { ++ pr_warn("setup: failed to memremap setup_data entry\n"); ++ return; ++ } ++ ++ len = sizeof(*data); ++ pa_next = data->next; ++ + memblock_reserve(pa_data, sizeof(*data) + data->len); + +- if (data->type == SETUP_INDIRECT && +- ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) +- memblock_reserve(((struct setup_indirect *)data->data)->addr, +- ((struct setup_indirect *)data->data)->len); ++ if (data->type == SETUP_INDIRECT) { ++ len += data->len; ++ early_memunmap(data, sizeof(*data)); ++ data = early_memremap(pa_data, len); ++ if (!data) { ++ pr_warn("setup: failed to memremap indirect setup_data\n"); ++ return; ++ } + +- pa_data = data->next; +- early_memunmap(data, sizeof(*data)); ++ indirect = (struct setup_indirect *)data->data; ++ ++ if (indirect->type != SETUP_INDIRECT) ++ memblock_reserve(indirect->addr, indirect->len); ++ } ++ ++ pa_data = pa_next; ++ early_memunmap(data, len); + } + } + +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c +index 2d4ecd50e69b8..2a39a2df6f43e 100644 +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -651,6 +651,7 @@ static bool do_int3(struct pt_regs *regs) + + return res == NOTIFY_STOP; + } ++NOKPROBE_SYMBOL(do_int3); + + static void do_int3_user(struct pt_regs *regs) + { +diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c +index 356b746dfbe7a..91e61dbba3e0c 100644 +--- a/arch/x86/mm/ioremap.c ++++ b/arch/x86/mm/ioremap.c +@@ -633,6 +633,7 @@ static bool memremap_is_efi_data(resource_size_t phys_addr, + static bool memremap_is_setup_data(resource_size_t phys_addr, + unsigned long size) + { ++ struct setup_indirect *indirect; + struct setup_data *data; + u64 paddr, paddr_next; + +@@ -645,6 +646,10 @@ static bool memremap_is_setup_data(resource_size_t phys_addr, + + data = memremap(paddr, sizeof(*data), + MEMREMAP_WB | MEMREMAP_DEC); ++ if (!data) { ++ pr_warn("failed to memremap setup_data entry\n"); ++ return false; ++ } + + paddr_next = data->next; + len = data->len; +@@ -654,10 +659,21 @@ static bool memremap_is_setup_data(resource_size_t phys_addr, + return true; + } + +- if (data->type == SETUP_INDIRECT && +- ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) { +- paddr = ((struct setup_indirect *)data->data)->addr; +- len = ((struct setup_indirect *)data->data)->len; ++ if (data->type == SETUP_INDIRECT) { ++ memunmap(data); ++ data = memremap(paddr, sizeof(*data) + len, ++ MEMREMAP_WB | MEMREMAP_DEC); ++ if (!data) { ++ pr_warn("failed to memremap indirect setup_data\n"); ++ 
return false; ++ } ++ ++ indirect = (struct setup_indirect *)data->data; ++ ++ if (indirect->type != SETUP_INDIRECT) { ++ paddr = indirect->addr; ++ len = indirect->len; ++ } + } + + memunmap(data); +@@ -678,22 +694,51 @@ static bool memremap_is_setup_data(resource_size_t phys_addr, + static bool __init early_memremap_is_setup_data(resource_size_t phys_addr, + unsigned long size) + { ++ struct setup_indirect *indirect; + struct setup_data *data; + u64 paddr, paddr_next; + + paddr = boot_params.hdr.setup_data; + while (paddr) { +- unsigned int len; ++ unsigned int len, size; + + if (phys_addr == paddr) + return true; + + data = early_memremap_decrypted(paddr, sizeof(*data)); ++ if (!data) { ++ pr_warn("failed to early memremap setup_data entry\n"); ++ return false; ++ } ++ ++ size = sizeof(*data); + + paddr_next = data->next; + len = data->len; + +- early_memunmap(data, sizeof(*data)); ++ if ((phys_addr > paddr) && (phys_addr < (paddr + len))) { ++ early_memunmap(data, sizeof(*data)); ++ return true; ++ } ++ ++ if (data->type == SETUP_INDIRECT) { ++ size += len; ++ early_memunmap(data, sizeof(*data)); ++ data = early_memremap_decrypted(paddr, size); ++ if (!data) { ++ pr_warn("failed to early memremap indirect setup_data\n"); ++ return false; ++ } ++ ++ indirect = (struct setup_indirect *)data->data; ++ ++ if (indirect->type != SETUP_INDIRECT) { ++ paddr = indirect->addr; ++ len = indirect->len; ++ } ++ } ++ ++ early_memunmap(data, size); + + if ((phys_addr > paddr) && (phys_addr < (paddr + len))) + return true; +diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c +index 42acf9587ef38..a03390127741f 100644 +--- a/drivers/block/virtio_blk.c ++++ b/drivers/block/virtio_blk.c +@@ -869,9 +869,15 @@ static int virtblk_probe(struct virtio_device *vdev) + + virtio_cread(vdev, struct virtio_blk_config, max_discard_seg, + &v); ++ ++ /* ++ * max_discard_seg == 0 is out of spec but we always ++ * handled it. ++ */ ++ if (!v) ++ v = sg_elems - 2; + blk_queue_max_discard_segments(q, +- min_not_zero(v, +- MAX_DISCARD_SEGMENTS)); ++ min(v, MAX_DISCARD_SEGMENTS)); + + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); + } +diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c +index 4ece326ea233e..cf23cfd7e4674 100644 +--- a/drivers/clk/qcom/gdsc.c ++++ b/drivers/clk/qcom/gdsc.c +@@ -1,6 +1,6 @@ + // SPDX-License-Identifier: GPL-2.0-only + /* +- * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved. + */ + + #include <linux/bitops.h> +@@ -34,9 +34,14 @@ + #define CFG_GDSCR_OFFSET 0x4 + + /* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). 
*/ +-#define EN_REST_WAIT_VAL (0x2 << 20) +-#define EN_FEW_WAIT_VAL (0x8 << 16) +-#define CLK_DIS_WAIT_VAL (0x2 << 12) ++#define EN_REST_WAIT_VAL 0x2 ++#define EN_FEW_WAIT_VAL 0x8 ++#define CLK_DIS_WAIT_VAL 0x2 ++ ++/* Transition delay shifts */ ++#define EN_REST_WAIT_SHIFT 20 ++#define EN_FEW_WAIT_SHIFT 16 ++#define CLK_DIS_WAIT_SHIFT 12 + + #define RETAIN_MEM BIT(14) + #define RETAIN_PERIPH BIT(13) +@@ -341,7 +346,18 @@ static int gdsc_init(struct gdsc *sc) + */ + mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK | + EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK; +- val = EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL; ++ ++ if (!sc->en_rest_wait_val) ++ sc->en_rest_wait_val = EN_REST_WAIT_VAL; ++ if (!sc->en_few_wait_val) ++ sc->en_few_wait_val = EN_FEW_WAIT_VAL; ++ if (!sc->clk_dis_wait_val) ++ sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL; ++ ++ val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT | ++ sc->en_few_wait_val << EN_FEW_WAIT_SHIFT | ++ sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT; ++ + ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val); + if (ret) + return ret; +diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h +index 5bb396b344d16..762f1b5e1ec51 100644 +--- a/drivers/clk/qcom/gdsc.h ++++ b/drivers/clk/qcom/gdsc.h +@@ -1,6 +1,6 @@ + /* SPDX-License-Identifier: GPL-2.0-only */ + /* +- * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved. + */ + + #ifndef __QCOM_GDSC_H__ +@@ -22,6 +22,9 @@ struct reset_controller_dev; + * @cxcs: offsets of branch registers to toggle mem/periph bits in + * @cxc_count: number of @cxcs + * @pwrsts: Possible powerdomain power states ++ * @en_rest_wait_val: transition delay value for receiving enr ack signal ++ * @en_few_wait_val: transition delay value for receiving enf ack signal ++ * @clk_dis_wait_val: transition delay value for halting clock + * @resets: ids of resets associated with this gdsc + * @reset_count: number of @resets + * @rcdev: reset controller +@@ -35,6 +38,9 @@ struct gdsc { + unsigned int clamp_io_ctrl; + unsigned int *cxcs; + unsigned int cxc_count; ++ unsigned int en_rest_wait_val; ++ unsigned int en_few_wait_val; ++ unsigned int clk_dis_wait_val; + const u8 pwrsts; + /* Powerdomain allowable state bitfields */ + #define PWRSTS_OFF BIT(0) +diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c +index d885032cf814d..d918d2df4de2c 100644 +--- a/drivers/gpio/gpio-ts4900.c ++++ b/drivers/gpio/gpio-ts4900.c +@@ -1,7 +1,7 @@ + /* + * Digital I/O driver for Technologic Systems I2C FPGA Core + * +- * Copyright (C) 2015 Technologic Systems ++ * Copyright (C) 2015, 2018 Technologic Systems + * Copyright (C) 2016 Savoir-Faire Linux + * + * This program is free software; you can redistribute it and/or +@@ -55,19 +55,33 @@ static int ts4900_gpio_direction_input(struct gpio_chip *chip, + { + struct ts4900_gpio_priv *priv = gpiochip_get_data(chip); + +- /* +- * This will clear the output enable bit, the other bits are +- * dontcare when this is cleared ++ /* Only clear the OE bit here, requires a RMW. Prevents potential issue ++ * with OE and data getting to the physical pin at different times. 
+ */ +- return regmap_write(priv->regmap, offset, 0); ++ return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OE, 0); + } + + static int ts4900_gpio_direction_output(struct gpio_chip *chip, + unsigned int offset, int value) + { + struct ts4900_gpio_priv *priv = gpiochip_get_data(chip); ++ unsigned int reg; + int ret; + ++ /* If changing from an input to an output, we need to first set the ++ * proper data bit to what is requested and then set OE bit. This ++ * prevents a glitch that can occur on the IO line ++ */ ++ regmap_read(priv->regmap, offset, ®); ++ if (!(reg & TS4900_GPIO_OE)) { ++ if (value) ++ reg = TS4900_GPIO_OUT; ++ else ++ reg &= ~TS4900_GPIO_OUT; ++ ++ regmap_write(priv->regmap, offset, reg); ++ } ++ + if (value) + ret = regmap_write(priv->regmap, offset, TS4900_GPIO_OE | + TS4900_GPIO_OUT); +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c +index af5bb8fedfea7..00526fdd7691f 100644 +--- a/drivers/gpio/gpiolib.c ++++ b/drivers/gpio/gpiolib.c +@@ -3215,6 +3215,16 @@ int gpiod_to_irq(const struct gpio_desc *desc) + + return retirq; + } ++#ifdef CONFIG_GPIOLIB_IRQCHIP ++ if (gc->irq.chip) { ++ /* ++ * Avoid race condition with other code, which tries to lookup ++ * an IRQ before the irqchip has been properly registered, ++ * i.e. while gpiochip is still being brought up. ++ */ ++ return -EPROBE_DEFER; ++ } ++#endif + return -ENXIO; + } + EXPORT_SYMBOL_GPL(gpiod_to_irq); +diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h +index 7576b523fdbb1..b0178c045267c 100644 +--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h ++++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h +@@ -113,10 +113,10 @@ + /* format 13 is semi-planar YUV411 VUVU */ + #define SUN8I_MIXER_FBFMT_YUV411 14 + /* format 15 doesn't exist */ +-/* format 16 is P010 YVU */ +-#define SUN8I_MIXER_FBFMT_P010_YUV 17 +-/* format 18 is P210 YVU */ +-#define SUN8I_MIXER_FBFMT_P210_YUV 19 ++#define SUN8I_MIXER_FBFMT_P010_YUV 16 ++/* format 17 is P010 YVU */ ++#define SUN8I_MIXER_FBFMT_P210_YUV 18 ++/* format 19 is P210 YVU */ + /* format 20 is packed YVU444 10-bit */ + /* format 21 is packed YUV444 10-bit */ + +diff --git a/drivers/hid/hid-vivaldi.c b/drivers/hid/hid-vivaldi.c +index 576518e704ee6..d57ec17670379 100644 +--- a/drivers/hid/hid-vivaldi.c ++++ b/drivers/hid/hid-vivaldi.c +@@ -143,7 +143,7 @@ out: + static int vivaldi_input_configured(struct hid_device *hdev, + struct hid_input *hidinput) + { +- return sysfs_create_group(&hdev->dev.kobj, &input_attribute_group); ++ return devm_device_add_group(&hdev->dev, &input_attribute_group); + } + + static const struct hid_device_id vivaldi_table[] = { +diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c +index b0e2820a2d578..71798fde2ef0c 100644 +--- a/drivers/hwmon/pmbus/pmbus_core.c ++++ b/drivers/hwmon/pmbus/pmbus_core.c +@@ -898,6 +898,11 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b, + pmbus_update_sensor_data(client, s2); + + regval = status & mask; ++ if (regval) { ++ ret = pmbus_write_byte_data(client, page, reg, regval); ++ if (ret) ++ goto unlock; ++ } + if (s1 && s2) { + s64 v1, v2; + +diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c +index bd087cca1c1d2..af17459c1a5c0 100644 +--- a/drivers/isdn/hardware/mISDN/hfcpci.c ++++ b/drivers/isdn/hardware/mISDN/hfcpci.c +@@ -2005,7 +2005,11 @@ setup_hw(struct hfc_pci *hc) + } + /* Allocate memory for FIFOS */ + /* the memory needs to be on a 32k boundary within the first 4G */ +- 
dma_set_mask(&hc->pdev->dev, 0xFFFF8000); ++ if (dma_set_mask(&hc->pdev->dev, 0xFFFF8000)) { ++ printk(KERN_WARNING ++ "HFC-PCI: No usable DMA configuration!\n"); ++ return -EIO; ++ } + buffer = dma_alloc_coherent(&hc->pdev->dev, 0x8000, &hc->hw.dmahandle, + GFP_KERNEL); + /* We silently assume the address is okay if nonzero */ +diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c +index 40588692cec74..c3b2c99b5cd5c 100644 +--- a/drivers/isdn/mISDN/dsp_pipeline.c ++++ b/drivers/isdn/mISDN/dsp_pipeline.c +@@ -17,9 +17,6 @@ + #include "dsp.h" + #include "dsp_hwec.h" + +-/* uncomment for debugging */ +-/*#define PIPELINE_DEBUG*/ +- + struct dsp_pipeline_entry { + struct mISDN_dsp_element *elem; + void *p; +@@ -104,10 +101,6 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem) + } + } + +-#ifdef PIPELINE_DEBUG +- printk(KERN_DEBUG "%s: %s registered\n", __func__, elem->name); +-#endif +- + return 0; + + err2: +@@ -129,10 +122,6 @@ void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem) + list_for_each_entry_safe(entry, n, &dsp_elements, list) + if (entry->elem == elem) { + device_unregister(&entry->dev); +-#ifdef PIPELINE_DEBUG +- printk(KERN_DEBUG "%s: %s unregistered\n", +- __func__, elem->name); +-#endif + return; + } + printk(KERN_ERR "%s: element %s not in list.\n", __func__, elem->name); +@@ -145,10 +134,6 @@ int dsp_pipeline_module_init(void) + if (IS_ERR(elements_class)) + return PTR_ERR(elements_class); + +-#ifdef PIPELINE_DEBUG +- printk(KERN_DEBUG "%s: dsp pipeline module initialized\n", __func__); +-#endif +- + dsp_hwec_init(); + + return 0; +@@ -168,10 +153,6 @@ void dsp_pipeline_module_exit(void) + __func__, entry->elem->name); + kfree(entry); + } +- +-#ifdef PIPELINE_DEBUG +- printk(KERN_DEBUG "%s: dsp pipeline module exited\n", __func__); +-#endif + } + + int dsp_pipeline_init(struct dsp_pipeline *pipeline) +@@ -181,10 +162,6 @@ int dsp_pipeline_init(struct dsp_pipeline *pipeline) + + INIT_LIST_HEAD(&pipeline->list); + +-#ifdef PIPELINE_DEBUG +- printk(KERN_DEBUG "%s: dsp pipeline ready\n", __func__); +-#endif +- + return 0; + } + +@@ -210,16 +187,12 @@ void dsp_pipeline_destroy(struct dsp_pipeline *pipeline) + return; + + _dsp_pipeline_destroy(pipeline); +- +-#ifdef PIPELINE_DEBUG +- printk(KERN_DEBUG "%s: dsp pipeline destroyed\n", __func__); +-#endif + } + + int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) + { +- int incomplete = 0, found = 0; +- char *dup, *tok, *name, *args; ++ int found = 0; ++ char *dup, *next, *tok, *name, *args; + struct dsp_element_entry *entry, *n; + struct dsp_pipeline_entry *pipeline_entry; + struct mISDN_dsp_element *elem; +@@ -230,10 +203,10 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) + if (!list_empty(&pipeline->list)) + _dsp_pipeline_destroy(pipeline); + +- dup = kstrdup(cfg, GFP_ATOMIC); ++ dup = next = kstrdup(cfg, GFP_ATOMIC); + if (!dup) + return 0; +- while ((tok = strsep(&dup, "|"))) { ++ while ((tok = strsep(&next, "|"))) { + if (!strlen(tok)) + continue; + name = strsep(&tok, "("); +@@ -251,7 +224,6 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) + printk(KERN_ERR "%s: failed to add " + "entry to pipeline: %s (out of " + "memory)\n", __func__, elem->name); +- incomplete = 1; + goto _out; + } + pipeline_entry->elem = elem; +@@ -268,20 +240,12 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) + if (pipeline_entry->p) { + list_add_tail(&pipeline_entry-> + list, &pipeline->list); +-#ifdef 
PIPELINE_DEBUG +- printk(KERN_DEBUG "%s: created " +- "instance of %s%s%s\n", +- __func__, name, args ? +- " with args " : "", args ? +- args : ""); +-#endif + } else { + printk(KERN_ERR "%s: failed " + "to add entry to pipeline: " + "%s (new() returned NULL)\n", + __func__, elem->name); + kfree(pipeline_entry); +- incomplete = 1; + } + } + found = 1; +@@ -290,11 +254,9 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) + + if (found) + found = 0; +- else { ++ else + printk(KERN_ERR "%s: element not found, skipping: " + "%s\n", __func__, name); +- incomplete = 1; +- } + } + + _out: +@@ -303,10 +265,6 @@ _out: + else + pipeline->inuse = 0; + +-#ifdef PIPELINE_DEBUG +- printk(KERN_DEBUG "%s: dsp pipeline built%s: %s\n", +- __func__, incomplete ? " incomplete" : "", cfg); +-#endif + kfree(dup); + return 0; + } +diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c +index b274083a6e635..091e0e051d109 100644 +--- a/drivers/mmc/host/meson-gx-mmc.c ++++ b/drivers/mmc/host/meson-gx-mmc.c +@@ -173,6 +173,8 @@ struct meson_host { + int irq; + + bool vqmmc_enabled; ++ bool needs_pre_post_req; ++ + }; + + #define CMD_CFG_LENGTH_MASK GENMASK(8, 0) +@@ -652,6 +654,8 @@ static void meson_mmc_request_done(struct mmc_host *mmc, + struct meson_host *host = mmc_priv(mmc); + + host->cmd = NULL; ++ if (host->needs_pre_post_req) ++ meson_mmc_post_req(mmc, mrq, 0); + mmc_request_done(host->mmc, mrq); + } + +@@ -869,7 +873,7 @@ static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data + static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) + { + struct meson_host *host = mmc_priv(mmc); +- bool needs_pre_post_req = mrq->data && ++ host->needs_pre_post_req = mrq->data && + !(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE); + + /* +@@ -885,22 +889,19 @@ static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) + } + } + +- if (needs_pre_post_req) { ++ if (host->needs_pre_post_req) { + meson_mmc_get_transfer_mode(mmc, mrq); + if (!meson_mmc_desc_chain_mode(mrq->data)) +- needs_pre_post_req = false; ++ host->needs_pre_post_req = false; + } + +- if (needs_pre_post_req) ++ if (host->needs_pre_post_req) + meson_mmc_pre_req(mmc, mrq); + + /* Stop execution */ + writel(0, host->regs + SD_EMMC_START); + + meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd); +- +- if (needs_pre_post_req) +- meson_mmc_post_req(mmc, mrq, 0); + } + + static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd) +diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c +index 1f642fdbf214c..5ee8809bc2711 100644 +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -2342,7 +2342,7 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port, + + phylink_set_port_modes(mask); + +- if (state->interface != PHY_INTERFACE_MODE_TRGMII || ++ if (state->interface != PHY_INTERFACE_MODE_TRGMII && + !phy_interface_mode_is_8023z(state->interface)) { + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +index e84ad587fb214..2c2a56d5a0a1a 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +@@ -41,6 +41,13 @@ + void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) + { + struct bcmgenet_priv *priv = netdev_priv(dev); ++ struct device *kdev = &priv->pdev->dev; ++ ++ if (!device_can_wakeup(kdev)) { 
++ wol->supported = 0; ++ wol->wolopts = 0; ++ return; ++ } + + wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; + wol->wolopts = priv->wolopts; +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c +index 2af464ac250ac..f29ec765d684a 100644 +--- a/drivers/net/ethernet/cadence/macb_main.c ++++ b/drivers/net/ethernet/cadence/macb_main.c +@@ -1448,7 +1448,14 @@ static int macb_poll(struct napi_struct *napi, int budget) + if (work_done < budget) { + napi_complete_done(napi, work_done); + +- /* Packets received while interrupts were disabled */ ++ /* RSR bits only seem to propagate to raise interrupts when ++ * interrupts are enabled at the time, so if bits are already ++ * set due to packets received while interrupts were disabled, ++ * they will not cause another interrupt to be generated when ++ * interrupts are re-enabled. ++ * Check for this case here. This has been seen to happen ++ * around 30% of the time under heavy network load. ++ */ + status = macb_readl(bp, RSR); + if (status) { + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) +@@ -1456,6 +1463,22 @@ static int macb_poll(struct napi_struct *napi, int budget) + napi_reschedule(napi); + } else { + queue_writel(queue, IER, bp->rx_intr_mask); ++ ++ /* In rare cases, packets could have been received in ++ * the window between the check above and re-enabling ++ * interrupts. Therefore, a double-check is required ++ * to avoid losing a wakeup. This can potentially race ++ * with the interrupt handler doing the same actions ++ * if an interrupt is raised just after enabling them, ++ * but this should be harmless. ++ */ ++ status = macb_readl(bp, RSR); ++ if (unlikely(status)) { ++ queue_writel(queue, IDR, bp->rx_intr_mask); ++ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) ++ queue_writel(queue, ISR, MACB_BIT(RCOMP)); ++ napi_schedule(napi); ++ } + } + } + +diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c +index cc7d4f93da540..799a1486f586d 100644 +--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c ++++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c +@@ -1456,6 +1456,7 @@ static int gfar_get_ts_info(struct net_device *dev, + ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp"); + if (ptp_node) { + ptp_dev = of_find_device_by_node(ptp_node); ++ of_node_put(ptp_node); + if (ptp_dev) + ptp = platform_get_drvdata(ptp_dev); + } +diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +index 1114a15a9ce3c..989d5c7263d7c 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +@@ -742,10 +742,8 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id) + vsi = pf->vsi[vf->lan_vsi_idx]; + dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n", + vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs); +- dev_info(&pf->pdev->dev, " num MDD=%lld, invalid msg=%lld, valid msg=%lld\n", +- vf->num_mdd_events, +- vf->num_invalid_msgs, +- vf->num_valid_msgs); ++ dev_info(&pf->pdev->dev, " num MDD=%lld\n", ++ vf->num_mdd_events); + } else { + dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id); + } +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +index f71b7334e2955..9181e007e0392 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +@@ -1864,19 +1864,17 @@ 
sriov_configure_out: + /***********************virtual channel routines******************/ + + /** +- * i40e_vc_send_msg_to_vf_ex ++ * i40e_vc_send_msg_to_vf + * @vf: pointer to the VF info + * @v_opcode: virtual channel opcode + * @v_retval: virtual channel return value + * @msg: pointer to the msg buffer + * @msglen: msg length +- * @is_quiet: true for not printing unsuccessful return values, false otherwise + * + * send msg to VF + **/ +-static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, +- u32 v_retval, u8 *msg, u16 msglen, +- bool is_quiet) ++static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, ++ u32 v_retval, u8 *msg, u16 msglen) + { + struct i40e_pf *pf; + struct i40e_hw *hw; +@@ -1891,25 +1889,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, + hw = &pf->hw; + abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + +- /* single place to detect unsuccessful return values */ +- if (v_retval && !is_quiet) { +- vf->num_invalid_msgs++; +- dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n", +- vf->vf_id, v_opcode, v_retval); +- if (vf->num_invalid_msgs > +- I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { +- dev_err(&pf->pdev->dev, +- "Number of invalid messages exceeded for VF %d\n", +- vf->vf_id); +- dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); +- set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); +- } +- } else { +- vf->num_valid_msgs++; +- /* reset the invalid counter, if a valid message is received. */ +- vf->num_invalid_msgs = 0; +- } +- + aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, + msg, msglen, NULL); + if (aq_ret) { +@@ -1922,23 +1901,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, + return 0; + } + +-/** +- * i40e_vc_send_msg_to_vf +- * @vf: pointer to the VF info +- * @v_opcode: virtual channel opcode +- * @v_retval: virtual channel return value +- * @msg: pointer to the msg buffer +- * @msglen: msg length +- * +- * send msg to VF +- **/ +-static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, +- u32 v_retval, u8 *msg, u16 msglen) +-{ +- return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval, +- msg, msglen, false); +-} +- + /** + * i40e_vc_send_resp_to_vf + * @vf: pointer to the VF info +@@ -2759,7 +2721,6 @@ error_param: + * i40e_check_vf_permission + * @vf: pointer to the VF info + * @al: MAC address list from virtchnl +- * @is_quiet: set true for printing msg without opcode info, false otherwise + * + * Check that the given list of MAC addresses is allowed. Will return -EPERM + * if any address in the list is not valid. Checks the following conditions: +@@ -2774,15 +2735,13 @@ error_param: + * addresses might not be accurate. 
+ **/ + static inline int i40e_check_vf_permission(struct i40e_vf *vf, +- struct virtchnl_ether_addr_list *al, +- bool *is_quiet) ++ struct virtchnl_ether_addr_list *al) + { + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; + int mac2add_cnt = 0; + int i; + +- *is_quiet = false; + for (i = 0; i < al->num_elements; i++) { + struct i40e_mac_filter *f; + u8 *addr = al->list[i].addr; +@@ -2806,7 +2765,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, + !ether_addr_equal(addr, vf->default_lan_addr.addr)) { + dev_err(&pf->pdev->dev, + "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); +- *is_quiet = true; + return -EPERM; + } + +@@ -2843,7 +2801,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) + (struct virtchnl_ether_addr_list *)msg; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = NULL; +- bool is_quiet = false; + i40e_status ret = 0; + int i; + +@@ -2860,7 +2817,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) + */ + spin_lock_bh(&vsi->mac_filter_hash_lock); + +- ret = i40e_check_vf_permission(vf, al, &is_quiet); ++ ret = i40e_check_vf_permission(vf, al); + if (ret) { + spin_unlock_bh(&vsi->mac_filter_hash_lock); + goto error_param; +@@ -2898,8 +2855,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) + + error_param: + /* send the response to the VF */ +- return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR, +- ret, NULL, 0, is_quiet); ++ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, ++ ret, NULL, 0); + } + + /** +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +index 03c42fd0fea19..a554d0a0b09bd 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +@@ -10,8 +10,6 @@ + + #define I40E_VIRTCHNL_SUPPORTED_QTYPES 2 + +-#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10 +- + #define I40E_VLAN_PRIORITY_SHIFT 13 + #define I40E_VLAN_MASK 0xFFF + #define I40E_PRIORITY_MASK 0xE000 +@@ -92,9 +90,6 @@ struct i40e_vf { + u8 num_queue_pairs; /* num of qps assigned to VF vsis */ + u8 num_req_queues; /* num of requested qps */ + u64 num_mdd_events; /* num of mdd events detected */ +- /* num of continuous malformed or invalid msgs detected */ +- u64 num_invalid_msgs; +- u64 num_valid_msgs; /* num of valid msgs detected */ + + unsigned long vf_caps; /* vf's adv. 
capabilities */ + unsigned long vf_states; /* vf's runtime states */ +diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +index b06fbe99d8e93..b6dd8f81d6997 100644 +--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h ++++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +@@ -870,11 +870,11 @@ struct ice_aqc_get_phy_caps { + * 01b - Report topology capabilities + * 10b - Report SW configured + */ +-#define ICE_AQC_REPORT_MODE_S 1 +-#define ICE_AQC_REPORT_MODE_M (3 << ICE_AQC_REPORT_MODE_S) +-#define ICE_AQC_REPORT_NVM_CAP 0 +-#define ICE_AQC_REPORT_TOPO_CAP BIT(1) +-#define ICE_AQC_REPORT_SW_CFG BIT(2) ++#define ICE_AQC_REPORT_MODE_S 1 ++#define ICE_AQC_REPORT_MODE_M (3 << ICE_AQC_REPORT_MODE_S) ++#define ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA 0 ++#define ICE_AQC_REPORT_TOPO_CAP_MEDIA BIT(1) ++#define ICE_AQC_REPORT_ACTIVE_CFG BIT(2) + __le32 reserved1; + __le32 addr_high; + __le32 addr_low; +diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c +index 2b0d0373ab2c6..ecdc467c4f6f5 100644 +--- a/drivers/net/ethernet/intel/ice/ice_common.c ++++ b/drivers/net/ethernet/intel/ice/ice_common.c +@@ -193,7 +193,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, + ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n", + pcaps->module_type[2]); + +- if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) { ++ if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) { + pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); + pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high); + memcpy(pi->phy.link_info.module_type, &pcaps->module_type, +@@ -924,7 +924,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw) + + /* Initialize port_info struct with PHY capabilities */ + status = ice_aq_get_phy_caps(hw->port_info, false, +- ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL); ++ ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, ++ NULL); + devm_kfree(ice_hw_to_dev(hw), pcaps); + if (status) + goto err_unroll_sched; +@@ -2682,7 +2683,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi) + if (!pcaps) + return ICE_ERR_NO_MEMORY; + +- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, ++ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + pcaps, NULL); + + devm_kfree(ice_hw_to_dev(hw), pcaps); +@@ -2842,8 +2843,8 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) + return ICE_ERR_NO_MEMORY; + + /* Get the current PHY config */ +- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, +- NULL); ++ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, ++ pcaps, NULL); + if (status) { + *aq_failures = ICE_SET_FC_AQ_FAIL_GET; + goto out; +@@ -2989,7 +2990,7 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, + if (!pcaps) + return ICE_ERR_NO_MEMORY; + +- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, ++ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, + NULL); + if (status) + goto out; +diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c +index 14eba9bc174d8..421fc707f80af 100644 +--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c ++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c +@@ -1081,7 +1081,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) + if (!caps) + return -ENOMEM; + +- status = ice_aq_get_phy_caps(pi, 
false, ICE_AQC_REPORT_TOPO_CAP, ++ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + caps, NULL); + if (status) { + err = -EAGAIN; +@@ -1976,7 +1976,7 @@ ice_get_link_ksettings(struct net_device *netdev, + return -ENOMEM; + + status = ice_aq_get_phy_caps(vsi->port_info, false, +- ICE_AQC_REPORT_SW_CFG, caps, NULL); ++ ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); + if (status) { + err = -EIO; + goto done; +@@ -2013,7 +2013,7 @@ ice_get_link_ksettings(struct net_device *netdev, + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + + status = ice_aq_get_phy_caps(vsi->port_info, false, +- ICE_AQC_REPORT_TOPO_CAP, caps, NULL); ++ ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL); + if (status) { + err = -EIO; + goto done; +@@ -2187,12 +2187,12 @@ ice_set_link_ksettings(struct net_device *netdev, + { + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ethtool_link_ksettings safe_ks, copy_ks; +- struct ice_aqc_get_phy_caps_data *abilities; + u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT; +- u16 adv_link_speed, curr_link_speed, idx; ++ struct ice_aqc_get_phy_caps_data *phy_caps; + struct ice_aqc_set_phy_cfg_data config; ++ u16 adv_link_speed, curr_link_speed; + struct ice_pf *pf = np->vsi->back; +- struct ice_port_info *p; ++ struct ice_port_info *pi; + u8 autoneg_changed = 0; + enum ice_status status; + u64 phy_type_high = 0; +@@ -2200,33 +2200,25 @@ ice_set_link_ksettings(struct net_device *netdev, + int err = 0; + bool linkup; + +- p = np->vsi->port_info; ++ pi = np->vsi->port_info; + +- if (!p) ++ if (!pi) + return -EOPNOTSUPP; + +- /* Check if this is LAN VSI */ +- ice_for_each_vsi(pf, idx) +- if (pf->vsi[idx]->type == ICE_VSI_PF) { +- if (np->vsi != pf->vsi[idx]) +- return -EOPNOTSUPP; +- break; +- } +- +- if (p->phy.media_type != ICE_MEDIA_BASET && +- p->phy.media_type != ICE_MEDIA_FIBER && +- p->phy.media_type != ICE_MEDIA_BACKPLANE && +- p->phy.media_type != ICE_MEDIA_DA && +- p->phy.link_info.link_info & ICE_AQ_LINK_UP) ++ if (pi->phy.media_type != ICE_MEDIA_BASET && ++ pi->phy.media_type != ICE_MEDIA_FIBER && ++ pi->phy.media_type != ICE_MEDIA_BACKPLANE && ++ pi->phy.media_type != ICE_MEDIA_DA && ++ pi->phy.link_info.link_info & ICE_AQ_LINK_UP) + return -EOPNOTSUPP; + +- abilities = kzalloc(sizeof(*abilities), GFP_KERNEL); +- if (!abilities) ++ phy_caps = kzalloc(sizeof(*phy_caps), GFP_KERNEL); ++ if (!phy_caps) + return -ENOMEM; + + /* Get the PHY capabilities based on media */ +- status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_TOPO_CAP, +- abilities, NULL); ++ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, ++ phy_caps, NULL); + if (status) { + err = -EAGAIN; + goto done; +@@ -2288,26 +2280,26 @@ ice_set_link_ksettings(struct net_device *netdev, + * configuration is initialized during probe from PHY capabilities + * software mode, and updated on set PHY configuration. 
+ */ +- memcpy(&config, &p->phy.curr_user_phy_cfg, sizeof(config)); ++ memcpy(&config, &pi->phy.curr_user_phy_cfg, sizeof(config)); + + config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + + /* Check autoneg */ +- err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed, ++ err = ice_setup_autoneg(pi, &safe_ks, &config, autoneg, &autoneg_changed, + netdev); + + if (err) + goto done; + + /* Call to get the current link speed */ +- p->phy.get_link_info = true; +- status = ice_get_link_status(p, &linkup); ++ pi->phy.get_link_info = true; ++ status = ice_get_link_status(pi, &linkup); + if (status) { + err = -EAGAIN; + goto done; + } + +- curr_link_speed = p->phy.link_info.link_speed; ++ curr_link_speed = pi->phy.curr_user_speed_req; + adv_link_speed = ice_ksettings_find_adv_link_speed(ks); + + /* If speed didn't get set, set it to what it currently is. +@@ -2326,7 +2318,7 @@ ice_set_link_ksettings(struct net_device *netdev, + } + + /* save the requested speeds */ +- p->phy.link_info.req_speeds = adv_link_speed; ++ pi->phy.link_info.req_speeds = adv_link_speed; + + /* set link and auto negotiation so changes take effect */ + config.caps |= ICE_AQ_PHY_ENA_LINK; +@@ -2342,9 +2334,9 @@ ice_set_link_ksettings(struct net_device *netdev, + * for set PHY configuration + */ + config.phy_type_high = cpu_to_le64(phy_type_high) & +- abilities->phy_type_high; ++ phy_caps->phy_type_high; + config.phy_type_low = cpu_to_le64(phy_type_low) & +- abilities->phy_type_low; ++ phy_caps->phy_type_low; + + if (!(config.phy_type_high || config.phy_type_low)) { + /* If there is no intersection and lenient mode is enabled, then +@@ -2364,7 +2356,7 @@ ice_set_link_ksettings(struct net_device *netdev, + } + + /* If link is up put link down */ +- if (p->phy.link_info.link_info & ICE_AQ_LINK_UP) { ++ if (pi->phy.link_info.link_info & ICE_AQ_LINK_UP) { + /* Tell the OS link is going down, the link will go + * back up when fw says it is ready asynchronously + */ +@@ -2374,7 +2366,7 @@ ice_set_link_ksettings(struct net_device *netdev, + } + + /* make the aq call */ +- status = ice_aq_set_phy_cfg(&pf->hw, p, &config, NULL); ++ status = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL); + if (status) { + netdev_info(netdev, "Set phy config failed,\n"); + err = -EAGAIN; +@@ -2382,9 +2374,9 @@ ice_set_link_ksettings(struct net_device *netdev, + } + + /* Save speed request */ +- p->phy.curr_user_speed_req = adv_link_speed; ++ pi->phy.curr_user_speed_req = adv_link_speed; + done: +- kfree(abilities); ++ kfree(phy_caps); + clear_bit(__ICE_CFG_BUSY, pf->state); + + return err; +@@ -2954,7 +2946,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) + return; + + /* Get current PHY config */ +- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, ++ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, + NULL); + if (status) + goto out; +@@ -3021,7 +3013,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) + return -ENOMEM; + + /* Get current PHY config */ +- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, ++ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, + NULL); + if (status) { + kfree(pcaps); +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c +index 6c75df216fa7a..20c9d55f3adce 100644 +--- a/drivers/net/ethernet/intel/ice/ice_main.c ++++ b/drivers/net/ethernet/intel/ice/ice_main.c +@@ -726,7 +726,7 @@ void ice_print_link_msg(struct 
ice_vsi *vsi, bool isup) + } + + status = ice_aq_get_phy_caps(vsi->port_info, false, +- ICE_AQC_REPORT_SW_CFG, caps, NULL); ++ ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); + if (status) + netdev_info(vsi->netdev, "Get phy capability failed.\n"); + +@@ -1645,7 +1645,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) + if (!pcaps) + return -ENOMEM; + +- retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, ++ retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, + NULL); + if (retcode) { + dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n", +@@ -1705,7 +1705,7 @@ static int ice_init_nvm_phy_type(struct ice_port_info *pi) + if (!pcaps) + return -ENOMEM; + +- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, pcaps, ++ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps, + NULL); + + if (status) { +@@ -1821,7 +1821,7 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi) + if (!pcaps) + return -ENOMEM; + +- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, ++ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, + NULL); + if (status) { + dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); +@@ -1900,7 +1900,7 @@ static int ice_configure_phy(struct ice_vsi *vsi) + return -ENOMEM; + + /* Get current PHY config */ +- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, ++ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, + NULL); + if (status) { + dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n", +@@ -1918,7 +1918,7 @@ static int ice_configure_phy(struct ice_vsi *vsi) + + /* Use PHY topology as baseline for configuration */ + memset(pcaps, 0, sizeof(*pcaps)); +- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, ++ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, + NULL); + if (status) { + dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n", +diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +index 48511ad0e0c82..5134342ff70fc 100644 +--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c ++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +@@ -1849,24 +1849,6 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, + + dev = ice_pf_to_dev(pf); + +- /* single place to detect unsuccessful return values */ +- if (v_retval) { +- vf->num_inval_msgs++; +- dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id, +- v_opcode, v_retval); +- if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) { +- dev_err(dev, "Number of invalid messages exceeded for VF %d\n", +- vf->vf_id); +- dev_err(dev, "Use PF Control I/F to enable the VF\n"); +- set_bit(ICE_VF_STATE_DIS, vf->vf_states); +- return -EIO; +- } +- } else { +- vf->num_valid_msgs++; +- /* reset the invalid counter, if a valid message is received. 
*/ +- vf->num_inval_msgs = 0; +- } +- + aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, + msg, msglen, NULL); + if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { +diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +index 59e5b4f16e965..d2e935c678a14 100644 +--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h ++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +@@ -13,7 +13,6 @@ + #define ICE_MAX_MACADDR_PER_VF 18 + + /* Malicious Driver Detection */ +-#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10 + #define ICE_MDD_EVENTS_THRESHOLD 30 + + /* Static VF transaction/status register def */ +@@ -97,8 +96,6 @@ struct ice_vf { + unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ + DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ + +- u64 num_inval_msgs; /* number of continuous invalid msgs */ +- u64 num_valid_msgs; /* number of valid msgs detected */ + unsigned long vf_caps; /* VF's adv. capabilities */ + u8 num_req_qs; /* num of queue pairs requested by VF */ + u16 num_mac; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +index 6af0dd8471691..94426d29025eb 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +@@ -130,11 +130,8 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd) + + static void cmd_free_index(struct mlx5_cmd *cmd, int idx) + { +- unsigned long flags; +- +- spin_lock_irqsave(&cmd->alloc_lock, flags); ++ lockdep_assert_held(&cmd->alloc_lock); + set_bit(idx, &cmd->bitmask); +- spin_unlock_irqrestore(&cmd->alloc_lock, flags); + } + + static void cmd_ent_get(struct mlx5_cmd_work_ent *ent) +@@ -144,17 +141,21 @@ static void cmd_ent_get(struct mlx5_cmd_work_ent *ent) + + static void cmd_ent_put(struct mlx5_cmd_work_ent *ent) + { ++ struct mlx5_cmd *cmd = ent->cmd; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&cmd->alloc_lock, flags); + if (!refcount_dec_and_test(&ent->refcnt)) +- return; ++ goto out; + + if (ent->idx >= 0) { +- struct mlx5_cmd *cmd = ent->cmd; +- + cmd_free_index(cmd, ent->idx); + up(ent->page_queue ? 
&cmd->pages_sem : &cmd->sem); + } + + cmd_free_ent(ent); ++out: ++ spin_unlock_irqrestore(&cmd->alloc_lock, flags); + } + + static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c +index 0f0d250bbc150..c04413f449c50 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c +@@ -123,6 +123,10 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, + return; + } + ++ /* Handle multipath entry with lower priority value */ ++ if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority) ++ return; ++ + /* Handle add/replace event */ + nhs = fib_info_num_path(fi); + if (nhs == 1) { +@@ -132,12 +136,13 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, + int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev); + + if (i < 0) +- i = MLX5_LAG_NORMAL_AFFINITY; +- else +- ++i; ++ return; + ++ i++; + mlx5_lag_set_port_affinity(ldev, i); + } ++ ++ mp->mfi = fi; + return; + } + +diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c +index 9e098e40fb1c6..a9a9bf2e065a5 100644 +--- a/drivers/net/ethernet/nxp/lpc_eth.c ++++ b/drivers/net/ethernet/nxp/lpc_eth.c +@@ -1468,6 +1468,7 @@ static int lpc_eth_drv_resume(struct platform_device *pdev) + { + struct net_device *ndev = platform_get_drvdata(pdev); + struct netdata_local *pldat; ++ int ret; + + if (device_may_wakeup(&pdev->dev)) + disable_irq_wake(ndev->irq); +@@ -1477,7 +1478,9 @@ static int lpc_eth_drv_resume(struct platform_device *pdev) + pldat = netdev_priv(ndev); + + /* Enable interface clock */ +- clk_enable(pldat->clk); ++ ret = clk_enable(pldat->clk); ++ if (ret) ++ return ret; + + /* Reset and initialize */ + __lpc_eth_reset(pldat); +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c +index b8dc5c4591ef5..ef0ad4cf82e60 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c +@@ -3778,11 +3778,11 @@ bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) + return found; + } + +-static void qed_iov_get_link(struct qed_hwfn *p_hwfn, +- u16 vfid, +- struct qed_mcp_link_params *p_params, +- struct qed_mcp_link_state *p_link, +- struct qed_mcp_link_capabilities *p_caps) ++static int qed_iov_get_link(struct qed_hwfn *p_hwfn, ++ u16 vfid, ++ struct qed_mcp_link_params *p_params, ++ struct qed_mcp_link_state *p_link, ++ struct qed_mcp_link_capabilities *p_caps) + { + struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, + vfid, +@@ -3790,7 +3790,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn, + struct qed_bulletin_content *p_bulletin; + + if (!p_vf) +- return; ++ return -EINVAL; + + p_bulletin = p_vf->bulletin.p_virt; + +@@ -3800,6 +3800,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn, + __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin); + if (p_caps) + __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); ++ return 0; + } + + static int +@@ -4658,6 +4659,7 @@ static int qed_get_vf_config(struct qed_dev *cdev, + struct qed_public_vf_info *vf_info; + struct qed_mcp_link_state link; + u32 tx_rate; ++ int ret; + + /* Sanitize request */ + if (IS_VF(cdev)) +@@ -4671,7 +4673,9 @@ static int qed_get_vf_config(struct qed_dev *cdev, + + vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); + +- qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL); ++ ret = qed_iov_get_link(hwfn, 
vf_id, NULL, &link, NULL); ++ if (ret) ++ return ret; + + /* Fill information about VF */ + ivi->vf = vf_id; +diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c +index 72a38d53d33f6..e2a5a6a373cbe 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c +@@ -513,6 +513,9 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) + p_iov->bulletin.size, + &p_iov->bulletin.phys, + GFP_KERNEL); ++ if (!p_iov->bulletin.p_virt) ++ goto free_pf2vf_reply; ++ + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n", + p_iov->bulletin.p_virt, +@@ -552,6 +555,10 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) + + return rc; + ++free_pf2vf_reply: ++ dma_free_coherent(&p_hwfn->cdev->pdev->dev, ++ sizeof(union pfvf_tlvs), ++ p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys); + free_vf2pf_request: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union vfpf_tlvs), +diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c +index 43222a34cba06..f9514518700eb 100644 +--- a/drivers/net/ethernet/ti/cpts.c ++++ b/drivers/net/ethernet/ti/cpts.c +@@ -568,7 +568,9 @@ int cpts_register(struct cpts *cpts) + for (i = 0; i < CPTS_MAX_EVENTS; i++) + list_add(&cpts->pool_data[i].list, &cpts->pool); + +- clk_enable(cpts->refclk); ++ err = clk_enable(cpts->refclk); ++ if (err) ++ return err; + + cpts_write32(cpts, CPTS_EN, control); + cpts_write32(cpts, TS_PEND_EN, int_enable); +diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c +index 962831cdde4db..4bd44fbc6ecfa 100644 +--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c ++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c +@@ -1187,7 +1187,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) + if (rc) { + dev_err(dev, + "Cannot register network device, aborting\n"); +- goto error; ++ goto put_node; + } + + dev_info(dev, +@@ -1195,6 +1195,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev) + (unsigned int __force)ndev->mem_start, lp->base_addr, ndev->irq); + return 0; + ++put_node: ++ of_node_put(lp->phy_node); + error: + free_netdev(ndev); + return rc; +diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c +index 7bf43031cea8c..3d75b98f3051d 100644 +--- a/drivers/net/phy/dp83822.c ++++ b/drivers/net/phy/dp83822.c +@@ -289,7 +289,7 @@ static int dp83822_config_intr(struct phy_device *phydev) + if (err < 0) + return err; + +- err = phy_write(phydev, MII_DP83822_MISR1, 0); ++ err = phy_write(phydev, MII_DP83822_MISR2, 0); + if (err < 0) + return err; + +diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c +index 94d19158efc18..ca261e0fc9c9b 100644 +--- a/drivers/net/xen-netback/xenbus.c ++++ b/drivers/net/xen-netback/xenbus.c +@@ -256,6 +256,7 @@ static void backend_disconnect(struct backend_info *be) + unsigned int queue_index; + + xen_unregister_watchers(vif); ++ xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status"); + #ifdef CONFIG_DEBUG_FS + xenvif_debugfs_delif(vif); + #endif /* CONFIG_DEBUG_FS */ +@@ -675,7 +676,6 @@ static void hotplug_status_changed(struct xenbus_watch *watch, + + /* Not interested in this watch anymore. 
*/ + unregister_hotplug_status_watch(be); +- xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status"); + } + kfree(str); + } +@@ -824,15 +824,11 @@ static void connect(struct backend_info *be) + xenvif_carrier_on(be->vif); + + unregister_hotplug_status_watch(be); +- if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) { +- err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, +- NULL, hotplug_status_changed, +- "%s/%s", dev->nodename, +- "hotplug-status"); +- if (err) +- goto err; ++ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL, ++ hotplug_status_changed, ++ "%s/%s", dev->nodename, "hotplug-status"); ++ if (!err) + be->have_hotplug_status_watch = 1; +- } + + netif_tx_wake_all_queues(be->vif->dev); + +diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c +index 1caebefb25ff1..2ae1474faede9 100644 +--- a/drivers/nfc/port100.c ++++ b/drivers/nfc/port100.c +@@ -1609,7 +1609,9 @@ free_nfc_dev: + nfc_digital_free_device(dev->nfc_digital_dev); + + error: ++ usb_kill_urb(dev->in_urb); + usb_free_urb(dev->in_urb); ++ usb_kill_urb(dev->out_urb); + usb_free_urb(dev->out_urb); + usb_put_dev(dev->udev); + +diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c +index 624273d0e727f..a9f97023d5a00 100644 +--- a/drivers/spi/spi-rockchip.c ++++ b/drivers/spi/spi-rockchip.c +@@ -567,6 +567,12 @@ static int rockchip_spi_slave_abort(struct spi_controller *ctlr) + { + struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); + ++ if (atomic_read(&rs->state) & RXDMA) ++ dmaengine_terminate_sync(ctlr->dma_rx); ++ if (atomic_read(&rs->state) & TXDMA) ++ dmaengine_terminate_sync(ctlr->dma_tx); ++ atomic_set(&rs->state, 0); ++ spi_enable_chip(rs, false); + rs->slave_abort = true; + complete(&ctlr->xfer_completion); + +@@ -636,7 +642,7 @@ static int rockchip_spi_probe(struct platform_device *pdev) + struct spi_controller *ctlr; + struct resource *mem; + struct device_node *np = pdev->dev.of_node; +- u32 rsd_nsecs; ++ u32 rsd_nsecs, num_cs; + bool slave_mode; + + slave_mode = of_property_read_bool(np, "spi-slave"); +@@ -744,8 +750,9 @@ static int rockchip_spi_probe(struct platform_device *pdev) + * rk spi0 has two native cs, spi1..5 one cs only + * if num-cs is missing in the dts, default to 1 + */ +- if (of_property_read_u16(np, "num-cs", &ctlr->num_chipselect)) +- ctlr->num_chipselect = 1; ++ if (of_property_read_u32(np, "num-cs", &num_cs)) ++ num_cs = 1; ++ ctlr->num_chipselect = num_cs; + ctlr->use_gpio_descriptors = true; + } + ctlr->dev.of_node = pdev->dev.of_node; +diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c +index bd5f874334043..de30262c3fae0 100644 +--- a/drivers/staging/gdm724x/gdm_lte.c ++++ b/drivers/staging/gdm724x/gdm_lte.c +@@ -76,14 +76,15 @@ static void tx_complete(void *arg) + + static int gdm_lte_rx(struct sk_buff *skb, struct nic *nic, int nic_type) + { +- int ret; ++ int ret, len; + ++ len = skb->len + ETH_HLEN; + ret = netif_rx_ni(skb); + if (ret == NET_RX_DROP) { + nic->stats.rx_dropped++; + } else { + nic->stats.rx_packets++; +- nic->stats.rx_bytes += skb->len + ETH_HLEN; ++ nic->stats.rx_bytes += len; + } + + return 0; +diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c +index 4df6d04315e39..b912ad2f4b720 100644 +--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c ++++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c +@@ -6679,6 +6679,7 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf) + struct sta_info *psta_bmc; + struct 
list_head *xmitframe_plist, *xmitframe_phead; + struct xmit_frame *pxmitframe = NULL; ++ struct xmit_priv *pxmitpriv = &padapter->xmitpriv; + struct sta_priv *pstapriv = &padapter->stapriv; + + /* for BC/MC Frames */ +@@ -6689,7 +6690,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf) + if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len > 0)) { + msleep(10);/* 10ms, ATIM(HIQ) Windows */ + +- spin_lock_bh(&psta_bmc->sleep_q.lock); ++ /* spin_lock_bh(&psta_bmc->sleep_q.lock); */ ++ spin_lock_bh(&pxmitpriv->lock); + + xmitframe_phead = get_list_head(&psta_bmc->sleep_q); + xmitframe_plist = get_next(xmitframe_phead); +@@ -6715,7 +6717,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf) + rtw_hal_xmitframe_enqueue(padapter, pxmitframe); + } + +- spin_unlock_bh(&psta_bmc->sleep_q.lock); ++ /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */ ++ spin_unlock_bh(&pxmitpriv->lock); + + /* check hi queue and bmc_sleepq */ + rtw_chk_hi_queue_cmd(padapter); +diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c +index 0d47e6e121777..6979f8dbccb84 100644 +--- a/drivers/staging/rtl8723bs/core/rtw_recv.c ++++ b/drivers/staging/rtl8723bs/core/rtw_recv.c +@@ -1144,8 +1144,10 @@ sint validate_recv_ctrl_frame(struct adapter *padapter, union recv_frame *precv_ + if ((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid))) { + struct list_head *xmitframe_plist, *xmitframe_phead; + struct xmit_frame *pxmitframe = NULL; ++ struct xmit_priv *pxmitpriv = &padapter->xmitpriv; + +- spin_lock_bh(&psta->sleep_q.lock); ++ /* spin_lock_bh(&psta->sleep_q.lock); */ ++ spin_lock_bh(&pxmitpriv->lock); + + xmitframe_phead = get_list_head(&psta->sleep_q); + xmitframe_plist = get_next(xmitframe_phead); +@@ -1180,10 +1182,12 @@ sint validate_recv_ctrl_frame(struct adapter *padapter, union recv_frame *precv_ + update_beacon(padapter, _TIM_IE_, NULL, true); + } + +- spin_unlock_bh(&psta->sleep_q.lock); ++ /* spin_unlock_bh(&psta->sleep_q.lock); */ ++ spin_unlock_bh(&pxmitpriv->lock); + + } else { +- spin_unlock_bh(&psta->sleep_q.lock); ++ /* spin_unlock_bh(&psta->sleep_q.lock); */ ++ spin_unlock_bh(&pxmitpriv->lock); + + /* DBG_871X("no buffered packets to xmit\n"); */ + if (pstapriv->tim_bitmap&BIT(psta->aid)) { +diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c +index b1784b4e466f3..e3f56c6cc882e 100644 +--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c ++++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c +@@ -330,48 +330,46 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta) + + /* list_del_init(&psta->wakeup_list); */ + +- spin_lock_bh(&psta->sleep_q.lock); ++ spin_lock_bh(&pxmitpriv->lock); ++ + rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q); + psta->sleepq_len = 0; +- spin_unlock_bh(&psta->sleep_q.lock); +- +- spin_lock_bh(&pxmitpriv->lock); + + /* vo */ +- spin_lock_bh(&pstaxmitpriv->vo_q.sta_pending.lock); ++ /* spin_lock_bh(&(pxmitpriv->vo_pending.lock)); */ + rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending); + list_del_init(&(pstaxmitpriv->vo_q.tx_pending)); + phwxmit = pxmitpriv->hwxmits; + phwxmit->accnt -= pstaxmitpriv->vo_q.qcnt; + pstaxmitpriv->vo_q.qcnt = 0; +- spin_unlock_bh(&pstaxmitpriv->vo_q.sta_pending.lock); ++ /* spin_unlock_bh(&(pxmitpriv->vo_pending.lock)); */ + + /* vi */ +- spin_lock_bh(&pstaxmitpriv->vi_q.sta_pending.lock); ++ /* spin_lock_bh(&(pxmitpriv->vi_pending.lock)); */ + 
rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending); + list_del_init(&(pstaxmitpriv->vi_q.tx_pending)); + phwxmit = pxmitpriv->hwxmits+1; + phwxmit->accnt -= pstaxmitpriv->vi_q.qcnt; + pstaxmitpriv->vi_q.qcnt = 0; +- spin_unlock_bh(&pstaxmitpriv->vi_q.sta_pending.lock); ++ /* spin_unlock_bh(&(pxmitpriv->vi_pending.lock)); */ + + /* be */ +- spin_lock_bh(&pstaxmitpriv->be_q.sta_pending.lock); ++ /* spin_lock_bh(&(pxmitpriv->be_pending.lock)); */ + rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending); + list_del_init(&(pstaxmitpriv->be_q.tx_pending)); + phwxmit = pxmitpriv->hwxmits+2; + phwxmit->accnt -= pstaxmitpriv->be_q.qcnt; + pstaxmitpriv->be_q.qcnt = 0; +- spin_unlock_bh(&pstaxmitpriv->be_q.sta_pending.lock); ++ /* spin_unlock_bh(&(pxmitpriv->be_pending.lock)); */ + + /* bk */ +- spin_lock_bh(&pstaxmitpriv->bk_q.sta_pending.lock); ++ /* spin_lock_bh(&(pxmitpriv->bk_pending.lock)); */ + rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending); + list_del_init(&(pstaxmitpriv->bk_q.tx_pending)); + phwxmit = pxmitpriv->hwxmits+3; + phwxmit->accnt -= pstaxmitpriv->bk_q.qcnt; + pstaxmitpriv->bk_q.qcnt = 0; +- spin_unlock_bh(&pstaxmitpriv->bk_q.sta_pending.lock); ++ /* spin_unlock_bh(&(pxmitpriv->bk_pending.lock)); */ + + spin_unlock_bh(&pxmitpriv->lock); + +diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c +index d78cff7ed6a01..6ecaff9728fd4 100644 +--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c ++++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c +@@ -1871,6 +1871,8 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram + struct list_head *plist, *phead; + struct xmit_frame *pxmitframe; + ++ spin_lock_bh(&pframequeue->lock); ++ + phead = get_list_head(pframequeue); + plist = get_next(phead); + +@@ -1881,6 +1883,7 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram + + rtw_free_xmitframe(pxmitpriv, pxmitframe); + } ++ spin_unlock_bh(&pframequeue->lock); + } + + s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe) +@@ -1943,7 +1946,6 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe) + struct sta_info *psta; + struct tx_servq *ptxservq; + struct pkt_attrib *pattrib = &pxmitframe->attrib; +- struct xmit_priv *xmit_priv = &padapter->xmitpriv; + struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits; + sint res = _SUCCESS; + +@@ -1972,14 +1974,12 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe) + + ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index)); + +- spin_lock_bh(&xmit_priv->lock); + if (list_empty(&ptxservq->tx_pending)) + list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue)); + + list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending)); + ptxservq->qcnt++; + phwxmits[ac_index].accnt++; +- spin_unlock_bh(&xmit_priv->lock); + + exit: + +@@ -2397,10 +2397,11 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta) + struct list_head *xmitframe_plist, *xmitframe_phead; + struct xmit_frame *pxmitframe = NULL; + struct sta_priv *pstapriv = &padapter->stapriv; ++ struct xmit_priv *pxmitpriv = &padapter->xmitpriv; + + psta_bmc = rtw_get_bcmc_stainfo(padapter); + +- spin_lock_bh(&psta->sleep_q.lock); ++ spin_lock_bh(&pxmitpriv->lock); + + xmitframe_phead = get_list_head(&psta->sleep_q); + xmitframe_plist = get_next(xmitframe_phead); +@@ -2508,7 +2509,7 @@ void 
wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta) + + _exit: + +- spin_unlock_bh(&psta->sleep_q.lock); ++ spin_unlock_bh(&pxmitpriv->lock); + + if (update_mask) + update_beacon(padapter, _TIM_IE_, NULL, true); +@@ -2520,8 +2521,9 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst + struct list_head *xmitframe_plist, *xmitframe_phead; + struct xmit_frame *pxmitframe = NULL; + struct sta_priv *pstapriv = &padapter->stapriv; ++ struct xmit_priv *pxmitpriv = &padapter->xmitpriv; + +- spin_lock_bh(&psta->sleep_q.lock); ++ spin_lock_bh(&pxmitpriv->lock); + + xmitframe_phead = get_list_head(&psta->sleep_q); + xmitframe_plist = get_next(xmitframe_phead); +@@ -2577,7 +2579,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst + } + } + +- spin_unlock_bh(&psta->sleep_q.lock); ++ spin_unlock_bh(&pxmitpriv->lock); + } + + void enqueue_pending_xmitbuf( +diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c +index ce5bf2861d0c1..44799c4a9f35b 100644 +--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c ++++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c +@@ -572,7 +572,9 @@ s32 rtl8723bs_hal_xmit( + rtw_issue_addbareq_cmd(padapter, pxmitframe); + } + ++ spin_lock_bh(&pxmitpriv->lock); + err = rtw_xmitframe_enqueue(padapter, pxmitframe); ++ spin_unlock_bh(&pxmitpriv->lock); + if (err != _SUCCESS) { + RT_TRACE(_module_hal_xmit_c_, _drv_err_, ("rtl8723bs_hal_xmit: enqueue xmitframe fail\n")); + rtw_free_xmitframe(pxmitpriv, pxmitframe); +diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c +index 5c53098755a35..441bc057896f5 100644 +--- a/drivers/virtio/virtio.c ++++ b/drivers/virtio/virtio.c +@@ -167,14 +167,13 @@ void virtio_add_status(struct virtio_device *dev, unsigned int status) + } + EXPORT_SYMBOL_GPL(virtio_add_status); + +-int virtio_finalize_features(struct virtio_device *dev) ++/* Do some validation, then set FEATURES_OK */ ++static int virtio_features_ok(struct virtio_device *dev) + { +- int ret = dev->config->finalize_features(dev); + unsigned status; ++ int ret; + + might_sleep(); +- if (ret) +- return ret; + + ret = arch_has_restricted_virtio_memory_access(); + if (ret) { +@@ -203,7 +202,6 @@ int virtio_finalize_features(struct virtio_device *dev) + } + return 0; + } +-EXPORT_SYMBOL_GPL(virtio_finalize_features); + + static int virtio_dev_probe(struct device *_d) + { +@@ -240,17 +238,6 @@ static int virtio_dev_probe(struct device *_d) + driver_features_legacy = driver_features; + } + +- /* +- * Some devices detect legacy solely via F_VERSION_1. Write +- * F_VERSION_1 to force LE config space accesses before FEATURES_OK for +- * these when needed. +- */ +- if (drv->validate && !virtio_legacy_is_little_endian() +- && device_features & BIT_ULL(VIRTIO_F_VERSION_1)) { +- dev->features = BIT_ULL(VIRTIO_F_VERSION_1); +- dev->config->finalize_features(dev); +- } +- + if (device_features & (1ULL << VIRTIO_F_VERSION_1)) + dev->features = driver_features & device_features; + else +@@ -261,13 +248,26 @@ static int virtio_dev_probe(struct device *_d) + if (device_features & (1ULL << i)) + __virtio_set_bit(dev, i); + ++ err = dev->config->finalize_features(dev); ++ if (err) ++ goto err; ++ + if (drv->validate) { ++ u64 features = dev->features; ++ + err = drv->validate(dev); + if (err) + goto err; ++ ++ /* Did validation change any features? Then write them again. 
*/ ++ if (features != dev->features) { ++ err = dev->config->finalize_features(dev); ++ if (err) ++ goto err; ++ } + } + +- err = virtio_finalize_features(dev); ++ err = virtio_features_ok(dev); + if (err) + goto err; + +@@ -438,7 +438,11 @@ int virtio_device_restore(struct virtio_device *dev) + /* We have a driver! */ + virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER); + +- ret = virtio_finalize_features(dev); ++ ret = dev->config->finalize_features(dev); ++ if (ret) ++ goto err; ++ ++ ret = virtio_features_ok(dev); + if (ret) + goto err; + +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c +index 928700d57eb67..6513079c728be 100644 +--- a/fs/ext4/resize.c ++++ b/fs/ext4/resize.c +@@ -74,6 +74,11 @@ int ext4_resize_begin(struct super_block *sb) + return -EPERM; + } + ++ if (ext4_has_feature_sparse_super2(sb)) { ++ ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2"); ++ return -EOPNOTSUPP; ++ } ++ + if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING, + &EXT4_SB(sb)->s_ext4_flags)) + ret = -EBUSY; +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c +index d100b5dfedbd2..8ac91ba05d6de 100644 +--- a/fs/fuse/dev.c ++++ b/fs/fuse/dev.c +@@ -945,7 +945,17 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep, + + while (count) { + if (cs->write && cs->pipebufs && page) { +- return fuse_ref_page(cs, page, offset, count); ++ /* ++ * Can't control lifetime of pipe buffers, so always ++ * copy user pages. ++ */ ++ if (cs->req->args->user_pages) { ++ err = fuse_copy_fill(cs); ++ if (err) ++ return err; ++ } else { ++ return fuse_ref_page(cs, page, offset, count); ++ } + } else if (!cs->len) { + if (cs->move_pages && page && + offset == 0 && count == PAGE_SIZE) { +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index e81d1c3eb7e11..d1bc96ee6eb3d 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -1418,6 +1418,7 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii, + (PAGE_SIZE - ret) & (PAGE_SIZE - 1); + } + ++ ap->args.user_pages = true; + if (write) + ap->args.in_pages = true; + else +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h +index b159d8b5e8937..b10cddd723559 100644 +--- a/fs/fuse/fuse_i.h ++++ b/fs/fuse/fuse_i.h +@@ -263,6 +263,7 @@ struct fuse_args { + bool nocreds:1; + bool in_pages:1; + bool out_pages:1; ++ bool user_pages:1; + bool out_argvar:1; + bool page_zeroing:1; + bool page_replace:1; +diff --git a/fs/pipe.c b/fs/pipe.c +index d6d4019ba32f5..9f2ca1b1c17ac 100644 +--- a/fs/pipe.c ++++ b/fs/pipe.c +@@ -252,7 +252,8 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to) + */ + was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage); + for (;;) { +- unsigned int head = pipe->head; ++ /* Read ->head with a barrier vs post_one_notification() */ ++ unsigned int head = smp_load_acquire(&pipe->head); + unsigned int tail = pipe->tail; + unsigned int mask = pipe->ring_size - 1; + +@@ -830,10 +831,8 @@ void free_pipe_info(struct pipe_inode_info *pipe) + int i; + + #ifdef CONFIG_WATCH_QUEUE +- if (pipe->watch_queue) { ++ if (pipe->watch_queue) + watch_queue_clear(pipe->watch_queue); +- put_watch_queue(pipe->watch_queue); +- } + #endif + + (void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0); +@@ -843,6 +842,10 @@ void free_pipe_info(struct pipe_inode_info *pipe) + if (buf->ops) + pipe_buf_release(pipe, buf); + } ++#ifdef CONFIG_WATCH_QUEUE ++ if (pipe->watch_queue) ++ put_watch_queue(pipe->watch_queue); ++#endif + if (pipe->tmp_page) + __free_page(pipe->tmp_page); + kfree(pipe->bufs); +diff --git 
a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h +index f5e829e12a76d..eba1f1cbc9fbd 100644 +--- a/include/linux/mlx5/mlx5_ifc.h ++++ b/include/linux/mlx5/mlx5_ifc.h +@@ -9307,8 +9307,8 @@ struct mlx5_ifc_bufferx_reg_bits { + u8 reserved_at_0[0x6]; + u8 lossy[0x1]; + u8 epsb[0x1]; +- u8 reserved_at_8[0xc]; +- u8 size[0xc]; ++ u8 reserved_at_8[0x8]; ++ u8 size[0x10]; + + u8 xoff_threshold[0x10]; + u8 xon_threshold[0x10]; +diff --git a/include/linux/virtio.h b/include/linux/virtio.h +index 8ecc2e208d613..90c5ad5568097 100644 +--- a/include/linux/virtio.h ++++ b/include/linux/virtio.h +@@ -135,7 +135,6 @@ void virtio_break_device(struct virtio_device *dev); + void virtio_config_changed(struct virtio_device *dev); + void virtio_config_disable(struct virtio_device *dev); + void virtio_config_enable(struct virtio_device *dev); +-int virtio_finalize_features(struct virtio_device *dev); + #ifdef CONFIG_PM_SLEEP + int virtio_device_freeze(struct virtio_device *dev); + int virtio_device_restore(struct virtio_device *dev); +diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h +index 8519b3ae5d52e..b341dd62aa4da 100644 +--- a/include/linux/virtio_config.h ++++ b/include/linux/virtio_config.h +@@ -62,8 +62,9 @@ struct virtio_shm_region { + * Returns the first 64 feature bits (all we currently need). + * @finalize_features: confirm what device features we'll be using. + * vdev: the virtio_device +- * This gives the final feature bits for the device: it can change ++ * This sends the driver feature bits to the device: it can change + * the dev->feature bits if it wants. ++ * Note: despite the name this can be called any number of times. + * Returns 0 on success or error status + * @bus_name: return the bus name associated with the device (optional) + * vdev: the virtio_device +diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h +index c994d1b2cdbaa..3b9a40ae8bdba 100644 +--- a/include/linux/watch_queue.h ++++ b/include/linux/watch_queue.h +@@ -28,7 +28,8 @@ struct watch_type_filter { + struct watch_filter { + union { + struct rcu_head rcu; +- unsigned long type_filter[2]; /* Bitmask of accepted types */ ++ /* Bitmask of accepted types */ ++ DECLARE_BITMAP(type_filter, WATCH_TYPE__NR); + }; + u32 nr_filters; /* Number of filters */ + struct watch_type_filter filters[]; +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 71ed0616d83bd..953dd9568dd74 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -1490,10 +1490,12 @@ static int __init set_buf_size(char *str) + if (!str) + return 0; + buf_size = memparse(str, &str); +- /* nr_entries can not be zero */ +- if (buf_size == 0) +- return 0; +- trace_buf_size = buf_size; ++ /* ++ * nr_entries can not be zero and the startup ++ * tests require some buffer space. Therefore ++ * ensure we have at least 4096 bytes of buffer. 
++ */ ++ trace_buf_size = max(4096UL, buf_size); + return 1; + } + __setup("trace_buf_size=", set_buf_size); +diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c +index 0ef8f65bd2d71..e3f144d960261 100644 +--- a/kernel/watch_queue.c ++++ b/kernel/watch_queue.c +@@ -54,6 +54,7 @@ static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe, + bit += page->index; + + set_bit(bit, wqueue->notes_bitmap); ++ generic_pipe_buf_release(pipe, buf); + } + + // No try_steal function => no stealing +@@ -112,7 +113,7 @@ static bool post_one_notification(struct watch_queue *wqueue, + buf->offset = offset; + buf->len = len; + buf->flags = PIPE_BUF_FLAG_WHOLE; +- pipe->head = head + 1; ++ smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */ + + if (!test_and_clear_bit(note, wqueue->notes_bitmap)) { + spin_unlock_irq(&pipe->rd_wait.lock); +@@ -243,7 +244,8 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes) + goto error; + } + +- ret = pipe_resize_ring(pipe, nr_notes); ++ nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE; ++ ret = pipe_resize_ring(pipe, roundup_pow_of_two(nr_notes)); + if (ret < 0) + goto error; + +@@ -268,7 +270,7 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes) + wqueue->notes = pages; + wqueue->notes_bitmap = bitmap; + wqueue->nr_pages = nr_pages; +- wqueue->nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE; ++ wqueue->nr_notes = nr_notes; + return 0; + + error_p: +@@ -320,7 +322,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe, + tf[i].info_mask & WATCH_INFO_LENGTH) + goto err_filter; + /* Ignore any unknown types */ +- if (tf[i].type >= sizeof(wfilter->type_filter) * 8) ++ if (tf[i].type >= WATCH_TYPE__NR) + continue; + nr_filter++; + } +@@ -336,7 +338,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe, + + q = wfilter->filters; + for (i = 0; i < filter.nr_filters; i++) { +- if (tf[i].type >= sizeof(wfilter->type_filter) * BITS_PER_LONG) ++ if (tf[i].type >= WATCH_TYPE__NR) + continue; + + q->type = tf[i].type; +@@ -371,6 +373,7 @@ static void __put_watch_queue(struct kref *kref) + + for (i = 0; i < wqueue->nr_pages; i++) + __free_page(wqueue->notes[i]); ++ bitmap_free(wqueue->notes_bitmap); + + wfilter = rcu_access_pointer(wqueue->filter); + if (wfilter) +@@ -566,7 +569,7 @@ void watch_queue_clear(struct watch_queue *wqueue) + rcu_read_lock(); + spin_lock_bh(&wqueue->lock); + +- /* Prevent new additions and prevent notifications from happening */ ++ /* Prevent new notifications from being stored. 
*/ + wqueue->defunct = true; + + while (!hlist_empty(&wqueue->watches)) { +diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c +index 23bd26057a828..9e0eef7fe9add 100644 +--- a/net/ax25/af_ax25.c ++++ b/net/ax25/af_ax25.c +@@ -87,6 +87,13 @@ again: + ax25_for_each(s, &ax25_list) { + if (s->ax25_dev == ax25_dev) { + sk = s->sk; ++ if (!sk) { ++ spin_unlock_bh(&ax25_list_lock); ++ s->ax25_dev = NULL; ++ ax25_disconnect(s, ENETUNREACH); ++ spin_lock_bh(&ax25_list_lock); ++ goto again; ++ } + sock_hold(sk); + spin_unlock_bh(&ax25_list_lock); + lock_sock(sk); +diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c +index 99303897b7bb7..989b3f7ee85f4 100644 +--- a/net/core/net-sysfs.c ++++ b/net/core/net-sysfs.c +@@ -213,7 +213,7 @@ static ssize_t speed_show(struct device *dev, + if (!rtnl_trylock()) + return restart_syscall(); + +- if (netif_running(netdev)) { ++ if (netif_running(netdev) && netif_device_present(netdev)) { + struct ethtool_link_ksettings cmd; + + if (!__ethtool_get_link_ksettings(netdev, &cmd)) +diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c +index 5aa7344dbec7f..3450c9ba2728c 100644 +--- a/net/ipv4/esp4_offload.c ++++ b/net/ipv4/esp4_offload.c +@@ -160,6 +160,9 @@ static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x, + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; + } + ++ if (proto == IPPROTO_IPV6) ++ skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4; ++ + __skb_pull(skb, skb_transport_offset(skb)); + ops = rcu_dereference(inet_offloads[proto]); + if (likely(ops && ops->callbacks.gso_segment)) +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index 072c348237536..7c5bf39dca5d1 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -4979,6 +4979,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, + nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) + goto error; + ++ spin_lock_bh(&ifa->lock); + if (!((ifa->flags&IFA_F_PERMANENT) && + (ifa->prefered_lft == INFINITY_LIFE_TIME))) { + preferred = ifa->prefered_lft; +@@ -5000,6 +5001,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, + preferred = INFINITY_LIFE_TIME; + valid = INFINITY_LIFE_TIME; + } ++ spin_unlock_bh(&ifa->lock); + + if (!ipv6_addr_any(&ifa->peer_addr)) { + if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 || +diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c +index 4af56affaafd4..1c3f02d05d2bf 100644 +--- a/net/ipv6/esp6_offload.c ++++ b/net/ipv6/esp6_offload.c +@@ -198,6 +198,9 @@ static struct sk_buff *xfrm6_beet_gso_segment(struct xfrm_state *x, + ipv6_skip_exthdr(skb, 0, &proto, &frag); + } + ++ if (proto == IPPROTO_IPIP) ++ skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6; ++ + __skb_pull(skb, skb_transport_offset(skb)); + ops = rcu_dereference(inet6_offloads[proto]); + if (likely(ops && ops->callbacks.gso_segment)) +diff --git a/net/sctp/diag.c b/net/sctp/diag.c +index babadd6720a2b..68ff82ff49a3d 100644 +--- a/net/sctp/diag.c ++++ b/net/sctp/diag.c +@@ -61,10 +61,6 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r, + r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX; + r->idiag_retrans = asoc->rtx_data_chunks; + r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies); +- } else { +- r->idiag_timer = 0; +- r->idiag_retrans = 0; +- r->idiag_expires = 0; + } + } + +@@ -144,13 +140,14 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc, + r = nlmsg_data(nlh); + BUG_ON(!sk_fullsock(sk)); + ++ r->idiag_timer = 0; ++ r->idiag_retrans = 0; ++ 
r->idiag_expires = 0; + if (asoc) { + inet_diag_msg_sctpasoc_fill(r, sk, asoc); + } else { + inet_diag_msg_common_fill(r, sk); + r->idiag_state = sk->sk_state; +- r->idiag_timer = 0; +- r->idiag_retrans = 0; + } + + if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin)) +diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c +index 12e535b43d887..6911f1cab2063 100644 +--- a/net/tipc/bearer.c ++++ b/net/tipc/bearer.c +@@ -342,16 +342,18 @@ static int tipc_enable_bearer(struct net *net, const char *name, + goto rejected; + } + +- test_and_set_bit_lock(0, &b->up); +- rcu_assign_pointer(tn->bearer_list[bearer_id], b); +- if (skb) +- tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr); +- ++ /* Create monitoring data before accepting activate messages */ + if (tipc_mon_create(net, bearer_id)) { + bearer_disable(net, b); ++ kfree_skb(skb); + return -ENOMEM; + } + ++ test_and_set_bit_lock(0, &b->up); ++ rcu_assign_pointer(tn->bearer_list[bearer_id], b); ++ if (skb) ++ tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr); ++ + pr_info("Enabled bearer <%s>, priority %u\n", name, prio); + + return res; +diff --git a/net/tipc/link.c b/net/tipc/link.c +index fb835a3822f49..7a353ff628448 100644 +--- a/net/tipc/link.c ++++ b/net/tipc/link.c +@@ -2245,6 +2245,11 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, + break; + + case STATE_MSG: ++ /* Validate Gap ACK blocks, drop if invalid */ ++ glen = tipc_get_gap_ack_blks(&ga, l, hdr, true); ++ if (glen > dlen) ++ break; ++ + l->rcv_nxt_state = msg_seqno(hdr) + 1; + + /* Update own tolerance if peer indicates a non-zero value */ +@@ -2270,10 +2275,6 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, + break; + } + +- /* Receive Gap ACK blocks from peer if any */ +- glen = tipc_get_gap_ack_blks(&ga, l, hdr, true); +- if(glen > dlen) +- break; + tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr, + &l->mon_state, l->bearer_id); + +diff --git a/tools/testing/selftests/bpf/prog_tests/timer_crash.c b/tools/testing/selftests/bpf/prog_tests/timer_crash.c +new file mode 100644 +index 0000000000000..f74b82305da8c +--- /dev/null ++++ b/tools/testing/selftests/bpf/prog_tests/timer_crash.c +@@ -0,0 +1,32 @@ ++// SPDX-License-Identifier: GPL-2.0 ++#include <test_progs.h> ++#include "timer_crash.skel.h" ++ ++enum { ++ MODE_ARRAY, ++ MODE_HASH, ++}; ++ ++static void test_timer_crash_mode(int mode) ++{ ++ struct timer_crash *skel; ++ ++ skel = timer_crash__open_and_load(); ++ if (!ASSERT_OK_PTR(skel, "timer_crash__open_and_load")) ++ return; ++ skel->bss->pid = getpid(); ++ skel->bss->crash_map = mode; ++ if (!ASSERT_OK(timer_crash__attach(skel), "timer_crash__attach")) ++ goto end; ++ usleep(1); ++end: ++ timer_crash__destroy(skel); ++} ++ ++void test_timer_crash(void) ++{ ++ if (test__start_subtest("array")) ++ test_timer_crash_mode(MODE_ARRAY); ++ if (test__start_subtest("hash")) ++ test_timer_crash_mode(MODE_HASH); ++} +diff --git a/tools/testing/selftests/bpf/progs/timer_crash.c b/tools/testing/selftests/bpf/progs/timer_crash.c +new file mode 100644 +index 0000000000000..f8f7944e70dae +--- /dev/null ++++ b/tools/testing/selftests/bpf/progs/timer_crash.c +@@ -0,0 +1,54 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++#include <vmlinux.h> ++#include <bpf/bpf_tracing.h> ++#include <bpf/bpf_helpers.h> ++ ++struct map_elem { ++ struct bpf_timer timer; ++ struct bpf_spin_lock lock; ++}; ++ ++struct { ++ __uint(type, BPF_MAP_TYPE_ARRAY); ++ __uint(max_entries, 1); ++ __type(key, int); ++ __type(value, 
struct map_elem); ++} amap SEC(".maps"); ++ ++struct { ++ __uint(type, BPF_MAP_TYPE_HASH); ++ __uint(max_entries, 1); ++ __type(key, int); ++ __type(value, struct map_elem); ++} hmap SEC(".maps"); ++ ++int pid = 0; ++int crash_map = 0; /* 0 for amap, 1 for hmap */ ++ ++SEC("fentry/do_nanosleep") ++int sys_enter(void *ctx) ++{ ++ struct map_elem *e, value = {}; ++ void *map = crash_map ? (void *)&hmap : (void *)&amap; ++ ++ if (bpf_get_current_task_btf()->tgid != pid) ++ return 0; ++ ++ *(void **)&value = (void *)0xdeadcaf3; ++ ++ bpf_map_update_elem(map, &(int){0}, &value, 0); ++ /* For array map, doing bpf_map_update_elem will do a ++ * check_and_free_timer_in_array, which will trigger the crash if timer ++ * pointer was overwritten, for hmap we need to use bpf_timer_cancel. ++ */ ++ if (crash_map == 1) { ++ e = bpf_map_lookup_elem(map, &(int){0}); ++ if (!e) ++ return 0; ++ bpf_timer_cancel(&e->timer); ++ } ++ return 0; ++} ++ ++char _license[] SEC("license") = "GPL"; +diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c +index 334a7eea20042..fba322d1c67a1 100644 +--- a/tools/testing/selftests/memfd/memfd_test.c ++++ b/tools/testing/selftests/memfd/memfd_test.c +@@ -455,6 +455,7 @@ static void mfd_fail_write(int fd) + printf("mmap()+mprotect() didn't fail as expected\n"); + abort(); + } ++ munmap(p, mfd_def_size); + } + + /* verify PUNCH_HOLE fails */ +diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh +index 3367fb5f2feff..3253fdc780d62 100755 +--- a/tools/testing/selftests/net/pmtu.sh ++++ b/tools/testing/selftests/net/pmtu.sh +@@ -799,7 +799,6 @@ setup_ovs_bridge() { + setup() { + [ "$(id -u)" -ne 0 ] && echo " need to run as root" && return $ksft_skip + +- cleanup + for arg do + eval setup_${arg} || { echo " ${arg} not supported"; return 1; } + done +@@ -810,7 +809,7 @@ trace() { + + for arg do + [ "${ns_cmd}" = "" ] && ns_cmd="${arg}" && continue +- ${ns_cmd} tcpdump -s 0 -i "${arg}" -w "${name}_${arg}.pcap" 2> /dev/null & ++ ${ns_cmd} tcpdump --immediate-mode -s 0 -i "${arg}" -w "${name}_${arg}.pcap" 2> /dev/null & + tcpdump_pids="${tcpdump_pids} $!" + ns_cmd= + done +@@ -1636,6 +1635,10 @@ run_test() { + + unset IFS + ++ # Since cleanup() relies on variables modified by this subshell, it ++ # has to run in this context. 
++ trap cleanup EXIT ++ + if [ "$VERBOSE" = "1" ]; then + printf "\n##########################################################################\n\n" + fi +diff --git a/tools/testing/selftests/vm/map_fixed_noreplace.c b/tools/testing/selftests/vm/map_fixed_noreplace.c +index d91bde5112686..eed44322d1a63 100644 +--- a/tools/testing/selftests/vm/map_fixed_noreplace.c ++++ b/tools/testing/selftests/vm/map_fixed_noreplace.c +@@ -17,9 +17,6 @@ + #define MAP_FIXED_NOREPLACE 0x100000 + #endif + +-#define BASE_ADDRESS (256ul * 1024 * 1024) +- +- + static void dump_maps(void) + { + char cmd[32]; +@@ -28,18 +25,46 @@ static void dump_maps(void) + system(cmd); + } + ++static unsigned long find_base_addr(unsigned long size) ++{ ++ void *addr; ++ unsigned long flags; ++ ++ flags = MAP_PRIVATE | MAP_ANONYMOUS; ++ addr = mmap(NULL, size, PROT_NONE, flags, -1, 0); ++ if (addr == MAP_FAILED) { ++ printf("Error: couldn't map the space we need for the test\n"); ++ return 0; ++ } ++ ++ if (munmap(addr, size) != 0) { ++ printf("Error: couldn't map the space we need for the test\n"); ++ return 0; ++ } ++ return (unsigned long)addr; ++} ++ + int main(void) + { ++ unsigned long base_addr; + unsigned long flags, addr, size, page_size; + char *p; + + page_size = sysconf(_SC_PAGE_SIZE); + ++ //let's find a base addr that is free before we start the tests ++ size = 5 * page_size; ++ base_addr = find_base_addr(size); ++ if (!base_addr) { ++ printf("Error: couldn't map the space we need for the test\n"); ++ return 1; ++ } ++ + flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE; + + // Check we can map all the areas we need below + errno = 0; +- addr = BASE_ADDRESS; ++ addr = base_addr; + size = 5 * page_size; + p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); + +@@ -60,7 +85,7 @@ int main(void) + printf("unmap() successful\n"); + + errno = 0; +- addr = BASE_ADDRESS + page_size; ++ addr = base_addr + page_size; + size = 3 * page_size; + p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); + printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); +@@ -80,7 +105,7 @@ int main(void) + * +4 | free | new + */ + errno = 0; +- addr = BASE_ADDRESS; ++ addr = base_addr; + size = 5 * page_size; + p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); + printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); +@@ -101,7 +126,7 @@ int main(void) + * +4 | free | + */ + errno = 0; +- addr = BASE_ADDRESS + (2 * page_size); ++ addr = base_addr + (2 * page_size); + size = page_size; + p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); + printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); +@@ -121,7 +146,7 @@ int main(void) + * +4 | free | new + */ + errno = 0; +- addr = BASE_ADDRESS + (3 * page_size); ++ addr = base_addr + (3 * page_size); + size = 2 * page_size; + p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); + printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); +@@ -141,7 +166,7 @@ int main(void) + * +4 | free | + */ + errno = 0; +- addr = BASE_ADDRESS; ++ addr = base_addr; + size = 2 * page_size; + p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); + printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); +@@ -161,7 +186,7 @@ int main(void) + * +4 | free | + */ + errno = 0; +- addr = BASE_ADDRESS; ++ addr = base_addr; + size = page_size; + p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); + printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); +@@ -181,7 +206,7 @@ int main(void) + * +4 | free | new + 
*/ + errno = 0; +- addr = BASE_ADDRESS + (4 * page_size); ++ addr = base_addr + (4 * page_size); + size = page_size; + p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); + printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); +@@ -192,7 +217,7 @@ int main(void) + return 1; + } + +- addr = BASE_ADDRESS; ++ addr = base_addr; + size = 5 * page_size; + if (munmap((void *)addr, size) != 0) { + dump_maps();
