commit:     09e0ccd61a70727124b10a20414fe21fb527ac53
Author:     Mike Pagano <mpagano@gentoo.org>
AuthorDate: Wed Jan  9 17:53:23 2019 +0000
Commit:     Mike Pagano <mpagano@gentoo.org>
CommitDate: Wed Jan  9 17:53:23 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=09e0ccd6

proj/linux-patches: Linux patch 4.14.92

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

 0000_README              |    6 +-
 1091_linux-4.14.92.patch | 3420 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3425 insertions(+), 1 deletion(-)

diff --git a/0000_README b/0000_README
index ca6677a..45efed8 100644
--- a/0000_README
+++ b/0000_README
@@ -403,10 +403,14 @@ Patch:  1089_4.14.90.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.14.90
 
-Patch:  1090.14.91.patch
+Patch:  1090_4.14.91.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.14.91
 
+Patch:  1091_4.14.92.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.14.92
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1091_linux-4.14.92.patch b/1091_linux-4.14.92.patch
new file mode 100644
index 0000000..46658a1
--- /dev/null
+++ b/1091_linux-4.14.92.patch
@@ -0,0 +1,3420 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 5f3d58142600..7d8b17ce8804 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1965,6 +1965,9 @@
+                       off
+                               Disables hypervisor mitigations and doesn't
+                               emit any warnings.
++                              It also drops the swap size and available
++                              RAM limit restriction on both hypervisor and
++                              bare metal.
+ 
+                       Default is 'flush'.
+ 
+diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/l1tf.rst
+index bae52b845de0..9f5924f81f89 100644
+--- a/Documentation/admin-guide/l1tf.rst
++++ b/Documentation/admin-guide/l1tf.rst
+@@ -405,6 +405,9 @@ time with the option "l1tf=". The valid arguments for this option are:
+ 
+   off         Disables hypervisor mitigations and doesn't emit any
+               warnings.
++              It also drops the swap size and available RAM limit restrictions
++              on both hypervisor and bare metal.
++
+   ============  =============================================================
+ 
+ The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
+@@ -576,7 +579,8 @@ Default mitigations
+   The kernel default mitigations for vulnerable processors are:
+ 
+   - PTE inversion to protect against malicious user space. This is done
+-    unconditionally and cannot be controlled.
++    unconditionally and cannot be controlled. The swap storage is limited
++    to ~16TB.
+ 
+   - L1D conditional flushing on VMENTER when EPT is enabled for
+     a guest.
+diff --git a/Makefile b/Makefile
+index a6fb3b158a19..be4d1f25cb29 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 91
++SUBLEVEL = 92
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
+index 555d463c0eaa..73cc4309fe01 100644
+--- a/arch/arm64/include/asm/kvm_arm.h
++++ b/arch/arm64/include/asm/kvm_arm.h
+@@ -99,7 +99,7 @@
+                        TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
+ 
+ /* VTCR_EL2 Registers bits */
+-#define VTCR_EL2_RES1         (1 << 31)
++#define VTCR_EL2_RES1         (1U << 31)
+ #define VTCR_EL2_HD           (1 << 22)
+ #define VTCR_EL2_HA           (1 << 21)
+ #define VTCR_EL2_PS_MASK      TCR_EL2_PS_MASK
+diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
+index 37fe58c19a90..542c3ede9722 100644
+--- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
++++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
+@@ -13,6 +13,7 @@
+ #include <stdint.h>
+ #include <stdio.h>
+ #include <stdlib.h>
++#include "../../../../include/linux/sizes.h"
+ 
+ int main(int argc, char *argv[])
+ {
+@@ -45,11 +46,11 @@ int main(int argc, char *argv[])
+       vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size;
+ 
+       /*
+-       * Align with 16 bytes: "greater than that used for any standard data
+-       * types by a MIPS compiler." -- See MIPS Run Linux (Second Edition).
++       * Align with 64KB: KEXEC needs load sections to be aligned to PAGE_SIZE,
++       * which may be as large as 64KB depending on the kernel configuration.
+        */
+ 
+-      vmlinuz_load_addr += (16 - vmlinux_size % 16);
++      vmlinuz_load_addr += (SZ_64K - vmlinux_size % SZ_64K);
+ 
+       printf("0x%llx\n", vmlinuz_load_addr);
+ 
+diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
+index c683c369bca5..c376f17e142c 100644
+--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
++++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
+@@ -286,7 +286,8 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_cn7xxx(int interface)
+       case 3:
+               return CVMX_HELPER_INTERFACE_MODE_LOOP;
+       case 4:
+-              return CVMX_HELPER_INTERFACE_MODE_RGMII;
++              /* TODO: Implement support for AGL (RGMII). */
++              return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+       default:
+               return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+       }
+diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
+index a41059d47d31..ed7ffe4e63a3 100644
+--- a/arch/mips/include/asm/cpu-info.h
++++ b/arch/mips/include/asm/cpu-info.h
+@@ -50,7 +50,7 @@ struct guest_info {
+ #define MIPS_CACHE_PINDEX     0x00000020      /* Physically indexed cache */
+ 
+ struct cpuinfo_mips {
+-      unsigned long           asid_cache;
++      u64                     asid_cache;
+ #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+       unsigned long           asid_mask;
+ #endif
+diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
+index c9f7e231e66b..59c8b11c090e 100644
+--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
++++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
+@@ -21,6 +21,7 @@
+ #define NODE3_ADDRSPACE_OFFSET 0x300000000000UL
+ 
+ #define pa_to_nid(addr)  (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
++#define nid_to_addrbase(nid) ((nid) << NODE_ADDRSPACE_SHIFT)
+ 
+ #define LEVELS_PER_SLICE 128
+ 
+diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
+index 0740be7d5d4a..24d6b42345fb 100644
+--- a/arch/mips/include/asm/mmu.h
++++ b/arch/mips/include/asm/mmu.h
+@@ -7,7 +7,7 @@
+ #include <linux/wait.h>
+ 
+ typedef struct {
+-      unsigned long asid[NR_CPUS];
++      u64 asid[NR_CPUS];
+       void *vdso;
+       atomic_t fp_mode_switching;
+ 
+diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
+index da2004cef2d5..47cc01d948df 100644
+--- a/arch/mips/include/asm/mmu_context.h
++++ b/arch/mips/include/asm/mmu_context.h
+@@ -75,14 +75,14 @@ extern unsigned long pgd_current[];
+  *  All unused by hardware upper bits will be considered
+  *  as a software asid extension.
+  */
+-static unsigned long asid_version_mask(unsigned int cpu)
++static inline u64 asid_version_mask(unsigned int cpu)
+ {
+       unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
+ 
+-      return ~(asid_mask | (asid_mask - 1));
++      return ~(u64)(asid_mask | (asid_mask - 1));
+ }
+ 
+-static unsigned long asid_first_version(unsigned int cpu)
++static inline u64 asid_first_version(unsigned int cpu)
+ {
+       return ~asid_version_mask(cpu) + 1;
+ }
+@@ -101,14 +101,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+ static inline void
+ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
+ {
+-      unsigned long asid = asid_cache(cpu);
++      u64 asid = asid_cache(cpu);
+ 
+       if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
+               if (cpu_has_vtag_icache)
+                       flush_icache_all();
+               local_flush_tlb_all();  /* start new asid cycle */
+-              if (!asid)              /* fix version if needed */
+-                      asid = asid_first_version(cpu);
+       }
+ 
+       cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h
+index f085fba41da5..b826b8473e95 100644
+--- a/arch/mips/include/asm/mmzone.h
++++ b/arch/mips/include/asm/mmzone.h
+@@ -7,7 +7,18 @@
+ #define _ASM_MMZONE_H_
+ 
+ #include <asm/page.h>
+-#include <mmzone.h>
++
++#ifdef CONFIG_NEED_MULTIPLE_NODES
++# include <mmzone.h>
++#endif
++
++#ifndef pa_to_nid
++#define pa_to_nid(addr) 0
++#endif
++
++#ifndef nid_to_addrbase
++#define nid_to_addrbase(nid) 0
++#endif
+ 
+ #ifdef CONFIG_DISCONTIGMEM
+ 
+diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
+index 67fe6dc5211c..a2252c2a9ded 100644
+--- a/arch/mips/include/asm/pgtable-64.h
++++ b/arch/mips/include/asm/pgtable-64.h
+@@ -271,6 +271,11 @@ static inline int pmd_bad(pmd_t pmd)
+ 
+ static inline int pmd_present(pmd_t pmd)
+ {
++#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
++      if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
++              return pmd_val(pmd) & _PAGE_PRESENT;
++#endif
++
+       return pmd_val(pmd) != (unsigned long) invalid_pte_table;
+ }
+ 
+diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
+index 7f12d7e27c94..e5190126080e 100644
+--- a/arch/mips/include/asm/r4kcache.h
++++ b/arch/mips/include/asm/r4kcache.h
+@@ -20,6 +20,7 @@
+ #include <asm/cpu-features.h>
+ #include <asm/cpu-type.h>
+ #include <asm/mipsmtregs.h>
++#include <asm/mmzone.h>
+ #include <linux/uaccess.h> /* for uaccess_kernel() */
+ 
+ extern void (*r4k_blast_dcache)(void);
+@@ -747,4 +748,25 @@ __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
+ __BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
+ __BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
+ 
++/* Currently, this is very specific to Loongson-3 */
++#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize)    \
++static inline void blast_##pfx##cache##lsize##_node(long node)                \
++{                                                                     \
++      unsigned long start = CAC_BASE | nid_to_addrbase(node);         \
++      unsigned long end = start + current_cpu_data.desc.waysize;      \
++      unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;     \
++      unsigned long ws_end = current_cpu_data.desc.ways <<            \
++                             current_cpu_data.desc.waybit;            \
++      unsigned long ws, addr;                                         \
++                                                                      \
++      for (ws = 0; ws < ws_end; ws += ws_inc)                         \
++              for (addr = start; addr < end; addr += lsize * 32)      \
++                      cache##lsize##_unroll32(addr|ws, indexop);      \
++}
++
++__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
++__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
++__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
++__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
++
+ #endif /* _ASM_R4KCACHE_H */
+diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
+index 48a9c6b90e07..9df3ebdc7b0f 100644
+--- a/arch/mips/kernel/vdso.c
++++ b/arch/mips/kernel/vdso.c
+@@ -126,8 +126,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ 
+       /* Map delay slot emulation page */
+       base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
+-                         VM_READ|VM_WRITE|VM_EXEC|
+-                         VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
++                         VM_READ | VM_EXEC |
++                         VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+                          0, NULL);
+       if (IS_ERR_VALUE(base)) {
+               ret = base;
+diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
+index 5450f4d1c920..e2d46cb93ca9 100644
+--- a/arch/mips/math-emu/dsemul.c
++++ b/arch/mips/math-emu/dsemul.c
+@@ -214,8 +214,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
+ {
+       int isa16 = get_isa16_mode(regs->cp0_epc);
+       mips_instruction break_math;
+-      struct emuframe __user *fr;
+-      int err, fr_idx;
++      unsigned long fr_uaddr;
++      struct emuframe fr;
++      int fr_idx, ret;
+ 
+       /* NOP is easy */
+       if (ir == 0)
+@@ -250,27 +251,31 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
+               fr_idx = alloc_emuframe();
+       if (fr_idx == BD_EMUFRAME_NONE)
+               return SIGBUS;
+-      fr = &dsemul_page()[fr_idx];
+ 
+       /* Retrieve the appropriately encoded break instruction */
+       break_math = BREAK_MATH(isa16);
+ 
+       /* Write the instructions to the frame */
+       if (isa16) {
+-              err = __put_user(ir >> 16,
+-                               (u16 __user *)(&fr->emul));
+-              err |= __put_user(ir & 0xffff,
+-                                (u16 __user *)((long)(&fr->emul) + 2));
+-              err |= __put_user(break_math >> 16,
+-                                (u16 __user *)(&fr->badinst));
+-              err |= __put_user(break_math & 0xffff,
+-                                (u16 __user *)((long)(&fr->badinst) + 2));
++              union mips_instruction _emul = {
++                      .halfword = { ir >> 16, ir }
++              };
++              union mips_instruction _badinst = {
++                      .halfword = { break_math >> 16, break_math }
++              };
++
++              fr.emul = _emul.word;
++              fr.badinst = _badinst.word;
+       } else {
+-              err = __put_user(ir, &fr->emul);
+-              err |= __put_user(break_math, &fr->badinst);
++              fr.emul = ir;
++              fr.badinst = break_math;
+       }
+ 
+-      if (unlikely(err)) {
++      /* Write the frame to user memory */
++      fr_uaddr = (unsigned long)&dsemul_page()[fr_idx];
++      ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr),
++                              FOLL_FORCE | FOLL_WRITE);
++      if (unlikely(ret != sizeof(fr))) {
+               MIPS_FPU_EMU_INC_STATS(errors);
+               free_emuframe(fr_idx, current->mm);
+               return SIGBUS;
+@@ -282,10 +287,7 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
+       atomic_set(&current->thread.bd_emu_frame, fr_idx);
+ 
+       /* Change user register context to execute the frame */
+-      regs->cp0_epc = (unsigned long)&fr->emul | isa16;
+-
+-      /* Ensure the icache observes our newly written frame */
+-      flush_cache_sigtramp((unsigned long)&fr->emul);
++      regs->cp0_epc = fr_uaddr | isa16;
+ 
+       return 0;
+ }
+diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
+index 3466fcdae0ca..01848cdf2074 100644
+--- a/arch/mips/mm/c-r3k.c
++++ b/arch/mips/mm/c-r3k.c
+@@ -245,7 +245,7 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
+       pmd_t *pmdp;
+       pte_t *ptep;
+ 
+-      pr_debug("cpage[%08lx,%08lx]\n",
++      pr_debug("cpage[%08llx,%08lx]\n",
+                cpu_context(smp_processor_id(), mm), addr);
+ 
+       /* No ASID => no such page in the cache.  */
+diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
+index a5893b2cdc0e..bacd67f5d71d 100644
+--- a/arch/mips/mm/c-r4k.c
++++ b/arch/mips/mm/c-r4k.c
+@@ -459,11 +459,28 @@ static void r4k_blast_scache_setup(void)
+               r4k_blast_scache = blast_scache128;
+ }
+ 
++static void (*r4k_blast_scache_node)(long node);
++
++static void r4k_blast_scache_node_setup(void)
++{
++      unsigned long sc_lsize = cpu_scache_line_size();
++
++      if (current_cpu_type() != CPU_LOONGSON3)
++              r4k_blast_scache_node = (void *)cache_noop;
++      else if (sc_lsize == 16)
++              r4k_blast_scache_node = blast_scache16_node;
++      else if (sc_lsize == 32)
++              r4k_blast_scache_node = blast_scache32_node;
++      else if (sc_lsize == 64)
++              r4k_blast_scache_node = blast_scache64_node;
++      else if (sc_lsize == 128)
++              r4k_blast_scache_node = blast_scache128_node;
++}
++
+ static inline void local_r4k___flush_cache_all(void * args)
+ {
+       switch (current_cpu_type()) {
+       case CPU_LOONGSON2:
+-      case CPU_LOONGSON3:
+       case CPU_R4000SC:
+       case CPU_R4000MC:
+       case CPU_R4400SC:
+@@ -480,6 +497,11 @@ static inline void local_r4k___flush_cache_all(void * args)
+               r4k_blast_scache();
+               break;
+ 
++      case CPU_LOONGSON3:
++              /* Use get_ebase_cpunum() for both NUMA=y/n */
++              r4k_blast_scache_node(get_ebase_cpunum() >> 2);
++              break;
++
+       case CPU_BMIPS5000:
+               r4k_blast_scache();
+               __sync();
+@@ -840,10 +862,14 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
+ 
+       preempt_disable();
+       if (cpu_has_inclusive_pcaches) {
+-              if (size >= scache_size)
+-                      r4k_blast_scache();
+-              else
++              if (size >= scache_size) {
++                      if (current_cpu_type() != CPU_LOONGSON3)
++                              r4k_blast_scache();
++                      else
++                              r4k_blast_scache_node(pa_to_nid(addr));
++              } else {
+                       blast_scache_range(addr, addr + size);
++              }
+               preempt_enable();
+               __sync();
+               return;
+@@ -877,9 +903,12 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
+ 
+       preempt_disable();
+       if (cpu_has_inclusive_pcaches) {
+-              if (size >= scache_size)
+-                      r4k_blast_scache();
+-              else {
++              if (size >= scache_size) {
++                      if (current_cpu_type() != CPU_LOONGSON3)
++                              r4k_blast_scache();
++                      else
++                              r4k_blast_scache_node(pa_to_nid(addr));
++              } else {
+                       /*
+                        * There is no clearly documented alignment requirement
+                        * for the cache instruction on MIPS processors and
+@@ -1910,6 +1939,7 @@ void r4k_cache_init(void)
+       r4k_blast_scache_page_setup();
+       r4k_blast_scache_page_indexed_setup();
+       r4k_blast_scache_setup();
++      r4k_blast_scache_node_setup();
+ #ifdef CONFIG_EVA
+       r4k_blast_dcache_user_page_setup();
+       r4k_blast_icache_user_page_setup();
+diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
+index 93cd0f1ca12b..d8dfd645bf02 100644
+--- a/arch/s390/pci/pci_clp.c
++++ b/arch/s390/pci/pci_clp.c
+@@ -437,7 +437,7 @@ int clp_get_state(u32 fid, enum zpci_state *state)
+       struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
+       int rc;
+ 
+-      rrb = clp_alloc_block(GFP_KERNEL);
++      rrb = clp_alloc_block(GFP_ATOMIC);
+       if (!rrb)
+               return -ENOMEM;
+ 
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 367cdd263a5c..523308d030d2 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1355,7 +1355,7 @@ asmlinkage void kvm_spurious_fault(void);
+       "cmpb $0, kvm_rebooting \n\t"         \
+       "jne 668b \n\t"                       \
+       __ASM_SIZE(push) " $666b \n\t"        \
+-      "call kvm_spurious_fault \n\t"        \
++      "jmp kvm_spurious_fault \n\t"         \
+       ".popsection \n\t" \
+       _ASM_EXTABLE(666b, 667b)
+ 
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index f7a6d6203e13..98b24d668b08 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -999,7 +999,8 @@ static void __init l1tf_select_mitigation(void)
+ #endif
+ 
+       half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
+-      if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
++      if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
++                      e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
+               pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+               pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
+                               half_pa);
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 8eec37d37c3d..16bb8e35605e 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -7275,13 +7275,16 @@ static __init int hardware_setup(void)
+ 
+       kvm_mce_cap_supported |= MCG_LMCE_P;
+ 
+-      return alloc_kvm_area();
++      r = alloc_kvm_area();
++      if (r)
++              goto out;
++      return 0;
+ 
+ out:
+       for (i = 0; i < VMX_BITMAP_NR; i++)
+               free_page((unsigned long)vmx_bitmap[i]);
+ 
+-    return r;
++      return r;
+ }
+ 
+ static __exit void hardware_unsetup(void)
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 94b8d90830d1..32bb38f6fc18 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -890,7 +890,7 @@ unsigned long max_swapfile_size(void)
+ 
+       pages = generic_max_swapfile_size();
+ 
+-      if (boot_cpu_has_bug(X86_BUG_L1TF)) {
++      if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
+               /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
+               unsigned long long l1tf_limit = l1tf_pfn_limit();
+               /*
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index 642357aff216..624edfbff02d 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -574,7 +574,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
+                                                          paddr_end,
+                                                          page_size_mask,
+                                                          prot);
+-                              __flush_tlb_all();
+                               continue;
+                       }
+                       /*
+@@ -617,7 +616,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
+               pud_populate(&init_mm, pud, pmd);
+               spin_unlock(&init_mm.page_table_lock);
+       }
+-      __flush_tlb_all();
+ 
+       update_page_count(PG_LEVEL_1G, pages);
+ 
+@@ -658,7 +656,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
+                       paddr_last = phys_pud_init(pud, paddr,
+                                       paddr_end,
+                                       page_size_mask);
+-                      __flush_tlb_all();
+                       continue;
+               }
+ 
+@@ -670,7 +667,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
+               p4d_populate(&init_mm, p4d, pud);
+               spin_unlock(&init_mm.page_table_lock);
+       }
+-      __flush_tlb_all();
+ 
+       return paddr_last;
+ }
+@@ -723,8 +719,6 @@ kernel_physical_mapping_init(unsigned long paddr_start,
+       if (pgd_changed)
+               sync_global_pgds(vaddr_start, vaddr_end - 1);
+ 
+-      __flush_tlb_all();
+-
+       return paddr_last;
+ }
+ 
+diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
+index e5473525e7b2..ae9b2f568879 100644
+--- a/drivers/base/platform-msi.c
++++ b/drivers/base/platform-msi.c
+@@ -374,14 +374,16 @@ void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
+                             unsigned int nvec)
+ {
+       struct platform_msi_priv_data *data = domain->host_data;
+-      struct msi_desc *desc;
+-      for_each_msi_entry(desc, data->dev) {
++      struct msi_desc *desc, *tmp;
++      for_each_msi_entry_safe(desc, tmp, data->dev) {
+               if (WARN_ON(!desc->irq || desc->nvec_used != 1))
+                       return;
+               if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
+                       continue;
+ 
+               irq_domain_free_irqs_common(domain, desc->irq, 1);
++              list_del(&desc->list);
++              free_msi_entry(desc);
+       }
+ }
+ 
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index 6e93df272c20..038b91bcbd31 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -479,13 +479,15 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
+ 
+       if (need_locality) {
+               rc = tpm_request_locality(chip, flags);
+-              if (rc < 0)
+-                      goto out_no_locality;
++              if (rc < 0) {
++                      need_locality = false;
++                      goto out_locality;
++              }
+       }
+ 
+       rc = tpm_cmd_ready(chip, flags);
+       if (rc)
+-              goto out;
++              goto out_locality;
+ 
+       rc = tpm2_prepare_space(chip, space, ordinal, buf);
+       if (rc)
+@@ -549,14 +551,13 @@ out_recv:
+               dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);
+ 
+ out:
+-      rc = tpm_go_idle(chip, flags);
+-      if (rc)
+-              goto out;
++      /* may fail but do not override previous error value in rc */
++      tpm_go_idle(chip, flags);
+ 
++out_locality:
+       if (need_locality)
+               tpm_relinquish_locality(chip, flags);
+ 
+-out_no_locality:
+       if (chip->ops->clk_enable != NULL)
+               chip->ops->clk_enable(chip, false);
+ 
+diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
+index caa86b19c76d..f74f451baf6a 100644
+--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
++++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
+@@ -369,6 +369,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
+       struct device *dev = chip->dev.parent;
+       struct i2c_client *client = to_i2c_client(dev);
+       u32 ordinal;
++      unsigned long duration;
+       size_t count = 0;
+       int burst_count, bytes2write, retries, rc = -EIO;
+ 
+@@ -455,10 +456,12 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
+               return rc;
+       }
+       ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+-      rc = i2c_nuvoton_wait_for_data_avail(chip,
+-                                           tpm_calc_ordinal_duration(chip,
+-                                                                     ordinal),
+-                                           &priv->read_queue);
++      if (chip->flags & TPM_CHIP_FLAG_TPM2)
++              duration = tpm2_calc_ordinal_duration(chip, ordinal);
++      else
++              duration = tpm_calc_ordinal_duration(chip, ordinal);
++
++      rc = i2c_nuvoton_wait_for_data_avail(chip, duration, &priv->read_queue);
+       if (rc) {
+               dev_err(dev, "%s() timeout command duration\n", __func__);
+               i2c_nuvoton_ready(chip);
+diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
+index 00ad0e5f8d66..2b0d772b4f43 100644
+--- a/drivers/clk/rockchip/clk-rk3188.c
++++ b/drivers/clk/rockchip/clk-rk3188.c
+@@ -382,7 +382,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
+       COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
+                       RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
+                       RK2928_CLKGATE_CON(0), 13, GFLAGS),
+-      COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pll", CLK_SET_RATE_PARENT,
++      COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pre", CLK_SET_RATE_PARENT,
+                       RK2928_CLKSEL_CON(9), 0,
+                       RK2928_CLKGATE_CON(0), 14, GFLAGS,
+                       &common_spdif_fracmux),
+diff --git a/drivers/crypto/cavium/nitrox/nitrox_algs.c b/drivers/crypto/cavium/nitrox/nitrox_algs.c
+index 2ae6124e5da6..5d54ebc20cb3 100644
+--- a/drivers/crypto/cavium/nitrox/nitrox_algs.c
++++ b/drivers/crypto/cavium/nitrox/nitrox_algs.c
+@@ -73,7 +73,7 @@ static int flexi_aes_keylen(int keylen)
+ static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
+ {
+       struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
+-      void *fctx;
++      struct crypto_ctx_hdr *chdr;
+ 
+       /* get the first device */
+       nctx->ndev = nitrox_get_first_device();
+@@ -81,12 +81,14 @@ static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
+               return -ENODEV;
+ 
+       /* allocate nitrox crypto context */
+-      fctx = crypto_alloc_context(nctx->ndev);
+-      if (!fctx) {
++      chdr = crypto_alloc_context(nctx->ndev);
++      if (!chdr) {
+               nitrox_put_device(nctx->ndev);
+               return -ENOMEM;
+       }
+-      nctx->u.ctx_handle = (uintptr_t)fctx;
++      nctx->chdr = chdr;
++      nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
++                                       sizeof(struct ctx_hdr));
+       crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) +
+                                   sizeof(struct nitrox_kcrypt_request));
+       return 0;
+@@ -102,7 +104,7 @@ static void nitrox_skcipher_exit(struct crypto_skcipher *tfm)
+ 
+               memset(&fctx->crypto, 0, sizeof(struct crypto_keys));
+               memset(&fctx->auth, 0, sizeof(struct auth_keys));
+-              crypto_free_context((void *)fctx);
++              crypto_free_context((void *)nctx->chdr);
+       }
+       nitrox_put_device(nctx->ndev);
+ 
+diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
+index 9906c0086647..cea977e158fc 100644
+--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
++++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
+@@ -146,12 +146,19 @@ static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
+ void *crypto_alloc_context(struct nitrox_device *ndev)
+ {
+       struct ctx_hdr *ctx;
++      struct crypto_ctx_hdr *chdr;
+       void *vaddr;
+       dma_addr_t dma;
+ 
++      chdr = kmalloc(sizeof(*chdr), GFP_KERNEL);
++      if (!chdr)
++              return NULL;
++
+       vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_ATOMIC | __GFP_ZERO), &dma);
+-      if (!vaddr)
++      if (!vaddr) {
++              kfree(chdr);
+               return NULL;
++      }
+ 
+       /* fill meta data */
+       ctx = vaddr;
+@@ -159,7 +166,11 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
+       ctx->dma = dma;
+       ctx->ctx_dma = dma + sizeof(struct ctx_hdr);
+ 
+-      return ((u8 *)vaddr + sizeof(struct ctx_hdr));
++      chdr->pool = ndev->ctx_pool;
++      chdr->dma = dma;
++      chdr->vaddr = vaddr;
++
++      return chdr;
+ }
+ 
+ /**
+@@ -168,13 +179,14 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
+  */
+ void crypto_free_context(void *ctx)
+ {
+-      struct ctx_hdr *ctxp;
++      struct crypto_ctx_hdr *ctxp;
+ 
+       if (!ctx)
+               return;
+ 
+-      ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr));
+-      dma_pool_free(ctxp->pool, ctxp, ctxp->dma);
++      ctxp = ctx;
++      dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
++      kfree(ctxp);
+ }
+ 
+ /**
+diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
+index d091b6f5f5dd..19f0a20e3bb3 100644
+--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
++++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
+@@ -181,12 +181,19 @@ struct flexi_crypto_context {
+       struct auth_keys auth;
+ };
+ 
++struct crypto_ctx_hdr {
++      struct dma_pool *pool;
++      dma_addr_t dma;
++      void *vaddr;
++};
++
+ struct nitrox_crypto_ctx {
+       struct nitrox_device *ndev;
+       union {
+               u64 ctx_handle;
+               struct flexi_crypto_context *fctx;
+       } u;
++      struct crypto_ctx_hdr *chdr;
+ };
+ 
+ struct nitrox_kcrypt_request {
+diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
+index 12cf0f7ca7bb..2e8854ba18cf 100644
+--- a/drivers/infiniband/hw/hfi1/verbs.c
++++ b/drivers/infiniband/hw/hfi1/verbs.c
+@@ -1123,6 +1123,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ 
+                               if (slen > len)
+                                       slen = len;
++                              if (slen > ss->sge.sge_length)
++                                      slen = ss->sge.sge_length;
+                               rvt_update_sge(ss, slen, false);
+                               seg_pio_copy_mid(pbuf, addr, slen);
+                               len -= slen;
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index 368871a398a5..f2bf8fa1ab04 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1251,6 +1251,7 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
+ static const struct acpi_device_id elan_acpi_id[] = {
+       { "ELAN0000", 0 },
+       { "ELAN0100", 0 },
++      { "ELAN0501", 0 },
+       { "ELAN0600", 0 },
+       { "ELAN0602", 0 },
+       { "ELAN0605", 0 },
+diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
+index 46c189ad8d94..28a1c6b5095d 100644
+--- a/drivers/isdn/capi/kcapi.c
++++ b/drivers/isdn/capi/kcapi.c
+@@ -851,7 +851,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
+       u16 ret;
+ 
+       if (contr == 0) {
+-              strlcpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
++              strncpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
+               return CAPI_NOERROR;
+       }
+ 
+@@ -859,7 +859,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
+ 
+       ctr = get_capi_ctr_by_nr(contr);
+       if (ctr && ctr->state == CAPI_CTR_RUNNING) {
+-              strlcpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
++              strncpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
+               ret = CAPI_NOERROR;
+       } else
+               ret = CAPI_REGNOTINSTALLED;
+diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+index a1aacd6fb96f..664c4b8b1a6d 100644
+--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
++++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+@@ -1733,7 +1733,7 @@ typedef struct { u16 __; u8 _; } __packed x24;
+               unsigned s;     \
+       \
+               for (s = 0; s < len; s++) {     \
+-                      u8 chr = font8x16[text[s] * 16 + line]; \
++                      u8 chr = font8x16[(u8)text[s] * 16 + line];     \
+       \
+                       if (hdiv == 2 && tpg->hflip) { \
+                               pos[3] = (chr & (0x01 << 6) ? fg : bg); \
+diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
+index 01419455e545..a7a366093524 100644
+--- a/drivers/media/platform/vivid/vivid-vid-cap.c
++++ b/drivers/media/platform/vivid/vivid-vid-cap.c
+@@ -455,6 +455,8 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
+               tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
+               break;
+       }
++      vfree(dev->bitmap_cap);
++      dev->bitmap_cap = NULL;
+       vivid_update_quality(dev);
+      tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
+       dev->crop_cap = dev->src_rect;
+diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
+index 69c638dd0484..14e8967c1dea 100644
+--- a/drivers/mtd/spi-nor/Kconfig
++++ b/drivers/mtd/spi-nor/Kconfig
+@@ -41,7 +41,7 @@ config SPI_ASPEED_SMC
+ 
+ config SPI_ATMEL_QUADSPI
+       tristate "Atmel Quad SPI Controller"
+-      depends on ARCH_AT91 || (ARM && COMPILE_TEST)
++      depends on ARCH_AT91 || (ARM && COMPILE_TEST && !ARCH_EBSA110)
+       depends on OF && HAS_IOMEM
+       help
+         This enables support for the Quad SPI controller in master mode.
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index d6f8d6c8b0f1..0b2f9ddfb1c4 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -60,7 +60,8 @@
+ #define MACB_TX_ERR_FLAGS     (MACB_BIT(ISR_TUND)                     \
+                                       | MACB_BIT(ISR_RLE)             \
+                                       | MACB_BIT(TXERR))
+-#define MACB_TX_INT_FLAGS     (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
++#define MACB_TX_INT_FLAGS     (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)    \
++                                      | MACB_BIT(TXUBR))
+ 
+ /* Max length of transmit frame must be a multiple of 8 bytes */
+ #define MACB_TX_LEN_ALIGN     8
+@@ -1243,6 +1244,21 @@ static int macb_poll(struct napi_struct *napi, int budget)
+       return work_done;
+ }
+ 
++static void macb_tx_restart(struct macb_queue *queue)
++{
++      unsigned int head = queue->tx_head;
++      unsigned int tail = queue->tx_tail;
++      struct macb *bp = queue->bp;
++
++      if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
++              queue_writel(queue, ISR, MACB_BIT(TXUBR));
++
++      if (head == tail)
++              return;
++
++      macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
++}
++
+ static irqreturn_t macb_interrupt(int irq, void *dev_id)
+ {
+       struct macb_queue *queue = dev_id;
+@@ -1300,6 +1316,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
+               if (status & MACB_BIT(TCOMP))
+                       macb_tx_interrupt(queue);
+ 
++              if (status & MACB_BIT(TXUBR))
++                      macb_tx_restart(queue);
++
+               /* Link change detection isn't possible with RMII, so we'll
+                * add that if/when we get our hands on a full-blown MII PHY.
+                */
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index f210398200ec..6c05819d995e 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1172,11 +1172,15 @@ out:
+ 
+ map_failed_frags:
+       last = i+1;
+-      for (i = 0; i < last; i++)
++      for (i = 1; i < last; i++)
+               dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
+                              descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+                              DMA_TO_DEVICE);
+ 
++      dma_unmap_single(&adapter->vdev->dev,
++                       descs[0].fields.address,
++                       descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
++                       DMA_TO_DEVICE);
+ map_failed:
+       if (!firmware_has_feature(FW_FEATURE_CMO))
+               netdev_err(netdev, "tx: unable to map xmit buffer\n");
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index d12e9fc0d76b..d9db3ad3d765 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -1417,21 +1417,15 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
+ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
+                             struct ethtool_ts_info *info)
+ {
+-      int ret;
+-
+-      ret = ethtool_op_get_ts_info(priv->netdev, info);
+-      if (ret)
+-              return ret;
+-
+       info->phc_index = priv->tstamp.ptp ?
+                         ptp_clock_index(priv->tstamp.ptp) : -1;
+ 
+       if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
+               return 0;
+ 
+-      info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
+-                               SOF_TIMESTAMPING_RX_HARDWARE |
+-                               SOF_TIMESTAMPING_RAW_HARDWARE;
++      info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
++                              SOF_TIMESTAMPING_RX_HARDWARE |
++                              SOF_TIMESTAMPING_RAW_HARDWARE;
+ 
+       info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+                        BIT(HWTSTAMP_TX_ON);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 3d3fd03fa450..8b7b52c7512e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -1072,7 +1072,7 @@ mpwrq_cqe_out:
+ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+ {
+       struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
+-      struct mlx5e_xdpsq *xdpsq;
++      struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
+       struct mlx5_cqe64 *cqe;
+       int work_done = 0;
+ 
+@@ -1083,10 +1083,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+               work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
+ 
+       cqe = mlx5_cqwq_get_cqe(&cq->wq);
+-      if (!cqe)
++      if (!cqe) {
++              if (unlikely(work_done))
++                      goto out;
+               return 0;
+-
+-      xdpsq = &rq->xdpsq;
++      }
+ 
+       do {
+               if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
+@@ -1101,6 +1102,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+               rq->handle_rx_cqe(rq, cqe);
+       } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+ 
++out:
+       if (xdpsq->db.doorbell) {
+               mlx5e_xmit_xdp_doorbell(xdpsq);
+               xdpsq->db.doorbell = false;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index dd05cf148845..6538b7b943f0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -425,7 +425,7 @@ static void del_rule(struct fs_node *node)
+ 
+       if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
+           --fte->dests_size) {
+-              modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
++              modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+               update_fte = true;
+       }
+ out:
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
+index f3315bc874ad..cced009da869 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
+@@ -113,6 +113,7 @@ struct mlxsw_core {
+       struct mlxsw_thermal *thermal;
+       struct mlxsw_core_port *ports;
+       unsigned int max_ports;
++      bool fw_flash_in_progress;
+       unsigned long driver_priv[0];
+       /* driver_priv has to be always the last item */
+ };
+@@ -460,12 +461,16 @@ struct mlxsw_reg_trans {
+       struct rcu_head rcu;
+ };
+ 
+-#define MLXSW_EMAD_TIMEOUT_MS 200
++#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000
++#define MLXSW_EMAD_TIMEOUT_MS                 200
+ 
+ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
+ {
+       unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
+ 
++      if (trans->core->fw_flash_in_progress)
++              timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
++
+       queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
+ }
+ 
+@@ -1791,6 +1796,18 @@ void mlxsw_core_flush_owq(void)
+ }
+ EXPORT_SYMBOL(mlxsw_core_flush_owq);
+ 
++void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
++{
++      mlxsw_core->fw_flash_in_progress = true;
++}
++EXPORT_SYMBOL(mlxsw_core_fw_flash_start);
++
++void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
++{
++      mlxsw_core->fw_flash_in_progress = false;
++}
++EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
++
+ static int __init mlxsw_core_module_init(void)
+ {
+       int err;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
+index 6e966af72fc4..3fa04da38fd0 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
+@@ -312,6 +312,9 @@ struct mlxsw_driver {
+       const struct mlxsw_config_profile *profile;
+ };
+ 
++void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core);
++void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core);
++
+ bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
+                         enum mlxsw_res_id res_id);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index 18bb6798937b..84864fdcb0e8 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -333,8 +333,13 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
+               },
+               .mlxsw_sp = mlxsw_sp
+       };
++      int err;
++
++      mlxsw_core_fw_flash_start(mlxsw_sp->core);
++      err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
++      mlxsw_core_fw_flash_end(mlxsw_sp->core);
+ 
+-      return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
++      return err;
+ }
+ 
+ static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 5b56a86e88ff..c433be573e0d 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -159,11 +159,8 @@ static int mdio_bus_phy_restore(struct device *dev)
+       if (ret < 0)
+               return ret;
+ 
+-      /* The PHY needs to renegotiate. */
+-      phydev->link = 0;
+-      phydev->state = PHY_UP;
+-
+-      phy_start_machine(phydev);
++      if (phydev->attached_dev && phydev->adjust_link)
++              phy_start_machine(phydev);
+ 
+       return 0;
+ }
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 11a25cef113f..969474c9d297 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1109,6 +1109,7 @@ static const struct usb_device_id products[] = {
+       {QMI_FIXED_INTF(0x1435, 0xd181, 3)},    /* Wistron NeWeb D18Q1 */
+       {QMI_FIXED_INTF(0x1435, 0xd181, 4)},    /* Wistron NeWeb D18Q1 */
+       {QMI_FIXED_INTF(0x1435, 0xd181, 5)},    /* Wistron NeWeb D18Q1 */
++      {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
+       {QMI_FIXED_INTF(0x16d8, 0x6003, 0)},    /* CMOTech 6003 */
+       {QMI_FIXED_INTF(0x16d8, 0x6007, 0)},    /* CMOTech CHE-628S */
+       {QMI_FIXED_INTF(0x16d8, 0x6008, 0)},    /* CMOTech CMU-301 */
+@@ -1221,6 +1222,7 @@ static const struct usb_device_id products[] = {
+       {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},    /* Telit ME910 dual modem */
+       {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
++      {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
+       {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)},    /* Telewell TW-3G HSPA+ */
+       {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)},    /* Telewell TW-3G HSPA+ */
+      {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},    /* XS Stick W100-2 from 4G Systems */
+@@ -1250,12 +1252,13 @@ static const struct usb_device_id products[] = {
+      {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
+      {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},    /* HP lt4120 Snapdragon X5 LTE */
+       {QMI_FIXED_INTF(0x22de, 0x9061, 3)},    /* WeTelecom WPD-600N */
+-      {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},    /* SIMCom 7230E */
++      {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
+      {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0  Mini PCIe */
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
+       {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
++      {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
+ 
+       /* 4. Gobi 1000 devices */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
+diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
+index 40ee80c03c94..3eaefecd4448 100644
+--- a/drivers/net/wan/x25_asy.c
++++ b/drivers/net/wan/x25_asy.c
+@@ -485,8 +485,10 @@ static int x25_asy_open(struct net_device *dev)
+ 
+       /* Cleanup */
+       kfree(sl->xbuff);
++      sl->xbuff = NULL;
+ noxbuff:
+       kfree(sl->rbuff);
++      sl->rbuff = NULL;
+ norbuff:
+       return -ENOMEM;
+ }
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 6ea95b316256..4af4e5c12d53 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -904,7 +904,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
+               if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
+                       unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
+ 
+-                      BUG_ON(pull_to <= skb_headlen(skb));
++                      BUG_ON(pull_to < skb_headlen(skb));
+                       __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+               }
+               if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
+index 6620016869cf..407aa1ea2421 100644
+--- a/drivers/rtc/rtc-m41t80.c
++++ b/drivers/rtc/rtc-m41t80.c
+@@ -404,7 +404,7 @@ static int m41t80_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+       alrm->time.tm_min  = bcd2bin(alarmvals[3] & 0x7f);
+       alrm->time.tm_hour = bcd2bin(alarmvals[2] & 0x3f);
+       alrm->time.tm_mday = bcd2bin(alarmvals[1] & 0x3f);
+-      alrm->time.tm_mon  = bcd2bin(alarmvals[0] & 0x3f);
++      alrm->time.tm_mon  = bcd2bin(alarmvals[0] & 0x3f) - 1;
+ 
+       alrm->enabled = !!(alarmvals[0] & M41T80_ALMON_AFE);
+       alrm->pending = (flags & M41T80_FLAGS_AF) && alrm->enabled;
+diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
+index f35cc10772f6..25abf2d1732a 100644
+--- a/drivers/spi/spi-bcm2835.c
++++ b/drivers/spi/spi-bcm2835.c
+@@ -88,7 +88,7 @@ struct bcm2835_spi {
+       u8 *rx_buf;
+       int tx_len;
+       int rx_len;
+-      bool dma_pending;
++      unsigned int dma_pending;
+ };
+ 
+ static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
+@@ -155,8 +155,7 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
+       /* Write as many bytes as possible to FIFO */
+       bcm2835_wr_fifo(bs);
+ 
+-      /* based on flags decide if we can finish the transfer */
+-      if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) {
++      if (!bs->rx_len) {
+               /* Transfer complete - reset SPI HW */
+               bcm2835_spi_reset_hw(master);
+               /* wake up the framework */
+@@ -233,10 +232,9 @@ static void bcm2835_spi_dma_done(void *data)
+        * is called the tx-dma must have finished - can't get to this
+        * situation otherwise...
+        */
+-      dmaengine_terminate_all(master->dma_tx);
+-
+-      /* mark as no longer pending */
+-      bs->dma_pending = 0;
++      if (cmpxchg(&bs->dma_pending, true, false)) {
++              dmaengine_terminate_all(master->dma_tx);
++      }
+ 
+       /* and mark as completed */;
+       complete(&master->xfer_completion);
+@@ -342,6 +340,7 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
+       if (ret) {
+               /* need to reset on errors */
+               dmaengine_terminate_all(master->dma_tx);
++              bs->dma_pending = false;
+               bcm2835_spi_reset_hw(master);
+               return ret;
+       }
+@@ -617,10 +616,9 @@ static void bcm2835_spi_handle_err(struct spi_master *master,
+       struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ 
+       /* if an error occurred and we have an active dma, then terminate */
+-      if (bs->dma_pending) {
++      if (cmpxchg(&bs->dma_pending, true, false)) {
+               dmaengine_terminate_all(master->dma_tx);
+               dmaengine_terminate_all(master->dma_rx);
+-              bs->dma_pending = 0;
+       }
+       /* and reset */
+       bcm2835_spi_reset_hw(master);
+diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
+index 0189e3edbbbe..a4b2bfc31503 100644
+--- a/drivers/staging/wilc1000/wilc_sdio.c
++++ b/drivers/staging/wilc1000/wilc_sdio.c
+@@ -823,6 +823,7 @@ static int sdio_read_int(struct wilc *wilc, u32 *int_status)
+       if (!g_sdio.irq_gpio) {
+               int i;
+ 
++              cmd.read_write = 0;
+               cmd.function = 1;
+               cmd.address = 0x04;
+               cmd.data = 0;
+diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
+index 21c35ad72b99..897b1c515d00 100644
+--- a/drivers/tty/serial/xilinx_uartps.c
++++ b/drivers/tty/serial/xilinx_uartps.c
+@@ -130,7 +130,7 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
+ #define CDNS_UART_IXR_RXTRIG  0x00000001 /* RX FIFO trigger interrupt */
+ #define CDNS_UART_IXR_RXFULL  0x00000004 /* RX FIFO full interrupt. */
+ #define CDNS_UART_IXR_RXEMPTY 0x00000002 /* RX FIFO empty interrupt. */
+-#define CDNS_UART_IXR_MASK    0x00001FFF /* Valid bit mask */
++#define CDNS_UART_IXR_RXMASK  0x000021e7 /* Valid RX bit mask */
+ 
+       /*
+        * Do not enable parity error interrupt for the following
+@@ -366,7 +366,7 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
+               cdns_uart_handle_tx(dev_id);
+               isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
+       }
+-      if (isrstatus & CDNS_UART_IXR_MASK)
++      if (isrstatus & CDNS_UART_IXR_RXMASK)
+               cdns_uart_handle_rx(dev_id, isrstatus);
+ 
+       spin_unlock(&port->lock);
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 5a8ef83a5c5c..423a339e53bc 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -593,6 +593,13 @@ static int acm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
+       if (retval)
+               goto error_init_termios;
+ 
++      /*
++       * Suppress initial echoing for some devices which might send data
++       * immediately after acm driver has been installed.
++       */
++      if (acm->quirks & DISABLE_ECHO)
++              tty->termios.c_lflag &= ~ECHO;
++
+       tty->driver_data = acm;
+ 
+       return 0;
+@@ -1685,6 +1692,9 @@ static const struct usb_device_id acm_ids[] = {
+      { USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; [email protected] */
+       .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+       },
++      { USB_DEVICE(0x0e8d, 0x2000), /* MediaTek Inc Preloader */
++      .driver_info = DISABLE_ECHO, /* DISABLE ECHO in termios flag */
++      },
+       { USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */
+       .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+       },
+diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
+index ca06b20d7af9..515aad0847ee 100644
+--- a/drivers/usb/class/cdc-acm.h
++++ b/drivers/usb/class/cdc-acm.h
+@@ -140,3 +140,4 @@ struct acm {
+ #define QUIRK_CONTROL_LINE_STATE      BIT(6)
+ #define CLEAR_HALT_CONDITIONS         BIT(7)
+ #define SEND_ZERO_PACKET              BIT(8)
++#define DISABLE_ECHO                  BIT(9)
+diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
+index 5e5fc9d7d533..4c6b31a4a263 100644
+--- a/drivers/usb/host/r8a66597-hcd.c
++++ b/drivers/usb/host/r8a66597-hcd.c
+@@ -1990,6 +1990,8 @@ static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
+ 
+ static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
+                                     struct usb_host_endpoint *hep)
++__acquires(r8a66597->lock)
++__releases(r8a66597->lock)
+ {
+       struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+       struct r8a66597_pipe *pipe = (struct r8a66597_pipe *)hep->hcpriv;
+@@ -2002,13 +2004,14 @@ static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
+               return;
+       pipenum = pipe->info.pipenum;
+ 
++      spin_lock_irqsave(&r8a66597->lock, flags);
+       if (pipenum == 0) {
+               kfree(hep->hcpriv);
+               hep->hcpriv = NULL;
++              spin_unlock_irqrestore(&r8a66597->lock, flags);
+               return;
+       }
+ 
+-      spin_lock_irqsave(&r8a66597->lock, flags);
+       pipe_stop(r8a66597, pipe);
+       pipe_irq_disable(r8a66597, pipenum);
+       disable_irq_empty(r8a66597, pipenum);
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 988be9ca2b4f..8cdca3f7acaa 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1957,6 +1957,10 @@ static const struct usb_device_id option_ids[] = {
+       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
+       { USB_DEVICE(0x1508, 0x1001),                                           /* Fibocom NL668 */
+         .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
++      { USB_DEVICE(0x2cb7, 0x0104),                                           /* Fibocom NL678 series */
++        .driver_info = RSVD(4) | RSVD(5) },
++      { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff),                     /* Fibocom NL678 series */
++        .driver_info = RSVD(6) },
+       { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 2153e67eeeee..5fa1e6fb49a6 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -94,9 +94,14 @@ static const struct usb_device_id id_table[] = {
+       { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
+       { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
+       { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
++      { USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
+       { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
++      { USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
+       { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
+       { USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
++      { USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) },
++      { USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) },
++      { USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) },
+       { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
+       { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
+       { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index cec7141245ef..b46e74a90af2 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -124,10 +124,15 @@
+ 
+ /* Hewlett-Packard POS Pole Displays */
+ #define HP_VENDOR_ID          0x03f0
++#define HP_LM920_PRODUCT_ID   0x026b
++#define HP_TD620_PRODUCT_ID   0x0956
+ #define HP_LD960_PRODUCT_ID   0x0b39
+ #define HP_LCM220_PRODUCT_ID  0x3139
+ #define HP_LCM960_PRODUCT_ID  0x3239
+ #define HP_LD220_PRODUCT_ID   0x3524
++#define HP_LD220TA_PRODUCT_ID 0x4349
++#define HP_LD960TA_PRODUCT_ID 0x4439
++#define HP_LM940_PRODUCT_ID   0x5039
+ 
+ /* Cressi Edy (diving computer) PC interface */
+ #define CRESSI_VENDOR_ID      0x04b8
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index ffdd4e937d1d..97518685ab58 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -2231,6 +2231,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
+               return -EFAULT;
+       }
+       if (unlikely(vq->log_used)) {
++              /* Make sure used idx is seen before log. */
++              smp_wmb();
+               /* Log used index update. */
+               log_write(vq->log_base,
+                         vq->log_addr + offsetof(struct vring_used, idx),
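The smp_wmb() added above makes the used-index store visible before the log write that records it, so a consumer of the log cannot observe the log entry without the index update. As a reminder, the general publish/consume pairing such a write barrier belongs to looks roughly like this; the struct, fields and function names are illustrative and a kernel build context is assumed:

#include <asm/barrier.h>

struct message {
	int payload;
	int ready;
};

/* Producer: publish the payload before the flag announcing it. */
static void publish(struct message *m, int value)
{
	m->payload = value;
	smp_wmb();		/* order payload store before ready store */
	m->ready = 1;
}

/* Consumer: pairs with the producer's smp_wmb(). */
static int consume(struct message *m)
{
	if (!m->ready)
		return -1;
	smp_rmb();		/* order ready load before payload load */
	return m->payload;
}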
+diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
+index eccadb5f62a5..0f77ef303ae7 100644
+--- a/fs/btrfs/btrfs_inode.h
++++ b/fs/btrfs/btrfs_inode.h
+@@ -160,6 +160,12 @@ struct btrfs_inode {
+        */
+       u64 last_unlink_trans;
+ 
++      /*
++       * Track the transaction id of the last transaction used to create a
++       * hard link for the inode. This is used by the log tree (fsync).
++       */
++      u64 last_link_trans;
++
+       /*
+        * Number of bytes outstanding that are going to need csums.  This is
+        * used in ENOSPC accounting.
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 1c340d6c8568..09829e8d759e 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -3897,6 +3897,21 @@ cache_index:
+        * inode is not a directory, logging its parent unnecessarily.
+        */
+       BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
++      /*
++       * Similar reasoning for last_link_trans, needs to be set otherwise
++       * for a case like the following:
++       *
++       * mkdir A
++       * touch foo
++       * ln foo A/bar
++       * echo 2 > /proc/sys/vm/drop_caches
++       * fsync foo
++       * <power failure>
++       *
++       * Would result in link bar and directory A not existing after the power
++       * failure.
++       */
++      BTRFS_I(inode)->last_link_trans = BTRFS_I(inode)->last_trans;
+ 
+       path->slots[0]++;
+       if (inode->i_nlink != 1 ||
+@@ -6813,6 +6828,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
+                       if (err)
+                               goto fail;
+               }
++              BTRFS_I(inode)->last_link_trans = trans->transid;
+               d_instantiate(dentry, inode);
+               btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent);
+       }
+@@ -9540,6 +9556,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
+       ei->index_cnt = (u64)-1;
+       ei->dir_index = 0;
+       ei->last_unlink_trans = 0;
++      ei->last_link_trans = 0;
+       ei->last_log_commit = 0;
+       ei->delayed_iput_count = 0;
+ 
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 2109db196449..179a383a4aaa 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -5795,6 +5795,22 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
+                       goto end_trans;
+       }
+ 
++      /*
++       * If a new hard link was added to the inode in the current transaction
++       * and its link count is now greater than 1, we need to fallback to a
++       * transaction commit, otherwise we can end up not logging all its new
++       * parents for all the hard links. Here just from the dentry used to
++       * fsync, we can not visit the ancestor inodes for all the other hard
++       * links to figure out if any is new, so we fallback to a transaction
++       * commit (instead of adding a lot of complexity of scanning a btree,
++       * since this scenario is not a common use case).
++       */
++      if (inode->vfs_inode.i_nlink > 1 &&
++          inode->last_link_trans > last_committed) {
++              ret = -EMLINK;
++              goto end_trans;
++      }
++
+       while (1) {
+               if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
+                       break;
+diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
+index 62c88dfed57b..d7e839cb773f 100644
+--- a/fs/cifs/smb2maperror.c
++++ b/fs/cifs/smb2maperror.c
+@@ -378,8 +378,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
+       {STATUS_NONEXISTENT_EA_ENTRY, -EIO, "STATUS_NONEXISTENT_EA_ENTRY"},
+       {STATUS_NO_EAS_ON_FILE, -ENODATA, "STATUS_NO_EAS_ON_FILE"},
+       {STATUS_EA_CORRUPT_ERROR, -EIO, "STATUS_EA_CORRUPT_ERROR"},
+-      {STATUS_FILE_LOCK_CONFLICT, -EIO, "STATUS_FILE_LOCK_CONFLICT"},
+-      {STATUS_LOCK_NOT_GRANTED, -EIO, "STATUS_LOCK_NOT_GRANTED"},
++      {STATUS_FILE_LOCK_CONFLICT, -EACCES, "STATUS_FILE_LOCK_CONFLICT"},
++      {STATUS_LOCK_NOT_GRANTED, -EACCES, "STATUS_LOCK_NOT_GRANTED"},
+       {STATUS_DELETE_PENDING, -ENOENT, "STATUS_DELETE_PENDING"},
+       {STATUS_CTL_FILE_NOT_SUPPORTED, -ENOSYS,
+       "STATUS_CTL_FILE_NOT_SUPPORTED"},
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index c0c6562b3c44..02970a2e86a3 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -2568,6 +2568,8 @@ extern int ext4_group_extend(struct super_block *sb,
+ extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
+ 
+ /* super.c */
++extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
++                                       sector_t block, int op_flags);
+ extern int ext4_seq_options_show(struct seq_file *seq, void *offset);
+ extern int ext4_calculate_overhead(struct super_block *sb);
+ extern void ext4_superblock_csum_set(struct super_block *sb);
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index ac2e0516c16f..40a2d1a428c2 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -711,8 +711,11 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
+ 
+       if (!PageUptodate(page)) {
+               ret = ext4_read_inline_page(inode, page);
+-              if (ret < 0)
++              if (ret < 0) {
++                      unlock_page(page);
++                      put_page(page);
+                       goto out_up_read;
++              }
+       }
+ 
+       ret = 1;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index bd2bf83b1a1f..22c9bb8c671f 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5218,9 +5218,13 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
+ {
+       int err;
+ 
+-      if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
++      if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) ||
++          sb_rdonly(inode->i_sb))
+               return 0;
+ 
++      if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
++              return -EIO;
++
+       if (EXT4_SB(inode->i_sb)->s_journal) {
+               if (ext4_journal_current_handle()) {
+                       jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
+@@ -5236,7 +5240,8 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
+               if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
+                       return 0;
+ 
+-              err = ext4_force_commit(inode->i_sb);
++              err = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
++                                              EXT4_I(inode)->i_sync_tid);
+       } else {
+               struct ext4_iloc iloc;
+ 
+diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
+index cf5181b62df1..78d45c7d3fa7 100644
+--- a/fs/ext4/migrate.c
++++ b/fs/ext4/migrate.c
+@@ -123,9 +123,9 @@ static int update_ind_extent_range(handle_t *handle, struct inode *inode,
+       int i, retval = 0;
+       unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
+ 
+-      bh = sb_bread(inode->i_sb, pblock);
+-      if (!bh)
+-              return -EIO;
++      bh = ext4_sb_bread(inode->i_sb, pblock, 0);
++      if (IS_ERR(bh))
++              return PTR_ERR(bh);
+ 
+       i_data = (__le32 *)bh->b_data;
+       for (i = 0; i < max_entries; i++) {
+@@ -152,9 +152,9 @@ static int update_dind_extent_range(handle_t *handle, struct inode *inode,
+       int i, retval = 0;
+       unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
+ 
+-      bh = sb_bread(inode->i_sb, pblock);
+-      if (!bh)
+-              return -EIO;
++      bh = ext4_sb_bread(inode->i_sb, pblock, 0);
++      if (IS_ERR(bh))
++              return PTR_ERR(bh);
+ 
+       i_data = (__le32 *)bh->b_data;
+       for (i = 0; i < max_entries; i++) {
+@@ -182,9 +182,9 @@ static int update_tind_extent_range(handle_t *handle, struct inode *inode,
+       int i, retval = 0;
+       unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
+ 
+-      bh = sb_bread(inode->i_sb, pblock);
+-      if (!bh)
+-              return -EIO;
++      bh = ext4_sb_bread(inode->i_sb, pblock, 0);
++      if (IS_ERR(bh))
++              return PTR_ERR(bh);
+ 
+       i_data = (__le32 *)bh->b_data;
+       for (i = 0; i < max_entries; i++) {
+@@ -231,9 +231,9 @@ static int free_dind_blocks(handle_t *handle,
+       struct buffer_head *bh;
+       unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
+ 
+-      bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
+-      if (!bh)
+-              return -EIO;
++      bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
++      if (IS_ERR(bh))
++              return PTR_ERR(bh);
+ 
+       tmp_idata = (__le32 *)bh->b_data;
+       for (i = 0; i < max_entries; i++) {
+@@ -261,9 +261,9 @@ static int free_tind_blocks(handle_t *handle,
+       struct buffer_head *bh;
+       unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
+ 
+-      bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
+-      if (!bh)
+-              return -EIO;
++      bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
++      if (IS_ERR(bh))
++              return PTR_ERR(bh);
+ 
+       tmp_idata = (__le32 *)bh->b_data;
+       for (i = 0; i < max_entries; i++) {
+@@ -389,9 +389,9 @@ static int free_ext_idx(handle_t *handle, struct inode *inode,
+       struct ext4_extent_header *eh;
+ 
+       block = ext4_idx_pblock(ix);
+-      bh = sb_bread(inode->i_sb, block);
+-      if (!bh)
+-              return -EIO;
++      bh = ext4_sb_bread(inode->i_sb, block, 0);
++      if (IS_ERR(bh))
++              return PTR_ERR(bh);
+ 
+       eh = (struct ext4_extent_header *)bh->b_data;
+       if (eh->eh_depth != 0) {
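The conversions above replace NULL-on-failure sb_bread() calls with ext4_sb_bread(), which encodes the failure reason in the returned pointer so callers can pass -ENOMEM or -EIO up instead of a blanket -EIO. A simplified, standalone userspace sketch of that ERR_PTR/IS_ERR/PTR_ERR caller pattern; the macros and the read_block() helper here are illustrative reimplementations, not the kernel's:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095	/* error codes live in the top, unmappable addresses */

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Stand-in for a block read that can fail for different reasons. */
static void *read_block(int fail)
{
	static char block[4096];

	return fail ? ERR_PTR(-EIO) : block;
}

int main(void)
{
	void *bh = read_block(1);

	if (IS_ERR(bh)) {
		fprintf(stderr, "read failed: %ld\n", PTR_ERR(bh));
		return 1;
	}
	return 0;
}

The encoding relies on the last page of the address space never being mapped, which holds on typical Linux targets but is not guaranteed by ISO C.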
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index deebb8842c82..703b516366fd 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -127,10 +127,12 @@ static int verify_group_input(struct super_block *sb,
+       else if (free_blocks_count < 0)
+               ext4_warning(sb, "Bad blocks count %u",
+                            input->blocks_count);
+-      else if (!(bh = sb_bread(sb, end - 1)))
++      else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
++              err = PTR_ERR(bh);
++              bh = NULL;
+               ext4_warning(sb, "Cannot read last block (%llu)",
+                            end - 1);
+-      else if (outside(input->block_bitmap, start, end))
++      } else if (outside(input->block_bitmap, start, end))
+               ext4_warning(sb, "Block bitmap not in group (block %llu)",
+                            (unsigned long long)input->block_bitmap);
+       else if (outside(input->inode_bitmap, start, end))
+@@ -757,11 +759,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
+       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+       unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
+       ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
+-      struct buffer_head **o_group_desc, **n_group_desc;
+-      struct buffer_head *dind;
+-      struct buffer_head *gdb_bh;
++      struct buffer_head **o_group_desc, **n_group_desc = NULL;
++      struct buffer_head *dind = NULL;
++      struct buffer_head *gdb_bh = NULL;
+       int gdbackups;
+-      struct ext4_iloc iloc;
++      struct ext4_iloc iloc = { .bh = NULL };
+       __le32 *data;
+       int err;
+ 
+@@ -770,21 +772,22 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
+                      "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
+                      gdb_num);
+ 
+-      gdb_bh = sb_bread(sb, gdblock);
+-      if (!gdb_bh)
+-              return -EIO;
++      gdb_bh = ext4_sb_bread(sb, gdblock, 0);
++      if (IS_ERR(gdb_bh))
++              return PTR_ERR(gdb_bh);
+ 
+       gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
+       if (gdbackups < 0) {
+               err = gdbackups;
+-              goto exit_bh;
++              goto errout;
+       }
+ 
+       data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
+-      dind = sb_bread(sb, le32_to_cpu(*data));
+-      if (!dind) {
+-              err = -EIO;
+-              goto exit_bh;
++      dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
++      if (IS_ERR(dind)) {
++              err = PTR_ERR(dind);
++              dind = NULL;
++              goto errout;
+       }
+ 
+       data = (__le32 *)dind->b_data;
+@@ -792,18 +795,18 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
+               ext4_warning(sb, "new group %u GDT block %llu not reserved",
+                            group, gdblock);
+               err = -EINVAL;
+-              goto exit_dind;
++              goto errout;
+       }
+ 
+       BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
+       err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
+       if (unlikely(err))
+-              goto exit_dind;
++              goto errout;
+ 
+       BUFFER_TRACE(gdb_bh, "get_write_access");
+       err = ext4_journal_get_write_access(handle, gdb_bh);
+       if (unlikely(err))
+-              goto exit_dind;
++              goto errout;
+ 
+       BUFFER_TRACE(dind, "get_write_access");
+       err = ext4_journal_get_write_access(handle, dind);
+@@ -813,7 +816,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
+       /* ext4_reserve_inode_write() gets a reference on the iloc */
+       err = ext4_reserve_inode_write(handle, inode, &iloc);
+       if (unlikely(err))
+-              goto exit_dind;
++              goto errout;
+ 
+       n_group_desc = ext4_kvmalloc((gdb_num + 1) *
+                                    sizeof(struct buffer_head *),
+@@ -822,7 +825,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
+               err = -ENOMEM;
+               ext4_warning(sb, "not enough memory for %lu groups",
+                            gdb_num + 1);
+-              goto exit_inode;
++              goto errout;
+       }
+ 
+       /*
+@@ -838,7 +841,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
+       err = ext4_handle_dirty_metadata(handle, NULL, dind);
+       if (unlikely(err)) {
+               ext4_std_error(sb, err);
+-              goto exit_inode;
++              goto errout;
+       }
+       inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
+       ext4_mark_iloc_dirty(handle, inode, &iloc);
+@@ -846,8 +849,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
+       err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
+       if (unlikely(err)) {
+               ext4_std_error(sb, err);
+-              iloc.bh = NULL;
+-              goto exit_inode;
++              goto errout;
+       }
+       brelse(dind);
+ 
+@@ -863,15 +865,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
+       err = ext4_handle_dirty_super(handle, sb);
+       if (err)
+               ext4_std_error(sb, err);
+-
+       return err;
+-
+-exit_inode:
++errout:
+       kvfree(n_group_desc);
+       brelse(iloc.bh);
+-exit_dind:
+       brelse(dind);
+-exit_bh:
+       brelse(gdb_bh);
+ 
+       ext4_debug("leaving with error %d\n", err);
+@@ -891,9 +889,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
+ 
+       gdblock = ext4_meta_bg_first_block_no(sb, group) +
+                  ext4_bg_has_super(sb, group);
+-      gdb_bh = sb_bread(sb, gdblock);
+-      if (!gdb_bh)
+-              return -EIO;
++      gdb_bh = ext4_sb_bread(sb, gdblock, 0);
++      if (IS_ERR(gdb_bh))
++              return PTR_ERR(gdb_bh);
+       n_group_desc = ext4_kvmalloc((gdb_num + 1) *
+                                    sizeof(struct buffer_head *),
+                                    GFP_NOFS);
+@@ -949,9 +947,10 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
+               return -ENOMEM;
+ 
+       data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
+-      dind = sb_bread(sb, le32_to_cpu(*data));
+-      if (!dind) {
+-              err = -EIO;
++      dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
++      if (IS_ERR(dind)) {
++              err = PTR_ERR(dind);
++              dind = NULL;
+               goto exit_free;
+       }
+ 
+@@ -970,9 +969,10 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
+                       err = -EINVAL;
+                       goto exit_bh;
+               }
+-              primary[res] = sb_bread(sb, blk);
+-              if (!primary[res]) {
+-                      err = -EIO;
++              primary[res] = ext4_sb_bread(sb, blk, 0);
++              if (IS_ERR(primary[res])) {
++                      err = PTR_ERR(primary[res]);
++                      primary[res] = NULL;
+                       goto exit_bh;
+               }
+               gdbackups = verify_reserved_gdb(sb, group, primary[res]);
+@@ -1602,7 +1602,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
+       }
+ 
+       if (reserved_gdb || gdb_off == 0) {
+-              if (ext4_has_feature_resize_inode(sb) ||
++              if (!ext4_has_feature_resize_inode(sb) ||
+                   !le16_to_cpu(es->s_reserved_gdt_blocks)) {
+                       ext4_warning(sb,
+                                    "No reserved GDT blocks, can't resize");
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 7fd64f5f70f0..77300b8ca211 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -140,6 +140,29 @@ MODULE_ALIAS_FS("ext3");
+ MODULE_ALIAS("ext3");
+ #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
+ 
++/*
++ * This works like sb_bread() except it uses ERR_PTR for error
++ * returns.  Currently with sb_bread it's impossible to distinguish
++ * between ENOMEM and EIO situations (since both result in a NULL
++ * return.
++ */
++struct buffer_head *
++ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags)
++{
++      struct buffer_head *bh = sb_getblk(sb, block);
++
++      if (bh == NULL)
++              return ERR_PTR(-ENOMEM);
++      if (buffer_uptodate(bh))
++              return bh;
++      ll_rw_block(REQ_OP_READ, REQ_META | op_flags, 1, &bh);
++      wait_on_buffer(bh);
++      if (buffer_uptodate(bh))
++              return bh;
++      put_bh(bh);
++      return ERR_PTR(-EIO);
++}
++
+ static int ext4_verify_csum_type(struct super_block *sb,
+                                struct ext4_super_block *es)
+ {
+@@ -1130,6 +1153,16 @@ static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
+                                   ext4_nfs_get_inode);
+ }
+ 
++static int ext4_nfs_commit_metadata(struct inode *inode)
++{
++      struct writeback_control wbc = {
++              .sync_mode = WB_SYNC_ALL
++      };
++
++      trace_ext4_nfs_commit_metadata(inode);
++      return ext4_write_inode(inode, &wbc);
++}
++
+ /*
+  * Try to release metadata pages (indirect blocks, directories) which are
+  * mapped via the block device.  Since these pages could have journal heads
+@@ -1338,6 +1371,7 @@ static const struct export_operations ext4_export_ops = {
+       .fh_to_dentry = ext4_fh_to_dentry,
+       .fh_to_parent = ext4_fh_to_parent,
+       .get_parent = ext4_get_parent,
++      .commit_metadata = ext4_nfs_commit_metadata,
+ };
+ 
+ enum {
+@@ -5613,9 +5647,9 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
+       qf_inode->i_flags |= S_NOQUOTA;
+       lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
+       err = dquot_enable(qf_inode, type, format_id, flags);
+-      iput(qf_inode);
+       if (err)
+               lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
++      iput(qf_inode);
+ 
+       return err;
+ }
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index a5923a1d0ff4..311761a6ef6d 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -521,14 +521,13 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
+       ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
+                 name_index, name, buffer, (long)buffer_size);
+ 
+-      error = -ENODATA;
+       if (!EXT4_I(inode)->i_file_acl)
+-              goto cleanup;
++              return -ENODATA;
+       ea_idebug(inode, "reading block %llu",
+                 (unsigned long long)EXT4_I(inode)->i_file_acl);
+-      bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
+-      if (!bh)
+-              goto cleanup;
++      bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
++      if (IS_ERR(bh))
++              return PTR_ERR(bh);
+       ea_bdebug(bh, "b_count=%d, refcount=%d",
+               atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
+       error = ext4_xattr_check_block(inode, bh);
+@@ -695,26 +694,23 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
+       ea_idebug(inode, "buffer=%p, buffer_size=%ld",
+                 buffer, (long)buffer_size);
+ 
+-      error = 0;
+       if (!EXT4_I(inode)->i_file_acl)
+-              goto cleanup;
++              return 0;
+       ea_idebug(inode, "reading block %llu",
+                 (unsigned long long)EXT4_I(inode)->i_file_acl);
+-      bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
+-      error = -EIO;
+-      if (!bh)
+-              goto cleanup;
++      bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
++      if (IS_ERR(bh))
++              return PTR_ERR(bh);
+       ea_bdebug(bh, "b_count=%d, refcount=%d",
+               atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
+       error = ext4_xattr_check_block(inode, bh);
+       if (error)
+               goto cleanup;
+       ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
+-      error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
+-
++      error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer,
++                                      buffer_size);
+ cleanup:
+       brelse(bh);
+-
+       return error;
+ }
+ 
+@@ -829,9 +825,9 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
+       }
+ 
+       if (EXT4_I(inode)->i_file_acl) {
+-              bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
+-              if (!bh) {
+-                      ret = -EIO;
++              bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
++              if (IS_ERR(bh)) {
++                      ret = PTR_ERR(bh);
+                       goto out;
+               }
+ 
+@@ -1824,16 +1820,15 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
+ 
+       if (EXT4_I(inode)->i_file_acl) {
+               /* The inode already has an extended attribute block. */
+-              bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
+-              error = -EIO;
+-              if (!bs->bh)
+-                      goto cleanup;
++              bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
++              if (IS_ERR(bs->bh))
++                      return PTR_ERR(bs->bh);
+               ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
+                       atomic_read(&(bs->bh->b_count)),
+                       le32_to_cpu(BHDR(bs->bh)->h_refcount));
+               error = ext4_xattr_check_block(inode, bs->bh);
+               if (error)
+-                      goto cleanup;
++                      return error;
+               /* Find the named attribute. */
+               bs->s.base = BHDR(bs->bh);
+               bs->s.first = BFIRST(bs->bh);
+@@ -1842,13 +1837,10 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
+               error = xattr_find_entry(inode, &bs->s.here, bs->s.end,
+                                        i->name_index, i->name, 1);
+               if (error && error != -ENODATA)
+-                      goto cleanup;
++                      return error;
+               bs->s.not_found = error;
+       }
+-      error = 0;
+-
+-cleanup:
+-      return error;
++      return 0;
+ }
+ 
+ static int
+@@ -2277,9 +2269,9 @@ static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
+ 
+       if (!EXT4_I(inode)->i_file_acl)
+               return NULL;
+-      bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
+-      if (!bh)
+-              return ERR_PTR(-EIO);
++      bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
++      if (IS_ERR(bh))
++              return bh;
+       error = ext4_xattr_check_block(inode, bh);
+       if (error) {
+               brelse(bh);
+@@ -2732,7 +2724,7 @@ retry:
+       base = IFIRST(header);
+       end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+       min_offs = end - base;
+-      total_ino = sizeof(struct ext4_xattr_ibody_header);
++      total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32);
+ 
+       error = xattr_check_inode(inode, header, end);
+       if (error)
+@@ -2749,10 +2741,11 @@ retry:
+       if (EXT4_I(inode)->i_file_acl) {
+               struct buffer_head *bh;
+ 
+-              bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
+-              error = -EIO;
+-              if (!bh)
++              bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
++              if (IS_ERR(bh)) {
++                      error = PTR_ERR(bh);
+                       goto cleanup;
++              }
+               error = ext4_xattr_check_block(inode, bh);
+               if (error) {
+                       brelse(bh);
+@@ -2906,11 +2899,12 @@ int ext4_xattr_delete_inode(handle_t *handle, struct 
inode *inode,
+       }
+ 
+       if (EXT4_I(inode)->i_file_acl) {
+-              bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
+-              if (!bh) {
+-                      EXT4_ERROR_INODE(inode, "block %llu read error",
+-                                       EXT4_I(inode)->i_file_acl);
+-                      error = -EIO;
++              bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
++              if (IS_ERR(bh)) {
++                      error = PTR_ERR(bh);
++                      if (error == -EIO)
++                              EXT4_ERROR_INODE(inode, "block %llu read error",
++                                               EXT4_I(inode)->i_file_acl);
+                       goto cleanup;
+               }
+               error = ext4_xattr_check_block(inode, bh);
+@@ -3063,8 +3057,10 @@ ext4_xattr_block_cache_find(struct inode *inode,
+       while (ce) {
+               struct buffer_head *bh;
+ 
+-              bh = sb_bread(inode->i_sb, ce->e_value);
+-              if (!bh) {
++              bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO);
++              if (IS_ERR(bh)) {
++                      if (PTR_ERR(bh) == -ENOMEM)
++                              return NULL;
+                       EXT4_ERROR_INODE(inode, "block %lu read error",
+                                        (unsigned long)ce->e_value);
+               } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index de4de4ebe64c..fc5c41257e68 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1897,10 +1897,10 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
+               return 1;
+       }
+ 
+-      if (segment_count > (le32_to_cpu(raw_super->block_count) >> 9)) {
++      if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
+               f2fs_msg(sb, KERN_INFO,
+-                      "Wrong segment_count / block_count (%u > %u)",
+-                      segment_count, le32_to_cpu(raw_super->block_count));
++                      "Wrong segment_count / block_count (%u > %llu)",
++                      segment_count, le64_to_cpu(raw_super->block_count));
+               return 1;
+       }
+ 
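The f2fs change matters because block_count is a 64-bit on-disk field; converting it with the 32-bit helper silently drops the high bits before the sanity check runs. A tiny standalone illustration of that truncation (the value is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t block_count = 1ULL << 33;	/* does not fit in 32 bits */

	printf("as u32: %u\n", (uint32_t)block_count);            /* prints 0 */
	printf("as u64: %llu\n", (unsigned long long)block_count);
	return 0;
}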
+diff --git a/include/linux/msi.h b/include/linux/msi.h
+index cdd069cf9ed8..a89cdea8795a 100644
+--- a/include/linux/msi.h
++++ b/include/linux/msi.h
+@@ -116,6 +116,8 @@ struct msi_desc {
+       list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
+ #define for_each_msi_entry(desc, dev) \
+       list_for_each_entry((desc), dev_to_msi_list((dev)), list)
++#define for_each_msi_entry_safe(desc, tmp, dev)       \
++      list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
+ 
+ #ifdef CONFIG_PCI_MSI
+ #define first_pci_msi_entry(pdev)     first_msi_entry(&(pdev)->dev)
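for_each_msi_entry_safe() is for callers that free descriptors while walking the list: the successor must be fetched before the current entry is released. The same idea in a minimal userspace form, using a hypothetical singly linked list rather than the kernel list API:

#include <stdlib.h>

struct node {
	struct node *next;
};

/* Free every node; read ->next before free() invalidates the node. */
static void free_all(struct node **head)
{
	struct node *n = *head;

	while (n) {
		struct node *next = n->next;

		free(n);
		n = next;
	}
	*head = NULL;
}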
+diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
+index e8b12b79a0de..dc396196585a 100644
+--- a/include/linux/ptr_ring.h
++++ b/include/linux/ptr_ring.h
+@@ -551,6 +551,8 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
+               else if (destroy)
+                       destroy(ptr);
+ 
++      if (producer >= size)
++              producer = 0;
+       __ptr_ring_set_size(r, size);
+       r->producer = producer;
+       r->consumer_head = 0;
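The added check handles the case where the copied entries exactly fill the smaller ring: the producer index then equals the new size and must wrap back to slot 0, otherwise a later insertion would write one slot past the array. The invariant, stated as a small illustrative helper (not from the patch):

/* Keep a ring index strictly below the ring size. */
static inline int ring_wrap(int idx, int size)
{
	return idx >= size ? 0 : idx;
}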
+diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
+new file mode 100644
+index 000000000000..4b71a2f4c351
+--- /dev/null
++++ b/include/net/netfilter/nf_conntrack_count.h
+@@ -0,0 +1,15 @@
++#ifndef _NF_CONNTRACK_COUNT_H
++#define _NF_CONNTRACK_COUNT_H
++
++unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
++                               const struct nf_conntrack_tuple *tuple,
++                               const struct nf_conntrack_zone *zone,
++                               bool *addit);
++
++bool nf_conncount_add(struct hlist_head *head,
++                    const struct nf_conntrack_tuple *tuple,
++                    const struct nf_conntrack_zone *zone);
++
++void nf_conncount_cache_free(struct hlist_head *hhead);
++
++#endif
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 64a330544dad..4280e96d4b46 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -292,6 +292,7 @@ struct sock_common {
+   *   @sk_filter: socket filtering instructions
+   *   @sk_timer: sock cleanup timer
+   *   @sk_stamp: time stamp of last packet received
++  *   @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
+   *   @sk_tsflags: SO_TIMESTAMPING socket options
+   *   @sk_tskey: counter to disambiguate concurrent tstamp requests
+   *   @sk_zckey: counter to order MSG_ZEROCOPY notifications
+@@ -457,6 +458,9 @@ struct sock {
+       const struct cred       *sk_peer_cred;
+       long                    sk_rcvtimeo;
+       ktime_t                 sk_stamp;
++#if BITS_PER_LONG==32
++      seqlock_t               sk_stamp_seq;
++#endif
+       u16                     sk_tsflags;
+       u8                      sk_shutdown;
+       u32                     sk_tskey;
+@@ -2201,6 +2205,34 @@ static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
+       atomic_add(segs, &sk->sk_drops);
+ }
+ 
++static inline ktime_t sock_read_timestamp(struct sock *sk)
++{
++#if BITS_PER_LONG==32
++      unsigned int seq;
++      ktime_t kt;
++
++      do {
++              seq = read_seqbegin(&sk->sk_stamp_seq);
++              kt = sk->sk_stamp;
++      } while (read_seqretry(&sk->sk_stamp_seq, seq));
++
++      return kt;
++#else
++      return sk->sk_stamp;
++#endif
++}
++
++static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
++{
++#if BITS_PER_LONG==32
++      write_seqlock(&sk->sk_stamp_seq);
++      sk->sk_stamp = kt;
++      write_sequnlock(&sk->sk_stamp_seq);
++#else
++      sk->sk_stamp = kt;
++#endif
++}
++
+ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+                          struct sk_buff *skb);
+ void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+@@ -2225,7 +2257,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
+            (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
+               __sock_recv_timestamp(msg, sk, skb);
+       else
+-              sk->sk_stamp = kt;
++              sock_write_timestamp(sk, kt);
+ 
+       if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
+               __sock_recv_wifi_status(msg, sk, skb);
+@@ -2246,9 +2278,9 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
+       if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
+               __sock_recv_ts_and_drops(msg, sk, skb);
+       else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
+-              sk->sk_stamp = skb->tstamp;
++              sock_write_timestamp(sk, skb->tstamp);
+       else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
+-              sk->sk_stamp = 0;
++              sock_write_timestamp(sk, 0);
+ }
+ 
+ void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
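sock_read_timestamp()/sock_write_timestamp() exist because a 64-bit ktime_t store is not atomic on 32-bit machines, so a concurrent reader could observe a torn value; the seqlock retry loop closes that window while leaving 64-bit builds untouched. The same pattern applied to a hypothetical 64-bit counter, assuming kernel context (the struct and helpers are illustrative):

#include <linux/seqlock.h>
#include <linux/types.h>

struct byte_stats {
	seqlock_t lock;		/* initialise once with seqlock_init() */
	u64       bytes;	/* could be torn on 32-bit without the lock */
};

static void byte_stats_add(struct byte_stats *s, u64 n)
{
	write_seqlock(&s->lock);
	s->bytes += n;
	write_sequnlock(&s->lock);
}

static u64 byte_stats_read(struct byte_stats *s)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqbegin(&s->lock);
		val = s->bytes;
	} while (read_seqretry(&s->lock, seq));

	return val;
}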
+diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
+index 4d0e3af4e561..3902b6960db2 100644
+--- a/include/trace/events/ext4.h
++++ b/include/trace/events/ext4.h
+@@ -225,6 +225,26 @@ TRACE_EVENT(ext4_drop_inode,
+                 (unsigned long) __entry->ino, __entry->drop)
+ );
+ 
++TRACE_EVENT(ext4_nfs_commit_metadata,
++      TP_PROTO(struct inode *inode),
++
++      TP_ARGS(inode),
++
++      TP_STRUCT__entry(
++              __field(        dev_t,  dev                     )
++              __field(        ino_t,  ino                     )
++      ),
++
++      TP_fast_assign(
++              __entry->dev    = inode->i_sb->s_dev;
++              __entry->ino    = inode->i_ino;
++      ),
++
++      TP_printk("dev %d,%d ino %lu",
++                MAJOR(__entry->dev), MINOR(__entry->dev),
++                (unsigned long) __entry->ino)
++);
++
+ TRACE_EVENT(ext4_mark_inode_dirty,
+       TP_PROTO(struct inode *inode, unsigned long IP),
+ 
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 3fc11b8851ac..109c32c56de7 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -4080,20 +4080,25 @@ static void css_task_iter_advance(struct css_task_iter *it)
+ 
+       lockdep_assert_held(&css_set_lock);
+ repeat:
+-      /*
+-       * Advance iterator to find next entry.  cset->tasks is consumed
+-       * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
+-       * next cset.
+-       */
+-      next = it->task_pos->next;
++      if (it->task_pos) {
++              /*
++               * Advance iterator to find next entry.  cset->tasks is
++               * consumed first and then ->mg_tasks.  After ->mg_tasks,
++               * we move onto the next cset.
++               */
++              next = it->task_pos->next;
+ 
+-      if (next == it->tasks_head)
+-              next = it->mg_tasks_head->next;
++              if (next == it->tasks_head)
++                      next = it->mg_tasks_head->next;
+ 
+-      if (next == it->mg_tasks_head)
++              if (next == it->mg_tasks_head)
++                      css_task_iter_advance_css_set(it);
++              else
++                      it->task_pos = next;
++      } else {
++              /* called from start, proceed to the first cset */
+               css_task_iter_advance_css_set(it);
+-      else
+-              it->task_pos = next;
++      }
+ 
+       /* if PROCS, skip over tasks which aren't group leaders */
+       if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
+@@ -4133,7 +4138,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
+ 
+       it->cset_head = it->cset_pos;
+ 
+-      css_task_iter_advance_css_set(it);
++      css_task_iter_advance(it);
+ 
+       spin_unlock_irq(&css_set_lock);
+ }
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index f3f9d18891de..d783d90c20f1 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -654,15 +654,22 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
+                       break;
+               }
+ 
+-              dev = dev_get_by_name(&init_net, devname);
++              rtnl_lock();
++              dev = __dev_get_by_name(&init_net, devname);
+               if (!dev) {
++                      rtnl_unlock();
+                       res = -ENODEV;
+                       break;
+               }
+ 
+               ax25->ax25_dev = ax25_dev_ax25dev(dev);
++              if (!ax25->ax25_dev) {
++                      rtnl_unlock();
++                      res = -ENODEV;
++                      break;
++              }
+               ax25_fillin_cb(ax25, ax25->ax25_dev);
+-              dev_put(dev);
++              rtnl_unlock();
+               break;
+ 
+       default:
+diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
+index 9a3a301e1e2f..d92195cd7834 100644
+--- a/net/ax25/ax25_dev.c
++++ b/net/ax25/ax25_dev.c
+@@ -116,6 +116,7 @@ void ax25_dev_device_down(struct net_device *dev)
+       if ((s = ax25_dev_list) == ax25_dev) {
+               ax25_dev_list = s->next;
+               spin_unlock_bh(&ax25_dev_lock);
++              dev->ax25_ptr = NULL;
+               dev_put(dev);
+               kfree(ax25_dev);
+               return;
+@@ -125,6 +126,7 @@ void ax25_dev_device_down(struct net_device *dev)
+               if (s->next == ax25_dev) {
+                       s->next = ax25_dev->next;
+                       spin_unlock_bh(&ax25_dev_lock);
++                      dev->ax25_ptr = NULL;
+                       dev_put(dev);
+                       kfree(ax25_dev);
+                       return;
+diff --git a/net/compat.c b/net/compat.c
+index 32ed993588d6..790851e70dab 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -462,12 +462,14 @@ int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
+       err = -ENOENT;
+       if (!sock_flag(sk, SOCK_TIMESTAMP))
+               sock_enable_timestamp(sk, SOCK_TIMESTAMP);
+-      tv = ktime_to_timeval(sk->sk_stamp);
++      tv = ktime_to_timeval(sock_read_timestamp(sk));
++
+       if (tv.tv_sec == -1)
+               return err;
+       if (tv.tv_sec == 0) {
+-              sk->sk_stamp = ktime_get_real();
+-              tv = ktime_to_timeval(sk->sk_stamp);
++              ktime_t kt = ktime_get_real();
++              sock_write_timestamp(sk, kt);
++              tv = ktime_to_timeval(kt);
+       }
+       err = 0;
+       if (put_user(tv.tv_sec, &ctv->tv_sec) ||
+@@ -490,12 +492,13 @@ int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *usersta
+       err = -ENOENT;
+       if (!sock_flag(sk, SOCK_TIMESTAMP))
+               sock_enable_timestamp(sk, SOCK_TIMESTAMP);
+-      ts = ktime_to_timespec(sk->sk_stamp);
++      ts = ktime_to_timespec(sock_read_timestamp(sk));
+       if (ts.tv_sec == -1)
+               return err;
+       if (ts.tv_sec == 0) {
+-              sk->sk_stamp = ktime_get_real();
+-              ts = ktime_to_timespec(sk->sk_stamp);
++              ktime_t kt = ktime_get_real();
++              sock_write_timestamp(sk, kt);
++              ts = ktime_to_timespec(kt);
+       }
+       err = 0;
+       if (put_user(ts.tv_sec, &ctv->tv_sec) ||
+diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
+index 4b54e5f107c6..acf45ddbe924 100644
+--- a/net/core/gro_cells.c
++++ b/net/core/gro_cells.c
+@@ -84,6 +84,7 @@ void gro_cells_destroy(struct gro_cells *gcells)
+       for_each_possible_cpu(i) {
+               struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
+ 
++              napi_disable(&cell->napi);
+               netif_napi_del(&cell->napi);
+               __skb_queue_purge(&cell->napi_skbs);
+       }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 36f19458e2fe..01cae48d6eef 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2730,6 +2730,9 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+       sk->sk_sndtimeo         =       MAX_SCHEDULE_TIMEOUT;
+ 
+       sk->sk_stamp = SK_DEFAULT_STAMP;
++#if BITS_PER_LONG==32
++      seqlock_init(&sk->sk_stamp_seq);
++#endif
+       atomic_set(&sk->sk_zckey, 0);
+ 
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
+index ca53efa17be1..8bec827081cd 100644
+--- a/net/ieee802154/6lowpan/tx.c
++++ b/net/ieee802154/6lowpan/tx.c
+@@ -48,6 +48,9 @@ int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
+       const struct ipv6hdr *hdr = ipv6_hdr(skb);
+       struct neighbour *n;
+ 
++      if (!daddr)
++              return -EINVAL;
++
+       /* TODO:
+        * if this package isn't ipv6 one, where should it be routed?
+        */
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index c9c35b61a027..857ec3dbb742 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -991,7 +991,9 @@ next_chunk:
+                       if (!inet_diag_bc_sk(bc, sk))
+                               goto next_normal;
+ 
+-                      sock_hold(sk);
++                      if (!refcount_inc_not_zero(&sk->sk_refcnt))
++                              goto next_normal;
++
+                       num_arr[accum] = num;
+                       sk_arr[accum] = sk;
+                       if (++accum == SKARR_SZ)
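Replacing the unconditional sock_hold() with refcount_inc_not_zero() is the usual way to take a reference during a lockless walk: an entry whose count has already dropped to zero is being torn down and must simply be skipped. A minimal sketch of that lookup idiom with a made-up object type (kernel context assumed):

#include <linux/refcount.h>

struct entry {
	refcount_t refcnt;
};

/* Return the entry with a reference held, or NULL if it is dying. */
static struct entry *entry_try_get(struct entry *e)
{
	if (!refcount_inc_not_zero(&e->refcnt))
		return NULL;	/* count was zero: teardown in progress */
	return e;
}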
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index f686d7761acb..f8bbd693c19c 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -347,10 +347,10 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+       struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
+       struct rb_node **rbn, *parent;
+       struct sk_buff *skb1, *prev_tail;
++      int ihl, end, skb1_run_end;
+       struct net_device *dev;
+       unsigned int fragsize;
+       int flags, offset;
+-      int ihl, end;
+       int err = -ENOENT;
+       u8 ecn;
+ 
+@@ -420,7 +420,9 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+        *   overlapping fragment, the entire datagram (and any constituent
+        *   fragments) MUST be silently discarded.
+        *
+-       * We do the same here for IPv4 (and increment an snmp counter).
++       * We do the same here for IPv4 (and increment an snmp counter) but
++       * we do not want to drop the whole queue in response to a duplicate
++       * fragment.
+        */
+ 
+       /* Find out where to put this fragment.  */
+@@ -444,13 +446,17 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+               do {
+                       parent = *rbn;
+                       skb1 = rb_to_skb(parent);
++                      skb1_run_end = skb1->ip_defrag_offset +
++                                     FRAG_CB(skb1)->frag_run_len;
+                       if (end <= skb1->ip_defrag_offset)
+                               rbn = &parent->rb_left;
+-                      else if (offset >= skb1->ip_defrag_offset +
+-                                              FRAG_CB(skb1)->frag_run_len)
++                      else if (offset >= skb1_run_end)
+                               rbn = &parent->rb_right;
+-                      else /* Found an overlap with skb1. */
+-                              goto discard_qp;
++                      else if (offset >= skb1->ip_defrag_offset &&
++                               end <= skb1_run_end)
++                              goto err; /* No new data, potential duplicate */
++                      else
++                              goto discard_qp; /* Found an overlap */
+               } while (*rbn);
+               /* Here we have parent properly set, and rbn pointing to
+                * one of its NULL left/right children. Insert skb.
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index 9f314a5e9f27..ce3d5f734fdb 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -68,6 +68,8 @@
+ #include <linux/netconf.h>
+ #include <net/nexthop.h>
+ 
++#include <linux/nospec.h>
++
+ struct ipmr_rule {
+       struct fib_rule         common;
+ };
+@@ -1620,6 +1622,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+                       return -EFAULT;
+               if (vr.vifi >= mrt->maxvif)
+                       return -EINVAL;
++              vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
+               read_lock(&mrt_lock);
+               vif = &mrt->vif_table[vr.vifi];
+               if (VIF_EXISTS(mrt, vr.vifi)) {
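array_index_nospec() clamps a user-supplied index after the bounds check so it cannot be used to read past the table under speculative execution (Spectre v1). A short usage sketch; the vif_entry table and lookup helper are made up for illustration, only the <linux/nospec.h> interface is from the tree:

#include <linux/nospec.h>

struct vif_entry {
	int ifindex;
};

/* Bounds-check first, then clamp the index before the array access. */
static struct vif_entry *vif_lookup(struct vif_entry *table,
				    unsigned int nr_entries,
				    unsigned int idx)
{
	if (idx >= nr_entries)
		return NULL;
	idx = array_index_nospec(idx, nr_entries);
	return &table[idx];
}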
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 948f304db0a3..1812c2a748ff 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -908,6 +908,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
+                       goto drop;
+               if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
+                       goto drop;
++              ipv6h = ipv6_hdr(skb);
+               if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
+                       goto drop;
+               if (iptunnel_pull_header(skb, 0, tpi->proto, false))
+diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
+index b283f293ee4a..caad40d6e74d 100644
+--- a/net/ipv6/ip6_udp_tunnel.c
++++ b/net/ipv6/ip6_udp_tunnel.c
+@@ -15,7 +15,7 @@
+ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
+                    struct socket **sockp)
+ {
+-      struct sockaddr_in6 udp6_addr;
++      struct sockaddr_in6 udp6_addr = {};
+       int err;
+       struct socket *sock = NULL;
+ 
+@@ -42,6 +42,7 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
+               goto error;
+ 
+       if (cfg->peer_udp_port) {
++              memset(&udp6_addr, 0, sizeof(udp6_addr));
+               udp6_addr.sin6_family = AF_INET6;
+               memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
+                      sizeof(udp6_addr.sin6_addr));
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index db5a24f09335..6b2416b4a53e 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -318,6 +318,7 @@ static int vti6_rcv(struct sk_buff *skb)
+                       return 0;
+               }
+ 
++              ipv6h = ipv6_hdr(skb);
+               if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
+                       t->dev->stats.rx_dropped++;
+                       rcu_read_unlock();
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 8015e74fd7d9..b2fdb3fdd217 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -72,6 +72,8 @@ struct mr6_table {
+ #endif
+ };
+ 
++#include <linux/nospec.h>
++
+ struct ip6mr_rule {
+       struct fib_rule         common;
+ };
+@@ -1883,6 +1885,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
+                       return -EFAULT;
+               if (vr.mifi >= mrt->maxvif)
+                       return -EINVAL;
++              vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
+               read_lock(&mrt_lock);
+               vif = &mrt->vif6_table[vr.mifi];
+               if (MIF_EXISTS(mrt, vr.mifi)) {
+@@ -1957,6 +1960,7 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+                       return -EFAULT;
+               if (vr.mifi >= mrt->maxvif)
+                       return -EINVAL;
++              vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
+               read_lock(&mrt_lock);
+               vif = &mrt->vif6_table[vr.mifi];
+               if (MIF_EXISTS(mrt, vr.mifi)) {
+diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
+index ffa8eec980e9..b1646c24a632 100644
+--- a/net/netfilter/xt_connlimit.c
++++ b/net/netfilter/xt_connlimit.c
+@@ -46,7 +46,9 @@
+ struct xt_connlimit_conn {
+       struct hlist_node               node;
+       struct nf_conntrack_tuple       tuple;
+-      union nf_inet_addr              addr;
++      struct nf_conntrack_zone        zone;
++      int                             cpu;
++      u32                             jiffies32;
+ };
+ 
+ struct xt_connlimit_rb {
+@@ -115,9 +117,9 @@ same_source_net(const union nf_inet_addr *addr,
+       }
+ }
+ 
+-static bool add_hlist(struct hlist_head *head,
++bool nf_conncount_add(struct hlist_head *head,
+                     const struct nf_conntrack_tuple *tuple,
+-                    const union nf_inet_addr *addr)
++                    const struct nf_conntrack_zone *zone)
+ {
+       struct xt_connlimit_conn *conn;
+ 
+@@ -125,37 +127,78 @@ static bool add_hlist(struct hlist_head *head,
+       if (conn == NULL)
+               return false;
+       conn->tuple = *tuple;
+-      conn->addr = *addr;
++      conn->zone = *zone;
++      conn->cpu = raw_smp_processor_id();
++      conn->jiffies32 = (u32)jiffies;
+       hlist_add_head(&conn->node, head);
+       return true;
+ }
++EXPORT_SYMBOL_GPL(nf_conncount_add);
+ 
+-static unsigned int check_hlist(struct net *net,
+-                              struct hlist_head *head,
+-                              const struct nf_conntrack_tuple *tuple,
+-                              const struct nf_conntrack_zone *zone,
+-                              bool *addit)
++static const struct nf_conntrack_tuple_hash *
++find_or_evict(struct net *net, struct xt_connlimit_conn *conn)
++{
++      const struct nf_conntrack_tuple_hash *found;
++      unsigned long a, b;
++      int cpu = raw_smp_processor_id();
++      u32 age;
++
++      found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
++      if (found)
++              return found;
++      b = conn->jiffies32;
++      a = (u32)jiffies;
++
++      /* conn might have been added just before by another cpu and
++       * might still be unconfirmed.  In this case, nf_conntrack_find()
++       * returns no result.  Thus only evict if this cpu added the
++       * stale entry or if the entry is older than two jiffies.
++       */
++      age = a - b;
++      if (conn->cpu == cpu || age >= 2) {
++              hlist_del(&conn->node);
++              kmem_cache_free(connlimit_conn_cachep, conn);
++              return ERR_PTR(-ENOENT);
++      }
++
++      return ERR_PTR(-EAGAIN);
++}
++
++unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
++                               const struct nf_conntrack_tuple *tuple,
++                               const struct nf_conntrack_zone *zone,
++                               bool *addit)
+ {
+       const struct nf_conntrack_tuple_hash *found;
+       struct xt_connlimit_conn *conn;
+-      struct hlist_node *n;
+       struct nf_conn *found_ct;
++      struct hlist_node *n;
+       unsigned int length = 0;
+ 
+       *addit = true;
+ 
+       /* check the saved connections */
+       hlist_for_each_entry_safe(conn, n, head, node) {
+-              found = nf_conntrack_find_get(net, zone, &conn->tuple);
+-              if (found == NULL) {
+-                      hlist_del(&conn->node);
+-                      kmem_cache_free(connlimit_conn_cachep, conn);
++              found = find_or_evict(net, conn);
++              if (IS_ERR(found)) {
++                      /* Not found, but might be about to be confirmed */
++                      if (PTR_ERR(found) == -EAGAIN) {
++                              length++;
++                              if (!tuple)
++                                      continue;
++
++                              if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
++                                  nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
++                                  nf_ct_zone_id(zone, zone->dir))
++                                      *addit = false;
++                      }
+                       continue;
+               }
+ 
+               found_ct = nf_ct_tuplehash_to_ctrack(found);
+ 
+-              if (nf_ct_tuple_equal(&conn->tuple, tuple)) {
++              if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
++                  nf_ct_zone_equal(found_ct, zone, zone->dir)) {
+                       /*
+                        * Just to be sure we have it only once in the list.
+                        * We should not see tuples twice unless someone hooks
+@@ -179,6 +222,7 @@ static unsigned int check_hlist(struct net *net,
+ 
+       return length;
+ }
++EXPORT_SYMBOL_GPL(nf_conncount_lookup);
+ 
+ static void tree_nodes_free(struct rb_root *root,
+                           struct xt_connlimit_rb *gc_nodes[],
+@@ -225,13 +269,15 @@ count_tree(struct net *net, struct rb_root *root,
+               } else {
+                       /* same source network -> be counted! */
+                       unsigned int count;
+-                      count = check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
++
++                      count = nf_conncount_lookup(net, &rbconn->hhead, tuple,
++                                                  zone, &addit);
+ 
+                       tree_nodes_free(root, gc_nodes, gc_count);
+                       if (!addit)
+                               return count;
+ 
+-                      if (!add_hlist(&rbconn->hhead, tuple, addr))
++                      if (!nf_conncount_add(&rbconn->hhead, tuple, zone))
+                               return 0; /* hotdrop */
+ 
+                       return count + 1;
+@@ -241,7 +287,7 @@ count_tree(struct net *net, struct rb_root *root,
+                       continue;
+ 
+               /* only used for GC on hhead, retval and 'addit' ignored */
+-              check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
++              nf_conncount_lookup(net, &rbconn->hhead, tuple, zone, &addit);
+               if (hlist_empty(&rbconn->hhead))
+                       gc_nodes[gc_count++] = rbconn;
+       }
+@@ -270,7 +316,7 @@ count_tree(struct net *net, struct rb_root *root,
+       }
+ 
+       conn->tuple = *tuple;
+-      conn->addr = *addr;
++      conn->zone = *zone;
+       rbconn->addr = *addr;
+ 
+       INIT_HLIST_HEAD(&rbconn->hhead);
+@@ -382,11 +428,19 @@ static int connlimit_mt_check(const struct xt_mtchk_param *par)
+       return 0;
+ }
+ 
+-static void destroy_tree(struct rb_root *r)
++void nf_conncount_cache_free(struct hlist_head *hhead)
+ {
+       struct xt_connlimit_conn *conn;
+-      struct xt_connlimit_rb *rbconn;
+       struct hlist_node *n;
++
++      hlist_for_each_entry_safe(conn, n, hhead, node)
++              kmem_cache_free(connlimit_conn_cachep, conn);
++}
++EXPORT_SYMBOL_GPL(nf_conncount_cache_free);
++
++static void destroy_tree(struct rb_root *r)
++{
++      struct xt_connlimit_rb *rbconn;
+       struct rb_node *node;
+ 
+       while ((node = rb_first(r)) != NULL) {
+@@ -394,8 +448,7 @@ static void destroy_tree(struct rb_root *r)
+ 
+               rb_erase(node, r);
+ 
+-              hlist_for_each_entry_safe(conn, n, &rbconn->hhead, node)
+-                      kmem_cache_free(connlimit_conn_cachep, conn);
++              nf_conncount_cache_free(&rbconn->hhead);
+ 
+               kmem_cache_free(connlimit_rb_cachep, rbconn);
+       }
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index ebf16f7f9089..fc876b0c3e06 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -153,7 +153,7 @@ static struct sock *nr_find_listener(ax25_address *addr)
+       sk_for_each(s, &nr_list)
+               if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
+                   s->sk_state == TCP_LISTEN) {
+-                      bh_lock_sock(s);
++                      sock_hold(s);
+                       goto found;
+               }
+       s = NULL;
+@@ -174,7 +174,7 @@ static struct sock *nr_find_socket(unsigned char index, unsigned char id)
+               struct nr_sock *nr = nr_sk(s);
+ 
+               if (nr->my_index == index && nr->my_id == id) {
+-                      bh_lock_sock(s);
++                      sock_hold(s);
+                       goto found;
+               }
+       }
+@@ -198,7 +198,7 @@ static struct sock *nr_find_peer(unsigned char index, unsigned char id,
+ 
+               if (nr->your_index == index && nr->your_id == id &&
+                   !ax25cmp(&nr->dest_addr, dest)) {
+-                      bh_lock_sock(s);
++                      sock_hold(s);
+                       goto found;
+               }
+       }
+@@ -224,7 +224,7 @@ static unsigned short nr_find_next_circuit(void)
+               if (i != 0 && j != 0) {
+                       if ((sk=nr_find_socket(i, j)) == NULL)
+                               break;
+-                      bh_unlock_sock(sk);
++                      sock_put(sk);
+               }
+ 
+               id++;
+@@ -919,6 +919,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
+       }
+ 
+       if (sk != NULL) {
++              bh_lock_sock(sk);
+               skb_reset_transport_header(skb);
+ 
+               if (frametype == NR_CONNACK && skb->len == 22)
+@@ -928,6 +929,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
+ 
+               ret = nr_process_rx_frame(sk, skb);
+               bh_unlock_sock(sk);
++              sock_put(sk);
+               return ret;
+       }
+ 
+@@ -959,10 +961,12 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
+           (make = nr_make_new(sk)) == NULL) {
+               nr_transmit_refusal(skb, 0);
+               if (sk)
+-                      bh_unlock_sock(sk);
++                      sock_put(sk);
+               return 0;
+       }
+ 
++      bh_lock_sock(sk);
++
+       window = skb->data[20];
+ 
+       skb->sk             = make;
+@@ -1015,6 +1019,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
+               sk->sk_data_ready(sk);
+ 
+       bh_unlock_sock(sk);
++      sock_put(sk);
+ 
+       nr_insert_socket(make);
+ 
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 88d5b2645bb0..91a323f99d47 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2663,8 +2663,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+                                               sll_addr)))
+                       goto out;
+               proto   = saddr->sll_protocol;
+-              addr    = saddr->sll_addr;
++              addr    = saddr->sll_halen ? saddr->sll_addr : NULL;
+               dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
++              if (addr && dev && saddr->sll_halen < dev->addr_len)
++                      goto out;
+       }
+ 
+       err = -ENXIO;
+@@ -2861,8 +2863,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+               if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
+                       goto out;
+               proto   = saddr->sll_protocol;
+-              addr    = saddr->sll_addr;
++              addr    = saddr->sll_halen ? saddr->sll_addr : NULL;
+               dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
++              if (addr && dev && saddr->sll_halen < dev->addr_len)
++                      goto out;
+       }
+ 
+       err = -ENXIO;
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 853fecdf6374..8002a72aae1a 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -101,6 +101,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
+               if (addr) {
+                       addr->a.v6.sin6_family = AF_INET6;
+                       addr->a.v6.sin6_port = 0;
++                      addr->a.v6.sin6_flowinfo = 0;
+                       addr->a.v6.sin6_addr = ifa->addr;
+                       addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
+                       addr->valid = 1;
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 43ef7be69428..8c71f0929fbb 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -133,8 +133,14 @@ static int smc_release(struct socket *sock)
+               sk->sk_shutdown |= SHUTDOWN_MASK;
+       }
+       if (smc->clcsock) {
++              if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
++                      /* wake up clcsock accept */
++                      rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
++              }
++              mutex_lock(&smc->clcsock_release_lock);
+               sock_release(smc->clcsock);
+               smc->clcsock = NULL;
++              mutex_unlock(&smc->clcsock_release_lock);
+       }
+ 
+       /* detach socket */
+@@ -184,6 +190,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock)
+       INIT_DELAYED_WORK(&smc->sock_put_work, smc_close_sock_put_work);
+       sk->sk_prot->hash(sk);
+       sk_refcnt_debug_inc(sk);
++      mutex_init(&smc->clcsock_release_lock);
+ 
+       return sk;
+ }
+@@ -577,7 +584,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
+       struct sock *sk = &lsmc->sk;
+       struct socket *new_clcsock;
+       struct sock *new_sk;
+-      int rc;
++      int rc = -EINVAL;
+ 
+       release_sock(&lsmc->sk);
+       new_sk = smc_sock_alloc(sock_net(sk), NULL);
+@@ -590,7 +597,10 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
+       }
+       *new_smc = smc_sk(new_sk);
+ 
+-      rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
++      mutex_lock(&lsmc->clcsock_release_lock);
++      if (lsmc->clcsock)
++              rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
++      mutex_unlock(&lsmc->clcsock_release_lock);
+       lock_sock(&lsmc->sk);
+       if  (rc < 0) {
+               lsmc->sk.sk_err = -rc;
+diff --git a/net/smc/smc.h b/net/smc/smc.h
+index 0bee9d16cf29..926a97cc511a 100644
+--- a/net/smc/smc.h
++++ b/net/smc/smc.h
+@@ -185,6 +185,10 @@ struct smc_sock {                         /* smc sock container */
+                                                * started, waiting for unsent
+                                                * data to be sent
+                                                */
++      struct mutex            clcsock_release_lock;
++                                              /* protects clcsock of a listen
++                                               * socket
++                                               * */
+ };
+ 
+ static inline struct smc_sock *smc_sk(const struct sock *sk)
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index ff8e06cd067e..c83df30e9655 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -585,7 +585,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
+               /* Don't enable netstamp, sunrpc doesn't
+                  need that much accuracy */
+       }
+-      svsk->sk_sk->sk_stamp = skb->tstamp;
++      sock_write_timestamp(svsk->sk_sk, skb->tstamp);
+       set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
+ 
+       len  = skb->len;
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 4d2125d258fe..e5f9f43ff15b 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -2261,11 +2261,15 @@ void tipc_sk_reinit(struct net *net)
+                       goto walk_stop;
+ 
+               while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
+-                      spin_lock_bh(&tsk->sk.sk_lock.slock);
++                      sock_hold(&tsk->sk);
++                      rhashtable_walk_stop(&iter);
++                      lock_sock(&tsk->sk);
+                       msg = &tsk->phdr;
+                       msg_set_prevnode(msg, tn->own_addr);
+                       msg_set_orignode(msg, tn->own_addr);
+-                      spin_unlock_bh(&tsk->sk.sk_lock.slock);
++                      release_sock(&tsk->sk);
++                      rhashtable_walk_start(&iter);
++                      sock_put(&tsk->sk);
+               }
+ walk_stop:
+               rhashtable_walk_stop(&iter);
+diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
+index 3deabcab4882..e3cff9d6c092 100644
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -243,10 +243,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
+               }
+ 
+               err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr);
+-              if (err) {
+-                      kfree_skb(_skb);
++              if (err)
+                       goto out;
+-              }
+       }
+       err = 0;
+ out:
+@@ -677,6 +675,11 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
+       if (err)
+               goto err;
+ 
++      if (remote.proto != local.proto) {
++              err = -EINVAL;
++              goto err;
++      }
++
+       b->bcast_addr.media_id = TIPC_MEDIA_TYPE_UDP;
+       b->bcast_addr.broadcast = TIPC_BROADCAST_SUPPORT;
+       rcu_assign_pointer(b->media_ptr, ub);
+diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
+index d5be519b0271..bf7c51644446 100644
+--- a/net/vmw_vsock/vmci_transport.c
++++ b/net/vmw_vsock/vmci_transport.c
+@@ -264,6 +264,31 @@ vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src,
+                                                false);
+ }
+ 
++static int
++vmci_transport_alloc_send_control_pkt(struct sockaddr_vm *src,
++                                    struct sockaddr_vm *dst,
++                                    enum vmci_transport_packet_type type,
++                                    u64 size,
++                                    u64 mode,
++                                    struct vmci_transport_waiting_info *wait,
++                                    u16 proto,
++                                    struct vmci_handle handle)
++{
++      struct vmci_transport_packet *pkt;
++      int err;
++
++      pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
++      if (!pkt)
++              return -ENOMEM;
++
++      err = __vmci_transport_send_control_pkt(pkt, src, dst, type, size,
++                                              mode, wait, proto, handle,
++                                              true);
++      kfree(pkt);
++
++      return err;
++}
++
+ static int
+ vmci_transport_send_control_pkt(struct sock *sk,
+                               enum vmci_transport_packet_type type,
+@@ -273,9 +298,7 @@ vmci_transport_send_control_pkt(struct sock *sk,
+                               u16 proto,
+                               struct vmci_handle handle)
+ {
+-      struct vmci_transport_packet *pkt;
+       struct vsock_sock *vsk;
+-      int err;
+ 
+       vsk = vsock_sk(sk);
+ 
+@@ -285,17 +308,10 @@ vmci_transport_send_control_pkt(struct sock *sk,
+       if (!vsock_addr_bound(&vsk->remote_addr))
+               return -EINVAL;
+ 
+-      pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+-      if (!pkt)
+-              return -ENOMEM;
+-
+-      err = __vmci_transport_send_control_pkt(pkt, &vsk->local_addr,
+-                                              &vsk->remote_addr, type, size,
+-                                              mode, wait, proto, handle,
+-                                              true);
+-      kfree(pkt);
+-
+-      return err;
++      return vmci_transport_alloc_send_control_pkt(&vsk->local_addr,
++                                                   &vsk->remote_addr,
++                                                   type, size, mode,
++                                                   wait, proto, handle);
+ }
+ 
+ static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
+@@ -313,12 +329,29 @@ static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
+ static int vmci_transport_send_reset(struct sock *sk,
+                                    struct vmci_transport_packet *pkt)
+ {
++      struct sockaddr_vm *dst_ptr;
++      struct sockaddr_vm dst;
++      struct vsock_sock *vsk;
++
+       if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
+               return 0;
+-      return vmci_transport_send_control_pkt(sk,
+-                                      VMCI_TRANSPORT_PACKET_TYPE_RST,
+-                                      0, 0, NULL, VSOCK_PROTO_INVALID,
+-                                      VMCI_INVALID_HANDLE);
++
++      vsk = vsock_sk(sk);
++
++      if (!vsock_addr_bound(&vsk->local_addr))
++              return -EINVAL;
++
++      if (vsock_addr_bound(&vsk->remote_addr)) {
++              dst_ptr = &vsk->remote_addr;
++      } else {
++              vsock_addr_init(&dst, pkt->dg.src.context,
++                              pkt->src_port);
++              dst_ptr = &dst;
++      }
++      return vmci_transport_alloc_send_control_pkt(&vsk->local_addr, dst_ptr,
++                                           VMCI_TRANSPORT_PACKET_TYPE_RST,
++                                           0, 0, NULL, VSOCK_PROTO_INVALID,
++                                           VMCI_INVALID_HANDLE);
+ }
+ 
+ static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
+diff --git a/sound/core/pcm.c b/sound/core/pcm.c
+index e8dc1a5afe66..2b5caa8dea2e 100644
+--- a/sound/core/pcm.c
++++ b/sound/core/pcm.c
+@@ -25,6 +25,7 @@
+ #include <linux/time.h>
+ #include <linux/mutex.h>
+ #include <linux/device.h>
++#include <linux/nospec.h>
+ #include <sound/core.h>
+ #include <sound/minors.h>
+ #include <sound/pcm.h>
+@@ -129,6 +130,7 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
+                               return -EFAULT;
+                       if (stream < 0 || stream > 1)
+                               return -EINVAL;
++                      stream = array_index_nospec(stream, 2);
+                       if (get_user(subdevice, &info->subdevice))
+                               return -EFAULT;
+                       mutex_lock(&register_mutex);
+diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
+index ea0d486652c8..ed704cd37b5f 100644
+--- a/sound/firewire/amdtp-stream-trace.h
++++ b/sound/firewire/amdtp-stream-trace.h
+@@ -131,7 +131,7 @@ TRACE_EVENT(in_packet_without_header,
+               __entry->index = index;
+       ),
+       TP_printk(
+-              "%02u %04u %04x %04x %02d %03u %3u %3u %02u %01u %02u",
++              "%02u %04u %04x %04x %02d %03u %02u %03u %02u %01u %02u",
+               __entry->second,
+               __entry->cycle,
+               __entry->src,
+@@ -169,7 +169,7 @@ TRACE_EVENT(out_packet_without_header,
+               __entry->dest = fw_parent_device(s->unit)->node_id;
+               __entry->payload_quadlets = payload_length / 4;
+               __entry->data_blocks = data_blocks,
+-              __entry->data_blocks = s->data_block_counter,
++              __entry->data_block_counter = s->data_block_counter,
+               __entry->packet_index = s->packet_index;
+               __entry->irq = !!in_interrupt();
+               __entry->index = index;
+diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
+index d7d47dc8b5f1..bca1ce8f1c54 100644
+--- a/sound/firewire/amdtp-stream.c
++++ b/sound/firewire/amdtp-stream.c
+@@ -629,15 +629,17 @@ end:
+ }
+ 
+ static int handle_in_packet_without_header(struct amdtp_stream *s,
+-                      unsigned int payload_quadlets, unsigned int cycle,
++                      unsigned int payload_length, unsigned int cycle,
+                       unsigned int index)
+ {
+       __be32 *buffer;
++      unsigned int payload_quadlets;
+       unsigned int data_blocks;
+       struct snd_pcm_substream *pcm;
+       unsigned int pcm_frames;
+ 
+       buffer = s->buffer.packets[s->packet_index].buffer;
++      payload_quadlets = payload_length / 4;
+       data_blocks = payload_quadlets / s->data_block_quadlets;
+ 
+       trace_in_packet_without_header(s, cycle, payload_quadlets, data_blocks,
+diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
+index b47954a6b8ab..bb0c1abf4718 100644
+--- a/sound/firewire/fireface/ff-protocol-ff400.c
++++ b/sound/firewire/fireface/ff-protocol-ff400.c
+@@ -152,7 +152,7 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
+       if (reg == NULL)
+               return -ENOMEM;
+ 
+-      if (enable) {
++      if (!enable) {
+               /*
+                * Each quadlet is corresponding to data channels in a data
+                * blocks in reverse order. Precisely, quadlets for available
+diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
+index d68bb40d3676..5c00e3536767 100644
+--- a/sound/pci/emu10k1/emufx.c
++++ b/sound/pci/emu10k1/emufx.c
+@@ -36,6 +36,7 @@
+ #include <linux/init.h>
+ #include <linux/mutex.h>
+ #include <linux/moduleparam.h>
++#include <linux/nospec.h>
+ 
+ #include <sound/core.h>
+ #include <sound/tlv.h>
+@@ -1033,6 +1034,8 @@ static int snd_emu10k1_ipcm_poke(struct snd_emu10k1 *emu,
+ 
+       if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
+               return -EINVAL;
++      ipcm->substream = array_index_nospec(ipcm->substream,
++                                           EMU10K1_FX8010_PCM_COUNT);
+       if (ipcm->channels > 32)
+               return -EINVAL;
+       pcm = &emu->fx8010.pcm[ipcm->substream];
+@@ -1079,6 +1082,8 @@ static int snd_emu10k1_ipcm_peek(struct snd_emu10k1 *emu,
+ 
+       if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
+               return -EINVAL;
++      ipcm->substream = array_index_nospec(ipcm->substream,
++                                           EMU10K1_FX8010_PCM_COUNT);
+       pcm = &emu->fx8010.pcm[ipcm->substream];
+       mutex_lock(&emu->fx8010.lock);
+       spin_lock_irq(&emu->reg_lock);
+diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
+index 0621920f7617..e85fb04ec7be 100644
+--- a/sound/pci/hda/hda_tegra.c
++++ b/sound/pci/hda/hda_tegra.c
+@@ -249,10 +249,12 @@ static int hda_tegra_suspend(struct device *dev)
+       struct snd_card *card = dev_get_drvdata(dev);
+       struct azx *chip = card->private_data;
+       struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
++      struct hdac_bus *bus = azx_bus(chip);
+ 
+       snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
+ 
+       azx_stop_chip(chip);
++      synchronize_irq(bus->irq);
+       azx_enter_link_reset(chip);
+       hda_tegra_disable_clocks(hda);
+ 
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 0cc0ced1f2ed..0a225dc85044 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -961,6 +961,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+       SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
+       SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
+       SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
++      SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
+       SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
+       SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
+       SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
+index 9f0f73875f01..e41bb4100306 100644
+--- a/sound/pci/rme9652/hdsp.c
++++ b/sound/pci/rme9652/hdsp.c
+@@ -30,6 +30,7 @@
+ #include <linux/math64.h>
+ #include <linux/vmalloc.h>
+ #include <linux/io.h>
++#include <linux/nospec.h>
+ 
+ #include <sound/core.h>
+ #include <sound/control.h>
+@@ -4092,15 +4093,16 @@ static int snd_hdsp_channel_info(struct snd_pcm_substream *substream,
+                                   struct snd_pcm_channel_info *info)
+ {
+       struct hdsp *hdsp = snd_pcm_substream_chip(substream);
+-      int mapped_channel;
++      unsigned int channel = info->channel;
+ 
+-      if (snd_BUG_ON(info->channel >= hdsp->max_channels))
++      if (snd_BUG_ON(channel >= hdsp->max_channels))
+               return -EINVAL;
++      channel = array_index_nospec(channel, hdsp->max_channels);
+ 
+-      if ((mapped_channel = hdsp->channel_map[info->channel]) < 0)
++      if (hdsp->channel_map[channel] < 0)
+               return -EINVAL;
+ 
+-      info->offset = mapped_channel * HDSP_CHANNEL_BUFFER_BYTES;
++      info->offset = hdsp->channel_map[channel] * HDSP_CHANNEL_BUFFER_BYTES;
+       info->first = 0;
+       info->step = 32;
+       return 0;
+diff --git a/sound/synth/emux/emux_hwdep.c b/sound/synth/emux/emux_hwdep.c
+index e557946718a9..d9fcae071b47 100644
+--- a/sound/synth/emux/emux_hwdep.c
++++ b/sound/synth/emux/emux_hwdep.c
+@@ -22,9 +22,9 @@
+ #include <sound/core.h>
+ #include <sound/hwdep.h>
+ #include <linux/uaccess.h>
++#include <linux/nospec.h>
+ #include "emux_voice.h"
+ 
+-
+ #define TMP_CLIENT_ID 0x1001
+ 
+ /*
+@@ -66,13 +66,16 @@ snd_emux_hwdep_misc_mode(struct snd_emux *emu, void __user *arg)
+               return -EFAULT;
+       if (info.mode < 0 || info.mode >= EMUX_MD_END)
+               return -EINVAL;
++      info.mode = array_index_nospec(info.mode, EMUX_MD_END);
+ 
+       if (info.port < 0) {
+               for (i = 0; i < emu->num_ports; i++)
+                       emu->portptrs[i]->ctrls[info.mode] = info.value;
+       } else {
+-              if (info.port < emu->num_ports)
++              if (info.port < emu->num_ports) {
++                      info.port = array_index_nospec(info.port, emu->num_ports);
+                       emu->portptrs[info.port]->ctrls[info.mode] = info.value;
++              }
+       }
+       return 0;
+ }
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index dceef4725d33..2deffc234932 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -143,7 +143,7 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *
+       int fd, ret = -1;
+       char path[PATH_MAX];
+ 
+-      snprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
++      scnprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
+ 
+       fd = open(path, O_RDONLY);
+       if (fd == -1)
+@@ -173,7 +173,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n
+       ssize_t sret;
+       int fd;
+ 
+-      snprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
++      scnprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
+ 
+       fd = open(path, O_RDONLY);
+       if (fd == -1)
+@@ -203,7 +203,7 @@ perf_pmu__parse_per_pkg(struct perf_pmu_alias *alias, char *dir, char *name)
+       char path[PATH_MAX];
+       int fd;
+ 
+-      snprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
++      scnprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
+ 
+       fd = open(path, O_RDONLY);
+       if (fd == -1)
+@@ -221,7 +221,7 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
+       char path[PATH_MAX];
+       int fd;
+ 
+-      snprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
++      scnprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
+ 
+       fd = open(path, O_RDONLY);
+       if (fd == -1)
+diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
+index b4c5baf4af45..36194c666814 100644
+--- a/virt/kvm/arm/vgic/vgic-mmio.c
++++ b/virt/kvm/arm/vgic/vgic-mmio.c
+@@ -241,14 +241,16 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+  */
+ static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
+ {
+-      if (intid > VGIC_NR_PRIVATE_IRQS)
++      if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
++          intid > VGIC_NR_PRIVATE_IRQS)
+               kvm_arm_halt_guest(vcpu->kvm);
+ }
+ 
+ /* See vgic_change_active_prepare */
+ static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
+ {
+-      if (intid > VGIC_NR_PRIVATE_IRQS)
++      if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
++          intid > VGIC_NR_PRIVATE_IRQS)
+               kvm_arm_resume_guest(vcpu->kvm);
+ }
+ 
