commit:     9876d1f9f2910ed10d2b590547570755b6847dbc
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 11 17:41:44 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Aug 11 17:41:44 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9876d1f9

Linux patch 4.9.42

 0000_README             |    4 +
 1041_linux-4.9.42.patch | 3113 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3117 insertions(+)

diff --git a/0000_README b/0000_README
index eacc709..c5dce51 100644
--- a/0000_README
+++ b/0000_README
@@ -207,6 +207,10 @@ Patch:  1040_linux-4.9.41.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.41
 
+Patch:  1041_linux-4.9.42.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.42
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1041_linux-4.9.42.patch b/1041_linux-4.9.42.patch
new file mode 100644
index 0000000..7b92a09
--- /dev/null
+++ b/1041_linux-4.9.42.patch
@@ -0,0 +1,3113 @@
+diff --git a/Makefile b/Makefile
+index 82eb3d1ee801..34d4d9f8a4b2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 41
++SUBLEVEL = 42
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
+index 7037201c5e3a..f3baa896ce84 100644
+--- a/arch/arm/boot/dts/Makefile
++++ b/arch/arm/boot/dts/Makefile
+@@ -820,6 +820,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
+       sun8i-a83t-allwinner-h8homlet-v2.dtb \
+       sun8i-a83t-cubietruck-plus.dtb \
+       sun8i-h3-bananapi-m2-plus.dtb \
++      sun8i-h3-nanopi-m1.dtb  \
+       sun8i-h3-nanopi-neo.dtb \
+       sun8i-h3-orangepi-2.dtb \
+       sun8i-h3-orangepi-lite.dtb \
+diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
+index 895fa6cfa15a..563901e0ec07 100644
+--- a/arch/arm/boot/dts/armada-388-gp.dts
++++ b/arch/arm/boot/dts/armada-388-gp.dts
+@@ -75,7 +75,7 @@
+                                       pinctrl-names = "default";
+                                       pinctrl-0 = <&pca0_pins>;
+                                       interrupt-parent = <&gpio0>;
+-                                      interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
++                                      interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
+                                       gpio-controller;
+                                       #gpio-cells = <2>;
+                                       interrupt-controller;
+@@ -87,7 +87,7 @@
+                                       compatible = "nxp,pca9555";
+                                       pinctrl-names = "default";
+                                       interrupt-parent = <&gpio0>;
+-                                      interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
++                                      interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
+                                       gpio-controller;
+                                       #gpio-cells = <2>;
+                                       interrupt-controller;
+diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
+index 5ea4915f6d75..10d307408f23 100644
+--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
++++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
+@@ -56,7 +56,7 @@
+ };
+ 
+ &pio {
+-      mmc2_pins_nrst: mmc2@0 {
++      mmc2_pins_nrst: mmc2-rst-pin {
+               allwinner,pins = "PC16";
+               allwinner,function = "gpio_out";
+               allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+diff --git a/arch/arm/boot/dts/tango4-vantage-1172.dts b/arch/arm/boot/dts/tango4-vantage-1172.dts
+index 4cab64cb581e..e3a51e3538b7 100644
+--- a/arch/arm/boot/dts/tango4-vantage-1172.dts
++++ b/arch/arm/boot/dts/tango4-vantage-1172.dts
+@@ -21,7 +21,7 @@
+ };
+ 
+ &eth0 {
+-      phy-connection-type = "rgmii";
++      phy-connection-type = "rgmii-id";
+       phy-handle = <&eth0_phy>;
+       #address-cells = <1>;
+       #size-cells = <0>;
+diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
+index bfe2a2f5a644..22b73112b75f 100644
+--- a/arch/arm/include/asm/ftrace.h
++++ b/arch/arm/include/asm/ftrace.h
+@@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level)
+ 
+ #define ftrace_return_address(n) return_address(n)
+ 
++#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
++
++static inline bool arch_syscall_match_sym_name(const char *sym,
++                                             const char *name)
++{
++      if (!strcmp(sym, "sys_mmap2"))
++              sym = "sys_mmap_pgoff";
++      else if (!strcmp(sym, "sys_statfs64_wrapper"))
++              sym = "sys_statfs64";
++      else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
++              sym = "sys_fstatfs64";
++      else if (!strcmp(sym, "sys_arm_fadvise64_64"))
++              sym = "sys_fadvise64_64";
++
++      /* Ignore case since sym may start with "SyS" instead of "sys" */
++      return !strcasecmp(sym, name);
++}
++
+ #endif /* ifndef __ASSEMBLY__ */
+ 
+ #endif /* _ASM_ARM_FTRACE */
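
For reference, the matcher added above lets the syscall tracer treat ARM's wrapper symbol names as aliases of the generic syscall names, comparing case-insensitively because kallsyms may report "SyS_" prefixes. A minimal userspace sketch of the same rules (the harness, main(), and test strings are illustrative, not part of the patch):

    /* Standalone sketch of the matching rules added above; the alias
     * table is copied from the hunk, everything else is hypothetical. */
    #include <stdio.h>
    #include <string.h>
    #include <strings.h>
    #include <stdbool.h>

    static bool match_sym_name(const char *sym, const char *name)
    {
            if (!strcmp(sym, "sys_mmap2"))
                    sym = "sys_mmap_pgoff";
            else if (!strcmp(sym, "sys_statfs64_wrapper"))
                    sym = "sys_statfs64";

            /* Ignore case: kallsyms may report "SyS_..." */
            return !strcasecmp(sym, name);
    }

    int main(void)
    {
            printf("%d\n", match_sym_name("sys_mmap2", "sys_mmap_pgoff")); /* 1 */
            printf("%d\n", match_sym_name("SyS_read", "sys_read"));        /* 1 */
            printf("%d\n", match_sym_name("sys_read", "sys_write"));       /* 0 */
            return 0;
    }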
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index c721ea2fdbd8..df757c9675e6 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -604,13 +604,12 @@ void flush_cache_range(struct vm_area_struct *vma,
+       if (parisc_requires_coherency())
+               flush_tlb_range(vma, start, end);
+ 
+-      if ((end - start) >= parisc_cache_flush_threshold) {
++      if ((end - start) >= parisc_cache_flush_threshold
++          || vma->vm_mm->context != mfsp(3)) {
+               flush_cache_all();
+               return;
+       }
+ 
+-      BUG_ON(vma->vm_mm->context != mfsp(3));
+-
+       flush_user_dcache_range_asm(start, end);
+       if (vma->vm_flags & VM_EXEC)
+               flush_user_icache_range_asm(start, end);
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 3c05c311e35e..028a22bfa90c 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -146,6 +146,19 @@ notrace unsigned int __check_irq_replay(void)
+ 
+       /* Clear bit 0 which we wouldn't clear otherwise */
+       local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
++      if (happened & PACA_IRQ_HARD_DIS) {
++              /*
++               * We may have missed a decrementer interrupt if hard disabled.
++               * Check the decrementer register in case we had a rollover
++               * while hard disabled.
++               */
++              if (!(happened & PACA_IRQ_DEC)) {
++                      if (decrementer_check_overflow()) {
++                              local_paca->irq_happened |= PACA_IRQ_DEC;
++                              happened |= PACA_IRQ_DEC;
++                      }
++              }
++      }
+ 
+       /*
+        * Force the delivery of pending soft-disabled interrupts on PS3.
+@@ -171,7 +184,7 @@ notrace unsigned int __check_irq_replay(void)
+        * in case we also had a rollover while hard disabled
+        */
+       local_paca->irq_happened &= ~PACA_IRQ_DEC;
+-      if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
++      if (happened & PACA_IRQ_DEC)
+               return 0x900;
+ 
+       /* Finally check if an external interrupt happened */
+diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
+index 5c8f12fe9721..dcbb9144c16d 100644
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -127,12 +127,19 @@ static void flush_tmregs_to_thread(struct task_struct *tsk)
+        * If task is not current, it will have been flushed already to
+        * it's thread_struct during __switch_to().
+        *
+-       * A reclaim flushes ALL the state.
++       * A reclaim flushes ALL the state; otherwise, when not in TM,
++       * save the live TM SPRs into the appropriate thread structures.
+        */
+ 
+-      if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
+-              tm_reclaim_current(TM_CAUSE_SIGNAL);
++      if (tsk != current)
++              return;
+ 
++      if (MSR_TM_SUSPENDED(mfmsr())) {
++              tm_reclaim_current(TM_CAUSE_SIGNAL);
++      } else {
++              tm_enable();
++              tm_save_sprs(&(tsk->thread));
++      }
+ }
+ #else
+ static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
+diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
+index ec9c04de3664..ff05992dae7a 100644
+--- a/arch/sparc/include/asm/trap_block.h
++++ b/arch/sparc/include/asm/trap_block.h
+@@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
+ void init_cur_cpu_trap(struct thread_info *);
+ void setup_tba(void);
+ extern int ncpus_probed;
++extern u64 cpu_mondo_counter[NR_CPUS];
+ 
+ unsigned long real_hard_smp_processor_id(void);
+ 
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index d5807d24b98f..2deb89ef1d5f 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -621,22 +621,48 @@ static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
+       }
+ }
+ 
+-/* Multi-cpu list version.  */
++#define       CPU_MONDO_COUNTER(cpuid)        (cpu_mondo_counter[cpuid])
++#define       MONDO_USEC_WAIT_MIN             2
++#define       MONDO_USEC_WAIT_MAX             100
++#define       MONDO_RETRY_LIMIT               500000
++
++/* Multi-cpu list version.
++ *
++ * Deliver xcalls to 'cnt' cpus in 'cpu_list'.
++ * Sometimes not all cpus receive the mondo, requiring us to re-send
++ * it until all cpus have received it, or until cpus are truly stuck
++ * unable to receive the mondo and we time out.
++ * Occasionally a target cpu strand is borrowed briefly by the hypervisor
++ * to perform guest service, such as PCIe error handling. Considering
++ * that service time, an overall wait of 1 second is reasonable for 1 cpu.
++ * Two in-between mondo check wait times are defined: 2 usec for a quick
++ * turnaround on a single cpu, and up to 100 usec for a large cpu count.
++ * Delivering a mondo to a large number of cpus can take longer, so we
++ * adjust the retry count as long as target cpus are making forward
++ * progress.
++ */
+ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
+ {
+-      int retries, this_cpu, prev_sent, i, saw_cpu_error;
++      int this_cpu, tot_cpus, prev_sent, i, rem;
++      int usec_wait, retries, tot_retries;
++      u16 first_cpu = 0xffff;
++      unsigned long xc_rcvd = 0;
+       unsigned long status;
++      int ecpuerror_id = 0;
++      int enocpu_id = 0;
+       u16 *cpu_list;
++      u16 cpu;
+ 
+       this_cpu = smp_processor_id();
+-
+       cpu_list = __va(tb->cpu_list_pa);
+-
+-      saw_cpu_error = 0;
+-      retries = 0;
++      usec_wait = cnt * MONDO_USEC_WAIT_MIN;
++      if (usec_wait > MONDO_USEC_WAIT_MAX)
++              usec_wait = MONDO_USEC_WAIT_MAX;
++      retries = tot_retries = 0;
++      tot_cpus = cnt;
+       prev_sent = 0;
++
+       do {
+-              int forward_progress, n_sent;
++              int n_sent, mondo_delivered, target_cpu_busy;
+ 
+               status = sun4v_cpu_mondo_send(cnt,
+                                             tb->cpu_list_pa,
+@@ -644,94 +670,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
+ 
+               /* HV_EOK means all cpus received the xcall, we're done.  */
+               if (likely(status == HV_EOK))
+-                      break;
++                      goto xcall_done;
++
++              /* Panic on any status other than these non-fatal errors */
++              if (unlikely((status != HV_EWOULDBLOCK) &&
++                      (status != HV_ECPUERROR) &&
++                      (status != HV_ENOCPU)))
++                      goto fatal_errors;
+ 
+               /* First, see if we made any forward progress.
++               *
++               * Go through the cpu_list, count the target cpus that have
++               * received our mondo (n_sent), and those that did not (rem).
++               * Re-pack cpu_list with the cpus that remain to be retried
++               * at the front - this simplifies tracking the truly stalled
++               * cpus.
+                *
+                * The hypervisor indicates successful sends by setting
+                * cpu list entries to the value 0xffff.
++               *
++               * EWOULDBLOCK means some target cpus did not receive the
++               * mondo and retry usually helps.
++               *
++               * ECPUERROR means at least one target cpu is in an error
++               * state; it's usually safe to skip the faulty cpu and retry.
++               *
++               * ENOCPU means one of the target cpus doesn't belong to the
++               * domain, perhaps because it was offlined, which is
++               * unexpected but not fatal; it's okay to skip the offlined
++               * cpu.
+                */
++              rem = 0;
+               n_sent = 0;
+               for (i = 0; i < cnt; i++) {
+-                      if (likely(cpu_list[i] == 0xffff))
++                      cpu = cpu_list[i];
++                      if (likely(cpu == 0xffff)) {
+                               n_sent++;
++                      } else if ((status == HV_ECPUERROR) &&
++                              (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
++                              ecpuerror_id = cpu + 1;
++                      } else if (status == HV_ENOCPU && !cpu_online(cpu)) {
++                              enocpu_id = cpu + 1;
++                      } else {
++                              cpu_list[rem++] = cpu;
++                      }
+               }
+ 
+-              forward_progress = 0;
+-              if (n_sent > prev_sent)
+-                      forward_progress = 1;
++              /* No cpu remained, we're done. */
++              if (rem == 0)
++                      break;
+ 
+-              prev_sent = n_sent;
++              /* Otherwise, update the cpu count for retry. */
++              cnt = rem;
+ 
+-              /* If we get a HV_ECPUERROR, then one or more of the cpus
+-               * in the list are in error state.  Use the cpu_state()
+-               * hypervisor call to find out which cpus are in error state.
++              /* Record the overall number of mondos received by the
++               * first of the remaining cpus.
+                */
+-              if (unlikely(status == HV_ECPUERROR)) {
+-                      for (i = 0; i < cnt; i++) {
+-                              long err;
+-                              u16 cpu;
++              if (first_cpu != cpu_list[0]) {
++                      first_cpu = cpu_list[0];
++                      xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
++              }
+ 
+-                              cpu = cpu_list[i];
+-                              if (cpu == 0xffff)
+-                                      continue;
++              /* Was any mondo delivered successfully? */
++              mondo_delivered = (n_sent > prev_sent);
++              prev_sent = n_sent;
+ 
+-                              err = sun4v_cpu_state(cpu);
+-                              if (err == HV_CPU_STATE_ERROR) {
+-                                      saw_cpu_error = (cpu + 1);
+-                                      cpu_list[i] = 0xffff;
+-                              }
+-                      }
+-              } else if (unlikely(status != HV_EWOULDBLOCK))
+-                      goto fatal_mondo_error;
++              /* or, was any target cpu busy processing other mondos? */
++              target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
++              xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+ 
+-              /* Don't bother rewriting the CPU list, just leave the
+-               * 0xffff and non-0xffff entries in there and the
+-               * hypervisor will do the right thing.
+-               *
+-               * Only advance timeout state if we didn't make any
+-               * forward progress.
++              /* The retry count only accumulates while no progress is
++               * made. If we're making progress, reset the retry count.
+                */
+-              if (unlikely(!forward_progress)) {
+-                      if (unlikely(++retries > 10000))
+-                              goto fatal_mondo_timeout;
+-
+-                      /* Delay a little bit to let other cpus catch up
+-                       * on their cpu mondo queue work.
+-                       */
+-                      udelay(2 * cnt);
++              if (likely(mondo_delivered || target_cpu_busy)) {
++                      tot_retries += retries;
++                      retries = 0;
++              } else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
++                      goto fatal_mondo_timeout;
+               }
+-      } while (1);
+ 
+-      if (unlikely(saw_cpu_error))
+-              goto fatal_mondo_cpu_error;
++              /* Delay a little bit to let other cpus catch up on
++               * their cpu mondo queue work.
++               */
++              if (!mondo_delivered)
++                      udelay(usec_wait);
+ 
+-      return;
++              retries++;
++      } while (1);
+ 
+-fatal_mondo_cpu_error:
+-      printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
+-             "(including %d) were in error state\n",
+-             this_cpu, saw_cpu_error - 1);
++xcall_done:
++      if (unlikely(ecpuerror_id > 0)) {
++              pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
++                     this_cpu, ecpuerror_id - 1);
++      } else if (unlikely(enocpu_id > 0)) {
++              pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
++                     this_cpu, enocpu_id - 1);
++      }
+       return;
+ 
++fatal_errors:
++      /* fatal errors include bad alignment, etc */
++      pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
++             this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
++      panic("Unexpected SUN4V mondo error %lu\n", status);
++
+ fatal_mondo_timeout:
+-      printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
+-             " progress after %d retries.\n",
+-             this_cpu, retries);
+-      goto dump_cpu_list_and_out;
+-
+-fatal_mondo_error:
+-      printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
+-             this_cpu, status);
+-      printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
+-             "mondo_block_pa(%lx)\n",
+-             this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+-
+-dump_cpu_list_and_out:
+-      printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
+-      for (i = 0; i < cnt; i++)
+-              printk("%u ", cpu_list[i]);
+-      printk("]\n");
++      /* Some cpus were non-responsive to the cpu mondo */
++      pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
++             this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
++      panic("SUN4V mondo timeout panic\n");
+ }
+ 
+ static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
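
The policy described in the comment block above reduces to a clamped per-retry wait plus a retry budget that only burns down while no target cpu makes progress. A toy model, standalone and hypervisor-free (the MONDO_* constants are copied from the hunk; the scenario values are made up):

    /* Toy model of the wait/retry policy introduced above; only the
     * MONDO_* constants come from the hunk, nothing here is kernel code. */
    #include <stdio.h>

    #define MONDO_USEC_WAIT_MIN 2
    #define MONDO_USEC_WAIT_MAX 100
    #define MONDO_RETRY_LIMIT   500000

    static int mondo_wait_usec(int cnt)
    {
            int usec_wait = cnt * MONDO_USEC_WAIT_MIN;

            if (usec_wait > MONDO_USEC_WAIT_MAX)
                    usec_wait = MONDO_USEC_WAIT_MAX;
            return usec_wait;
    }

    int main(void)
    {
            /* 1 cpu waits 2 usec per retry; 64 cpus clamp at 100 usec */
            printf("%d %d\n", mondo_wait_usec(1), mondo_wait_usec(64));

            /* The limit applies only to consecutive no-progress retries */
            printf("budget: %d retries\n", MONDO_RETRY_LIMIT);
            return 0;
    }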
+diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S
+index 559bc5e9c199..34631995859a 100644
+--- a/arch/sparc/kernel/sun4v_ivec.S
++++ b/arch/sparc/kernel/sun4v_ivec.S
+@@ -26,6 +26,21 @@ sun4v_cpu_mondo:
+       ldxa    [%g0] ASI_SCRATCHPAD, %g4
+       sub     %g4, TRAP_PER_CPU_FAULT_INFO, %g4
+ 
++      /* Get smp_processor_id() into %g3 */
++      sethi   %hi(trap_block), %g5
++      or      %g5, %lo(trap_block), %g5
++      sub     %g4, %g5, %g3
++      srlx    %g3, TRAP_BLOCK_SZ_SHIFT, %g3
++
++      /* Increment cpu_mondo_counter[smp_processor_id()] */
++      sethi   %hi(cpu_mondo_counter), %g5
++      or      %g5, %lo(cpu_mondo_counter), %g5
++      sllx    %g3, 3, %g3
++      add     %g5, %g3, %g5
++      ldx     [%g5], %g3
++      add     %g3, 1, %g3
++      stx     %g3, [%g5]
++
+       /* Get CPU mondo queue base phys address into %g7.  */
+       ldx     [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
+ 
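
As its comments note, the assembly added above is the trap-entry equivalent of cpu_mondo_counter[smp_processor_id()]++, open-coded in global registers because the mondo vector cannot call C. A rough C rendering of the same update (the typedef and helper name are stand-ins, not from the patch):

    /* Rough C equivalent of the counter update done in assembly above.
     * The sllx by 3 in the hunk is the multiply by sizeof(u64) that the
     * array indexing below performs implicitly. */
    typedef unsigned long long u64;        /* stand-in for the kernel's u64 */

    extern u64 cpu_mondo_counter[];        /* defined in traps_64.c below */

    static inline void bump_cpu_mondo_counter(int cpuid)  /* hypothetical */
    {
            cpu_mondo_counter[cpuid]++;
    }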
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index d44fb806bbd7..32dafb920908 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -2732,6 +2732,7 @@ void do_getpsr(struct pt_regs *regs)
+       }
+ }
+ 
++u64 cpu_mondo_counter[NR_CPUS] = {0};
+ struct trap_per_cpu trap_block[NR_CPUS];
+ EXPORT_SYMBOL(trap_block);
+ 
+diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
+index 54f98706b03b..5a8cb37f0a3b 100644
+--- a/arch/sparc/lib/U3memcpy.S
++++ b/arch/sparc/lib/U3memcpy.S
+@@ -145,13 +145,13 @@ ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
+ ENTRY(U3_retl_o2_and_7_plus_GS)
+       and     %o2, 7, %o2
+       retl
+-       add    %o2, GLOBAL_SPARE, %o2
++       add    %o2, GLOBAL_SPARE, %o0
+ ENDPROC(U3_retl_o2_and_7_plus_GS)
+ ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
+       add     GLOBAL_SPARE, 8, GLOBAL_SPARE
+       and     %o2, 7, %o2
+       retl
+-       add    %o2, GLOBAL_SPARE, %o2
++       add    %o2, GLOBAL_SPARE, %o0
+ ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
+ #endif
+ 
+diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
+index cc3bd583dce1..9e240fcba784 100644
+--- a/arch/x86/boot/string.c
++++ b/arch/x86/boot/string.c
+@@ -14,6 +14,7 @@
+ 
+ #include <linux/types.h>
+ #include "ctype.h"
++#include "string.h"
+ 
+ int memcmp(const void *s1, const void *s2, size_t len)
+ {
+diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h
+index 725e820602b1..113588ddb43f 100644
+--- a/arch/x86/boot/string.h
++++ b/arch/x86/boot/string.h
+@@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len);
+ #define memset(d,c,l) __builtin_memset(d,c,l)
+ #define memcmp        __builtin_memcmp
+ 
++extern int strcmp(const char *str1, const char *str2);
++extern int strncmp(const char *cs, const char *ct, size_t count);
++extern size_t strlen(const char *s);
++extern char *strstr(const char *s1, const char *s2);
++extern size_t strnlen(const char *s, size_t maxlen);
++extern unsigned int atou(const char *s);
++extern unsigned long long simple_strtoull(const char *cp, char **endp,
++                                        unsigned int base);
++
+ #endif /* BOOT_STRING_H */
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 9cf697ceedbf..55ffd9dc2258 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -152,6 +152,8 @@ void kvm_async_pf_task_wait(u32 token)
+               if (hlist_unhashed(&n.link))
+                       break;
+ 
++              rcu_irq_exit();
++
+               if (!n.halted) {
+                       local_irq_enable();
+                       schedule();
+@@ -160,11 +162,11 @@ void kvm_async_pf_task_wait(u32 token)
+                       /*
+                        * We cannot reschedule. So halt.
+                        */
+-                      rcu_irq_exit();
+                       native_safe_halt();
+                       local_irq_disable();
+-                      rcu_irq_enter();
+               }
++
++              rcu_irq_enter();
+       }
+       if (!n.halted)
+               finish_swait(&n.wq, &wait);
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 8e575fbdf31d..e3e10e8f6f6a 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -2971,10 +2971,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
+ static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
+ {
+       if (!sata_pmp_attached(ap)) {
+-              if (likely(devno < ata_link_max_devices(&ap->link)))
++              if (likely(devno >= 0 &&
++                         devno < ata_link_max_devices(&ap->link)))
+                       return &ap->link.device[devno];
+       } else {
+-              if (likely(devno < ap->nr_pmp_links))
++              if (likely(devno >= 0 &&
++                         devno < ap->nr_pmp_links))
+                       return &ap->pmp_link[devno].device[0];
+       }
+ 
+diff --git a/drivers/base/property.c b/drivers/base/property.c
+index 43a36d68c3fd..06f66687fe0b 100644
+--- a/drivers/base/property.c
++++ b/drivers/base/property.c
+@@ -182,11 +182,12 @@ static int pset_prop_read_string(struct property_set *pset,
+       return 0;
+ }
+ 
+-static inline struct fwnode_handle *dev_fwnode(struct device *dev)
++struct fwnode_handle *dev_fwnode(struct device *dev)
+ {
+       return IS_ENABLED(CONFIG_OF) && dev->of_node ?
+               &dev->of_node->fwnode : dev->fwnode;
+ }
++EXPORT_SYMBOL_GPL(dev_fwnode);
+ 
+ /**
+  * device_property_present - check if a property of a device is present
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index c9441f9d4585..98b767d3171e 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -929,6 +929,7 @@ static int __init nbd_init(void)
+               return -ENOMEM;
+ 
+       for (i = 0; i < nbds_max; i++) {
++              struct request_queue *q;
+               struct gendisk *disk = alloc_disk(1 << part_shift);
+               if (!disk)
+                       goto out;
+@@ -954,12 +955,13 @@ static int __init nbd_init(void)
+                * every gendisk to have its very own request_queue struct.
+                * These structs are big so we dynamically allocate them.
+                */
+-              disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set);
+-              if (!disk->queue) {
++              q = blk_mq_init_queue(&nbd_dev[i].tag_set);
++              if (IS_ERR(q)) {
+                       blk_mq_free_tag_set(&nbd_dev[i].tag_set);
+                       put_disk(disk);
+                       goto out;
+               }
++              disk->queue = q;
+ 
+               /*
+                * Tell the block layer that we are not a rotational device
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 3c3b8f601469..10332c24f961 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -630,11 +630,12 @@ static int virtblk_probe(struct virtio_device *vdev)
+       if (err)
+               goto out_put_disk;
+ 
+-      q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
++      q = blk_mq_init_queue(&vblk->tag_set);
+       if (IS_ERR(q)) {
+               err = -ENOMEM;
+               goto out_free_tags;
+       }
++      vblk->disk->queue = q;
+ 
+       q->queuedata = vblk;
+ 
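
Both block-driver hunks above fix the same bug class: blk_mq_init_queue() reports failure through an ERR_PTR-encoded pointer, so a plain NULL test never fires and the error path is skipped. A minimal userspace sketch of the convention (ERR_PTR/IS_ERR mirror linux/err.h; init_queue() is a made-up stand-in for blk_mq_init_queue()):

    /* Sketch of the kernel's ERR_PTR convention; init_queue() is a
     * hypothetical stand-in for blk_mq_init_queue(). */
    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define ENOMEM_ERR 12

    static inline void *ERR_PTR(long error) { return (void *)error; }

    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *init_queue(int fail)
    {
            static int fake_queue;

            return fail ? ERR_PTR(-ENOMEM_ERR) : (void *)&fake_queue;
    }

    int main(void)
    {
            void *q = init_queue(1);

            printf("q == NULL: %d\n", q == NULL);  /* 0 - the old check misses it */
            printf("IS_ERR(q): %d\n", IS_ERR(q));  /* 1 - the fixed check catches it */
            return 0;
    }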
+diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
+index 8c8b495cbf0d..cdc092a1d9ef 100644
+--- a/drivers/clk/samsung/clk-exynos5420.c
++++ b/drivers/clk/samsung/clk-exynos5420.c
+@@ -586,7 +586,7 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
+       GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
+                               GATE_BUS_TOP, 24, 0, 0),
+       GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
+-                              GATE_BUS_TOP, 27, 0, 0),
++                              GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
+ };
+ 
+ static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
+@@ -956,20 +956,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
+       GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0),
+ 
+       GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys",
+-                      GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0),
++                      GATE_BUS_FSYS0, 9, CLK_IS_CRITICAL, 0),
+       GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2",
+                       GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0),
+ 
+       GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d",
+                       GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0),
+       GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d",
+-                      GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0),
++                      GATE_BUS_TOP, 1, CLK_IS_CRITICAL, 0),
+       GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
+                       GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
+       GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
+                       GATE_BUS_TOP, 5, 0, 0),
+       GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
+-                      GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0),
++                      GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0),
+       GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
+                       GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
+       GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
+@@ -983,20 +983,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
+       GATE(0, "aclk166", "mout_user_aclk166",
+                       GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
+-                      GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0),
++                      GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0),
+       GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
+                       GATE_BUS_TOP, 16, 0, 0),
+       GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
+                       GATE_BUS_TOP, 17, 0, 0),
+       GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
+-                      GATE_BUS_TOP, 18, 0, 0),
++                      GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
+       GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
+                       GATE_BUS_TOP, 28, 0, 0),
+       GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m",
+                       GATE_BUS_TOP, 29, 0, 0),
+ 
+       GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1",
+-                      SRC_MASK_TOP2, 24, 0, 0),
++                      SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0),
+ 
+       GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
+                       SRC_MASK_TOP7, 20, 0, 0),
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index f2bb5122d2c2..063d176baa24 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -703,24 +703,23 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
+ {
+       struct lineevent_state *le = p;
+       struct gpioevent_data ge;
+-      int ret;
++      int ret, level;
+ 
+       ge.timestamp = ktime_get_real_ns();
++      level = gpiod_get_value_cansleep(le->desc);
+ 
+       if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
+           && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
+-              int level = gpiod_get_value_cansleep(le->desc);
+-
+               if (level)
+                       /* Emit low-to-high event */
+                       ge.id = GPIOEVENT_EVENT_RISING_EDGE;
+               else
+                       /* Emit high-to-low event */
+                       ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
+-      } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
++      } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && level) {
+               /* Emit low-to-high event */
+               ge.id = GPIOEVENT_EVENT_RISING_EDGE;
+-      } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
++      } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE && !level) {
+               /* Emit high-to-low event */
+               ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
+       } else {
+diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
+index dc9511c5ecb8..327bdf13e8bc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si.c
++++ b/drivers/gpu/drm/amd/amdgpu/si.c
+@@ -1301,6 +1301,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
+               amdgpu_program_register_sequence(adev,
+                                                pitcairn_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
++              break;
+       case CHIP_VERDE:
+               amdgpu_program_register_sequence(adev,
+                                                verde_golden_registers,
+@@ -1325,6 +1326,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
+               amdgpu_program_register_sequence(adev,
+                                                oland_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
++              break;
+       case CHIP_HAINAN:
+               amdgpu_program_register_sequence(adev,
+                                                hainan_golden_registers,
+diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
+index 2242a80866a9..dc2976c2bed3 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
++++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
+@@ -337,7 +337,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
+       info->fbops = &virtio_gpufb_ops;
+       info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ 
+-      info->screen_base = obj->vmap;
++      info->screen_buffer = obj->vmap;
+       info->screen_size = obj->gem_base.size;
+       drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+       drm_fb_helper_fill_var(info, &vfbdev->helper,
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index f1510cc76d2d..9398143d7c5e 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
+       skb_trim(skb, dlen);
+       mutex_lock(&ep->com.mutex);
+ 
+-      /* update RX credits */
+-      update_rx_credits(ep, dlen);
+-
+       switch (ep->com.state) {
+       case MPA_REQ_SENT:
++              update_rx_credits(ep, dlen);
+               ep->rcv_seq += dlen;
+               disconnect = process_mpa_reply(ep, skb);
+               break;
+       case MPA_REQ_WAIT:
++              update_rx_credits(ep, dlen);
+               ep->rcv_seq += dlen;
+               disconnect = process_mpa_request(ep, skb);
+               break;
+       case FPDU_MODE: {
+               struct c4iw_qp_attributes attrs;
++
++              update_rx_credits(ep, dlen);
+               BUG_ON(!ep->com.qp);
+               if (status)
+                       pr_err("%s Unexpected streaming data." \
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 41800b6d492e..c380b7e8f1c6 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -4294,6 +4294,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
+               /* Setting */
+               irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
+               irte->hi.fields.vector = vcpu_pi_info->vector;
++              irte->lo.fields_vapic.ga_log_intr = 1;
+               irte->lo.fields_vapic.guest_mode = 1;
+               irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
+ 
+diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
+index a18fe5d47238..b4857cd7069e 100644
+--- a/drivers/media/pci/saa7164/saa7164-bus.c
++++ b/drivers/media/pci/saa7164/saa7164-bus.c
+@@ -393,11 +393,11 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
+       msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
+       msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
+      msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
++      memcpy(msg, &msg_tmp, sizeof(*msg));
+ 
+       /* No need to update the read positions, because this was a peek */
+       /* If the caller specifically want to peek, return */
+       if (peekonly) {
+-              memcpy(msg, &msg_tmp, sizeof(*msg));
+               goto peekout;
+       }
+ 
+@@ -442,21 +442,15 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
+               space_rem = bus->m_dwSizeGetRing - curr_grp;
+ 
+               if (space_rem < sizeof(*msg)) {
+-                      /* msg wraps around the ring */
+-                      memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem);
+-                      memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing,
+-                              sizeof(*msg) - space_rem);
+                       if (buf)
+                              memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
+                                       space_rem, buf_size);
+ 
+               } else if (space_rem == sizeof(*msg)) {
+-                      memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
+                       if (buf)
+                               memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
+               } else {
+                       /* Additional data wraps around the ring */
+-                      memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
+                       if (buf) {
+                              memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
+                                       sizeof(*msg), space_rem - sizeof(*msg));
+@@ -469,15 +463,10 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
+ 
+       } else {
+               /* No wrapping */
+-              memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
+               if (buf)
+                      memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
+                               buf_size);
+       }
+-      /* Convert from little endian to CPU */
+-      msg->size = le16_to_cpu((__force __le16)msg->size);
+-      msg->command = le32_to_cpu((__force __le32)msg->command);
+-      msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
+ 
+       /* Update the read positions, adjusting the ring */
+       saa7164_writel(bus->m_dwGetReadPos, new_grp);
+diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
+index 6efb2f1631c4..bdb7a0a00932 100644
+--- a/drivers/media/platform/davinci/vpfe_capture.c
++++ b/drivers/media/platform/davinci/vpfe_capture.c
+@@ -1725,27 +1725,9 @@ static long vpfe_param_handler(struct file *file, void *priv,
+ 
+       switch (cmd) {
+       case VPFE_CMD_S_CCDC_RAW_PARAMS:
++              ret = -EINVAL;
+               v4l2_warn(&vpfe_dev->v4l2_dev,
+-                        "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
+-              if (ccdc_dev->hw_ops.set_params) {
+-                      ret = ccdc_dev->hw_ops.set_params(param);
+-                      if (ret) {
+-                              v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+-                                      "Error setting parameters in CCDC\n");
+-                              goto unlock_out;
+-                      }
+-                      ret = vpfe_get_ccdc_image_format(vpfe_dev,
+-                                                       &vpfe_dev->fmt);
+-                      if (ret < 0) {
+-                              v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+-                                      "Invalid image format at CCDC\n");
+-                              goto unlock_out;
+-                      }
+-              } else {
+-                      ret = -EINVAL;
+-                      v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+-                              "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
+-              }
++                      "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
+               break;
+       default:
+               ret = -ENOTTY;
+diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
+index c3277308a70b..b49f80cb49c9 100644
+--- a/drivers/media/rc/ir-lirc-codec.c
++++ b/drivers/media/rc/ir-lirc-codec.c
+@@ -254,7 +254,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
+               return 0;
+ 
+       case LIRC_GET_REC_RESOLUTION:
+-              val = dev->rx_resolution;
++              val = dev->rx_resolution / 1000;
+               break;
+ 
+       case LIRC_SET_WIDEBAND_RECEIVER:
+diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
+index 98f25ffb4258..848b3453517e 100644
+--- a/drivers/mmc/core/host.c
++++ b/drivers/mmc/core/host.c
+@@ -179,19 +179,17 @@ static void mmc_retune_timer(unsigned long data)
+  */
+ int mmc_of_parse(struct mmc_host *host)
+ {
+-      struct device_node *np;
++      struct device *dev = host->parent;
+       u32 bus_width;
+       int ret;
+       bool cd_cap_invert, cd_gpio_invert = false;
+       bool ro_cap_invert, ro_gpio_invert = false;
+ 
+-      if (!host->parent || !host->parent->of_node)
++      if (!dev || !dev_fwnode(dev))
+               return 0;
+ 
+-      np = host->parent->of_node;
+-
+       /* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
+-      if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
++      if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
+               dev_dbg(host->parent,
+                       "\"bus-width\" property is missing, assuming 1 bit.\n");
+               bus_width = 1;
+@@ -213,7 +211,7 @@ int mmc_of_parse(struct mmc_host *host)
+       }
+ 
+       /* f_max is obtained from the optional "max-frequency" property */
+-      of_property_read_u32(np, "max-frequency", &host->f_max);
++      device_property_read_u32(dev, "max-frequency", &host->f_max);
+ 
+       /*
+        * Configure CD and WP pins. They are both by default active low to
+@@ -228,12 +226,12 @@ int mmc_of_parse(struct mmc_host *host)
+        */
+ 
+       /* Parse Card Detection */
+-      if (of_property_read_bool(np, "non-removable")) {
++      if (device_property_read_bool(dev, "non-removable")) {
+               host->caps |= MMC_CAP_NONREMOVABLE;
+       } else {
+-              cd_cap_invert = of_property_read_bool(np, "cd-inverted");
++              cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
+ 
+-              if (of_property_read_bool(np, "broken-cd"))
++              if (device_property_read_bool(dev, "broken-cd"))
+                       host->caps |= MMC_CAP_NEEDS_POLL;
+ 
+               ret = mmc_gpiod_request_cd(host, "cd", 0, true,
+@@ -259,7 +257,7 @@ int mmc_of_parse(struct mmc_host *host)
+       }
+ 
+       /* Parse Write Protection */
+-      ro_cap_invert = of_property_read_bool(np, "wp-inverted");
++      ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
+ 
+       ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
+       if (!ret)
+@@ -267,62 +265,62 @@ int mmc_of_parse(struct mmc_host *host)
+       else if (ret != -ENOENT && ret != -ENOSYS)
+               return ret;
+ 
+-      if (of_property_read_bool(np, "disable-wp"))
++      if (device_property_read_bool(dev, "disable-wp"))
+               host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
+ 
+       /* See the comment on CD inversion above */
+       if (ro_cap_invert ^ ro_gpio_invert)
+               host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+ 
+-      if (of_property_read_bool(np, "cap-sd-highspeed"))
++      if (device_property_read_bool(dev, "cap-sd-highspeed"))
+               host->caps |= MMC_CAP_SD_HIGHSPEED;
+-      if (of_property_read_bool(np, "cap-mmc-highspeed"))
++      if (device_property_read_bool(dev, "cap-mmc-highspeed"))
+               host->caps |= MMC_CAP_MMC_HIGHSPEED;
+-      if (of_property_read_bool(np, "sd-uhs-sdr12"))
++      if (device_property_read_bool(dev, "sd-uhs-sdr12"))
+               host->caps |= MMC_CAP_UHS_SDR12;
+-      if (of_property_read_bool(np, "sd-uhs-sdr25"))
++      if (device_property_read_bool(dev, "sd-uhs-sdr25"))
+               host->caps |= MMC_CAP_UHS_SDR25;
+-      if (of_property_read_bool(np, "sd-uhs-sdr50"))
++      if (device_property_read_bool(dev, "sd-uhs-sdr50"))
+               host->caps |= MMC_CAP_UHS_SDR50;
+-      if (of_property_read_bool(np, "sd-uhs-sdr104"))
++      if (device_property_read_bool(dev, "sd-uhs-sdr104"))
+               host->caps |= MMC_CAP_UHS_SDR104;
+-      if (of_property_read_bool(np, "sd-uhs-ddr50"))
++      if (device_property_read_bool(dev, "sd-uhs-ddr50"))
+               host->caps |= MMC_CAP_UHS_DDR50;
+-      if (of_property_read_bool(np, "cap-power-off-card"))
++      if (device_property_read_bool(dev, "cap-power-off-card"))
+               host->caps |= MMC_CAP_POWER_OFF_CARD;
+-      if (of_property_read_bool(np, "cap-mmc-hw-reset"))
++      if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
+               host->caps |= MMC_CAP_HW_RESET;
+-      if (of_property_read_bool(np, "cap-sdio-irq"))
++      if (device_property_read_bool(dev, "cap-sdio-irq"))
+               host->caps |= MMC_CAP_SDIO_IRQ;
+-      if (of_property_read_bool(np, "full-pwr-cycle"))
++      if (device_property_read_bool(dev, "full-pwr-cycle"))
+               host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
+-      if (of_property_read_bool(np, "keep-power-in-suspend"))
++      if (device_property_read_bool(dev, "keep-power-in-suspend"))
+               host->pm_caps |= MMC_PM_KEEP_POWER;
+-      if (of_property_read_bool(np, "wakeup-source") ||
+-          of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
++      if (device_property_read_bool(dev, "wakeup-source") ||
++          device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
+               host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+-      if (of_property_read_bool(np, "mmc-ddr-1_8v"))
++      if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
+               host->caps |= MMC_CAP_1_8V_DDR;
+-      if (of_property_read_bool(np, "mmc-ddr-1_2v"))
++      if (device_property_read_bool(dev, "mmc-ddr-1_2v"))
+               host->caps |= MMC_CAP_1_2V_DDR;
+-      if (of_property_read_bool(np, "mmc-hs200-1_8v"))
++      if (device_property_read_bool(dev, "mmc-hs200-1_8v"))
+               host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+-      if (of_property_read_bool(np, "mmc-hs200-1_2v"))
++      if (device_property_read_bool(dev, "mmc-hs200-1_2v"))
+               host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+-      if (of_property_read_bool(np, "mmc-hs400-1_8v"))
++      if (device_property_read_bool(dev, "mmc-hs400-1_8v"))
+               host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
+-      if (of_property_read_bool(np, "mmc-hs400-1_2v"))
++      if (device_property_read_bool(dev, "mmc-hs400-1_2v"))
+               host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
+-      if (of_property_read_bool(np, "mmc-hs400-enhanced-strobe"))
++      if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe"))
+               host->caps2 |= MMC_CAP2_HS400_ES;
+-      if (of_property_read_bool(np, "no-sdio"))
++      if (device_property_read_bool(dev, "no-sdio"))
+               host->caps2 |= MMC_CAP2_NO_SDIO;
+-      if (of_property_read_bool(np, "no-sd"))
++      if (device_property_read_bool(dev, "no-sd"))
+               host->caps2 |= MMC_CAP2_NO_SD;
+-      if (of_property_read_bool(np, "no-mmc"))
++      if (device_property_read_bool(dev, "no-mmc"))
+               host->caps2 |= MMC_CAP2_NO_MMC;
+ 
+-      host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
++      host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
+       if (host->dsr_req && (host->dsr & ~0xffff)) {
+               dev_err(host->parent,
+                      "device tree specified broken value for DSR: 0x%x, ignoring\n",
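
The mmc_of_parse() conversion above swaps OF-only of_property_read_*() calls for the unified device property API, so the same parsing works whether the host controller is described by device tree or by ACPI (enabled by the dev_fwnode() export earlier in this patch). A sketch of the pattern under the same property names (struct my_host and MY_CAP_NONREMOVABLE are hypothetical placeholders, not from the patch):

    /* Sketch of the unified property API pattern used in the hunk above;
     * my_host and MY_CAP_NONREMOVABLE are hypothetical placeholders. */
    #include <linux/property.h>

    struct my_host {
            unsigned int caps;
            u32 f_max;
    };
    #define MY_CAP_NONREMOVABLE (1 << 0)

    static void my_parse(struct device *dev, struct my_host *host)
    {
            u32 bus_width;

            /* Resolves against DT or ACPI, whichever backs the device */
            if (device_property_read_u32(dev, "bus-width", &bus_width) < 0)
                    bus_width = 1;  /* property missing: assume 1 bit */

            device_property_read_u32(dev, "max-frequency", &host->f_max);

            if (device_property_read_bool(dev, "non-removable"))
                    host->caps |= MY_CAP_NONREMOVABLE;
    }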
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index f57700c4b8f0..323dba35bc9a 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1690,7 +1690,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
+               err = mmc_select_hs400(card);
+               if (err)
+                       goto free_card;
+-      } else {
++      } else if (!mmc_card_hs400es(card)) {
+               /* Select the desired bus width optionally */
+               err = mmc_select_bus_width(card);
+               if (err > 0 && mmc_card_hs(card)) {
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index df478ae72e23..f81f4175f49a 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -2610,8 +2610,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
+       host->slot[id] = slot;
+ 
+       mmc->ops = &dw_mci_ops;
+-      if (of_property_read_u32_array(host->dev->of_node,
+-                                     "clock-freq-min-max", freq, 2)) {
++      if (device_property_read_u32_array(host->dev, "clock-freq-min-max",
++                                         freq, 2)) {
+               mmc->f_min = DW_MCI_FREQ_MIN;
+               mmc->f_max = DW_MCI_FREQ_MAX;
+       } else {
+@@ -2709,7 +2709,6 @@ static void dw_mci_init_dma(struct dw_mci *host)
+ {
+       int addr_config;
+       struct device *dev = host->dev;
+-      struct device_node *np = dev->of_node;
+ 
+       /*
+      * Check transfer mode from HCON[17:16]
+@@ -2770,8 +2769,9 @@ static void dw_mci_init_dma(struct dw_mci *host)
+               dev_info(host->dev, "Using internal DMA controller.\n");
+       } else {
+               /* TRANS_MODE_EDMAC: check dma bindings again */
+-              if ((of_property_count_strings(np, "dma-names") < 0) ||
+-                  (!of_find_property(np, "dmas", NULL))) {
++              if ((device_property_read_string_array(dev, "dma-names",
++                                                     NULL, 0) < 0) ||
++                  !device_property_present(dev, "dmas")) {
+                       goto no_dma;
+               }
+               host->dma_ops = &dw_mci_edmac_ops;
+@@ -2931,7 +2931,6 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
+ {
+       struct dw_mci_board *pdata;
+       struct device *dev = host->dev;
+-      struct device_node *np = dev->of_node;
+       const struct dw_mci_drv_data *drv_data = host->drv_data;
+       int ret;
+       u32 clock_frequency;
+@@ -2948,15 +2947,16 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
+       }
+ 
+       /* find out number of slots supported */
+-      of_property_read_u32(np, "num-slots", &pdata->num_slots);
++      device_property_read_u32(dev, "num-slots", &pdata->num_slots);
+ 
+-      if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
++      if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
+               dev_info(dev,
+                       "fifo-depth property not found, using value of FIFOTH register as default\n");
+ 
+-      of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
++      device_property_read_u32(dev, "card-detect-delay",
++                               &pdata->detect_delay_ms);
+ 
+-      if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
++      if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
+               pdata->bus_hz = clock_frequency;
+ 
+       if (drv_data && drv_data->parse_dt) {
+diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
+index a8b430ff117b..83b84ffec27d 100644
+--- a/drivers/mmc/host/sdhci-of-at91.c
++++ b/drivers/mmc/host/sdhci-of-at91.c
+@@ -31,6 +31,7 @@
+ 
+ #define SDMMC_MC1R    0x204
+ #define               SDMMC_MC1R_DDR          BIT(3)
++#define               SDMMC_MC1R_FCD          BIT(7)
+ #define SDMMC_CACR    0x230
+ #define               SDMMC_CACR_CAPWREN      BIT(0)
+ #define               SDMMC_CACR_KEY          (0x46 << 8)
+@@ -43,6 +44,15 @@ struct sdhci_at91_priv {
+       struct clk *mainck;
+ };
+ 
++static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
++{
++      u8 mc1r;
++
++      mc1r = readb(host->ioaddr + SDMMC_MC1R);
++      mc1r |= SDMMC_MC1R_FCD;
++      writeb(mc1r, host->ioaddr + SDMMC_MC1R);
++}
++
+ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
+ {
+       u16 clk;
+@@ -112,10 +122,18 @@ void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
+       sdhci_set_uhs_signaling(host, timing);
+ }
+ 
++static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
++{
++      sdhci_reset(host, mask);
++
++      if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
++              sdhci_at91_set_force_card_detect(host);
++}
++
+ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
+       .set_clock              = sdhci_at91_set_clock,
+       .set_bus_width          = sdhci_set_bus_width,
+-      .reset                  = sdhci_reset,
++      .reset                  = sdhci_at91_reset,
+       .set_uhs_signaling      = sdhci_at91_set_uhs_signaling,
+       .set_power              = sdhci_at91_set_power,
+ };
+@@ -322,6 +340,21 @@ static int sdhci_at91_probe(struct platform_device *pdev)
+               host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+       }
+ 
++      /*
++       * If the device attached to the MMC bus is not removable, it is safer
++       * to set the Force Card Detect bit. People often don't connect the
++       * card detect signal and use this pin for another purpose. If the card
++       * detect pin is not muxed to the SDHCI controller, a default value is
++       * used. This value can differ from one SoC revision to another.
++       * Problems arise when this default value is not "card present". To
++       * avoid this case, if the device is non-removable then the card
++       * detection procedure using the SDMMC_CD signal is bypassed.
++       * This bit is reset when a software reset for all commands is
++       * performed, so we need to implement our own reset function to set
++       * this bit back.
++       */
++      if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
++              sdhci_at91_set_force_card_detect(host);
++
+       pm_runtime_put_autosuspend(&pdev->dev);
+ 
+       return 0;
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 947adda3397d..3ec573c13dac 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1558,6 +1558,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .dev_name = "BCM53125",
+               .vlans = 4096,
+               .enabled_ports = 0xff,
++              .arl_entries = 4,
+               .cpu_port = B53_CPU_PORT,
+               .vta_regs = B53_VTA_REGS,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
+index e078d8da978c..29d29af612d1 100644
+--- a/drivers/net/ethernet/aurora/nb8800.c
++++ b/drivers/net/ethernet/aurora/nb8800.c
+@@ -609,7 +609,7 @@ static void nb8800_mac_config(struct net_device *dev)
+               mac_mode |= HALF_DUPLEX;
+ 
+       if (gigabit) {
+-              if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
++              if (phy_interface_is_rgmii(dev->phydev))
+                       mac_mode |= RGMII_MODE;
+ 
+               mac_mode |= GMAC_MODE;
+@@ -1277,11 +1277,10 @@ static int nb8800_tangox_init(struct net_device *dev)
+               break;
+ 
+       case PHY_INTERFACE_MODE_RGMII:
+-              pad_mode = PAD_MODE_RGMII;
+-              break;
+-
++      case PHY_INTERFACE_MODE_RGMII_ID:
++      case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+-              pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
++              pad_mode = PAD_MODE_RGMII;
+               break;
+ 
+       default:
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index a927a730da10..edae2dcc4927 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -8720,11 +8720,14 @@ static void tg3_free_consistent(struct tg3 *tp)
+       tg3_mem_rx_release(tp);
+       tg3_mem_tx_release(tp);
+ 
++      /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
++      tg3_full_lock(tp, 0);
+       if (tp->hw_stats) {
+               dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
+                                 tp->hw_stats, tp->stats_mapping);
+               tp->hw_stats = NULL;
+       }
++      tg3_full_unlock(tp);
+ }
+ 
+ /*
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index cb45390c7623..f7fabecc104f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -770,6 +770,10 @@ static void cb_timeout_handler(struct work_struct *work)
+       mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+ }
+ 
++static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
++static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
++                            struct mlx5_cmd_msg *msg);
++
+ static void cmd_work_handler(struct work_struct *work)
+ {
+       struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
+@@ -779,16 +783,27 @@ static void cmd_work_handler(struct work_struct *work)
+       struct mlx5_cmd_layout *lay;
+       struct semaphore *sem;
+       unsigned long flags;
++      int alloc_ret;
+ 
+       sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
+       down(sem);
+       if (!ent->page_queue) {
+-              ent->idx = alloc_ent(cmd);
+-              if (ent->idx < 0) {
++              alloc_ret = alloc_ent(cmd);
++              if (alloc_ret < 0) {
++                      if (ent->callback) {
++                              ent->callback(-EAGAIN, ent->context);
++                              mlx5_free_cmd_msg(dev, ent->out);
++                              free_msg(dev, ent->in);
++                              free_cmd(ent);
++                      } else {
++                              ent->ret = -EAGAIN;
++                              complete(&ent->done);
++                      }
+                       mlx5_core_err(dev, "failed to allocate command entry\n");
+                       up(sem);
+                       return;
+               }
++              ent->idx = alloc_ret;
+       } else {
+               ent->idx = cmd->max_reg_cmds;
+               spin_lock_irqsave(&cmd->alloc_lock, flags);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+index 13dc388667b6..1612ec0d9103 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+@@ -62,12 +62,14 @@ static void mlx5e_timestamp_overflow(struct work_struct *work)
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
+                                                  overflow_work);
++      struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
+       unsigned long flags;
+ 
+       write_lock_irqsave(&tstamp->lock, flags);
+       timecounter_read(&tstamp->clock);
+       write_unlock_irqrestore(&tstamp->lock, flags);
+-      schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
++      queue_delayed_work(priv->wq, &tstamp->overflow_work,
++                         msecs_to_jiffies(tstamp->overflow_period * 1000));
+ }
+ 
+ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
+@@ -263,7 +265,7 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
+ 
+       INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
+       if (tstamp->overflow_period)
+-              schedule_delayed_work(&tstamp->overflow_work, 0);
++              queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
+       else
+               mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+index e034dbc4913d..cf070fc0fb6b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+@@ -276,7 +276,7 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
+ 
+ static bool outer_header_zero(u32 *match_criteria)
+ {
+-      int size = MLX5_ST_SZ_BYTES(fte_match_param);
++      int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
+       char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
+                                            outer_headers);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 6ffd5d2a70aa..52a38106448e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -651,9 +651,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+       int vport;
+       int err;
+ 
++      /* disable PF RoCE so missed packets don't go through RoCE steering */
++      mlx5_dev_list_lock();
++      mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
++      mlx5_dev_list_unlock();
++
+       err = esw_create_offloads_fdb_table(esw, nvports);
+       if (err)
+-              return err;
++              goto create_fdb_err;
+ 
+       err = esw_create_offloads_table(esw);
+       if (err)
+@@ -673,11 +678,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+                       goto err_reps;
+       }
+ 
+-      /* disable PF RoCE so missed packets don't go through RoCE steering */
+-      mlx5_dev_list_lock();
+-      mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+-      mlx5_dev_list_unlock();
+-
+       return 0;
+ 
+ err_reps:
+@@ -694,6 +694,13 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+ 
+ create_ft_err:
+       esw_destroy_offloads_fdb_table(esw);
++
++create_fdb_err:
++      /* enable back PF RoCE */
++      mlx5_dev_list_lock();
++      mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
++      mlx5_dev_list_unlock();
++
+       return err;
+ }
+ 
+@@ -701,11 +708,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
+ {
+       int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
+ 
+-      /* enable back PF RoCE */
+-      mlx5_dev_list_lock();
+-      mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+-      mlx5_dev_list_unlock();
+-
+       mlx5_eswitch_disable_sriov(esw);
+       err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
+       if (err) {
+@@ -715,6 +717,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
+                       esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
+       }
+ 
++      /* enable back PF RoCE */
++      mlx5_dev_list_lock();
++      mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
++      mlx5_dev_list_unlock();
++
+       return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+index b5d5519542e8..0ca4623bda6b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+@@ -157,22 +157,17 @@ static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
+ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
+                                          u8 *port1, u8 *port2)
+ {
+-      if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+-              if (tracker->netdev_state[0].tx_enabled) {
+-                      *port1 = 1;
+-                      *port2 = 1;
+-              } else {
+-                      *port1 = 2;
+-                      *port2 = 2;
+-              }
+-      } else {
+-              *port1 = 1;
+-              *port2 = 2;
+-              if (!tracker->netdev_state[0].link_up)
+-                      *port1 = 2;
+-              else if (!tracker->netdev_state[1].link_up)
+-                      *port2 = 1;
++      *port1 = 1;
++      *port2 = 2;
++      if (!tracker->netdev_state[0].tx_enabled ||
++          !tracker->netdev_state[0].link_up) {
++              *port1 = 2;
++              return;
+       }
++
++      if (!tracker->netdev_state[1].tx_enabled ||
++          !tracker->netdev_state[1].link_up)
++              *port2 = 1;
+ }
+ 
+ static void mlx5_activate_lag(struct mlx5_lag *ldev,
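The rewritten mapping drops the active-backup special case; both bonding modes now share one rule: default to ports (1, 2), and point a slave's traffic at the other port whenever that slave cannot transmit. A stand-alone rendering of the rule, handy for eyeballing the truth table (all names here are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    struct slave_state { bool tx_enabled; bool link_up; };

    /* Same rule as the rewritten mlx5_infer_tx_affinity_mapping() above. */
    static void infer_tx_affinity(const struct slave_state s[2],
                                  unsigned int *port1, unsigned int *port2)
    {
            *port1 = 1;
            *port2 = 2;
            if (!s[0].tx_enabled || !s[0].link_up) {
                    *port1 = 2;
                    return;
            }
            if (!s[1].tx_enabled || !s[1].link_up)
                    *port2 = 1;
    }

    int main(void)
    {
            const struct slave_state both_up[2] = { { true, true }, { true, true } };
            const struct slave_state s0_down[2] = { { true, false }, { true, true } };
            unsigned int p1, p2;

            infer_tx_affinity(both_up, &p1, &p2);
            printf("both up -> %u %u\n", p1, p2);   /* 1 2 */
            infer_tx_affinity(s0_down, &p1, &p2);
            printf("s0 down -> %u %u\n", p1, p2);   /* 2 2 */
            return 0;
    }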
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 12be259394c6..2140dedab712 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -574,6 +574,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
+       .rpadir_value   = 2 << 16,
+       .no_trimd       = 1,
+       .no_ade         = 1,
++      .hw_crc         = 1,
+       .tsu            = 1,
+       .select_mii     = 1,
+       .shift_rd0      = 1,
+@@ -802,7 +803,7 @@ static struct sh_eth_cpu_data sh7734_data = {
+ 
+       .ecsr_value     = ECSR_ICD | ECSR_MPD,
+       .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
+-      .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
++      .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
+ 
+       .tx_check       = EESR_TC1 | EESR_FTC,
+       .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+@@ -832,7 +833,7 @@ static struct sh_eth_cpu_data sh7763_data = {
+ 
+       .ecsr_value     = ECSR_ICD | ECSR_MPD,
+       .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
+-      .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
++      .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
+ 
+       .tx_check       = EESR_TC1 | EESR_FTC,
+       .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
+index bca6a1e72d1d..e1bb802d4a4d 100644
+--- a/drivers/net/irda/mcs7780.c
++++ b/drivers/net/irda/mcs7780.c
+@@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
+ static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
+ {
+       struct usb_device *dev = mcs->usbdev;
+-      int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
+-                                MCS_RD_RTYPE, 0, reg, val, 2,
+-                                msecs_to_jiffies(MCS_CTRL_TIMEOUT));
++      void *dmabuf;
++      int ret;
++
++      dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
++      if (!dmabuf)
++              return -ENOMEM;
++
++      ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
++                            MCS_RD_RTYPE, 0, reg, dmabuf, 2,
++                            msecs_to_jiffies(MCS_CTRL_TIMEOUT));
++
++      memcpy(val, dmabuf, sizeof(__u16));
++      kfree(dmabuf);
+ 
+       return ret;
+ }
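The fix applies the standard USB rule that the data stage of usb_control_msg() must be heap-allocated (DMA-able) memory, never a stack or static variable. The pattern in condensed form, a sketch in which REQ, REQTYPE and TIMEOUT_MS stand in for the driver's constants:

    __le16 *dmabuf;
    int ret;

    dmabuf = kmalloc(sizeof(*dmabuf), GFP_KERNEL);   /* DMA-safe bounce buffer */
    if (!dmabuf)
            return -ENOMEM;

    ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), REQ, REQTYPE,
                          0, reg, dmabuf, sizeof(*dmabuf), TIMEOUT_MS);
    if (ret >= 0)
            *val = le16_to_cpu(*dmabuf);             /* copy out only on success */
    kfree(dmabuf);

Note the hunk above copies the buffer back unconditionally; copying only on success, as sketched, additionally avoids propagating an uninitialized buffer when the transfer fails.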
+diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
+index 4cad95552cf1..01cf094bee18 100644
+--- a/drivers/net/phy/dp83867.c
++++ b/drivers/net/phy/dp83867.c
+@@ -29,6 +29,7 @@
+ #define MII_DP83867_MICR      0x12
+ #define MII_DP83867_ISR               0x13
+ #define DP83867_CTRL          0x1f
++#define DP83867_CFG3          0x1e
+ 
+ /* Extended Registers */
+ #define DP83867_RGMIICTL      0x0032
+@@ -90,6 +91,8 @@ static int dp83867_config_intr(struct phy_device *phydev)
+               micr_status |=
+                       (MII_DP83867_MICR_AN_ERR_INT_EN |
+                       MII_DP83867_MICR_SPEED_CHNG_INT_EN |
++                      MII_DP83867_MICR_AUTONEG_COMP_INT_EN |
++                      MII_DP83867_MICR_LINK_STS_CHNG_INT_EN |
+                       MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
+                       MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
+ 
+@@ -190,6 +193,13 @@ static int dp83867_config_init(struct phy_device *phydev)
+                                      DP83867_DEVADDR, delay);
+       }
+ 
++      /* Enable Interrupt output INT_OE in CFG3 register */
++      if (phy_interrupt_is_valid(phydev)) {
++              val = phy_read(phydev, DP83867_CFG3);
++              val |= BIT(7);
++              phy_write(phydev, DP83867_CFG3, val);
++      }
++
+       return 0;
+ }
+ 
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index edd30ebbf275..775a6e1fdef9 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -674,6 +674,9 @@ void phy_stop_machine(struct phy_device *phydev)
+       if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
+               phydev->state = PHY_UP;
+       mutex_unlock(&phydev->lock);
++
++      /* Now we can run the state machine synchronously */
++      phy_state_machine(&phydev->state_queue.work);
+ }
+ 
+ /**
+@@ -1060,6 +1063,15 @@ void phy_state_machine(struct work_struct *work)
+                       if (old_link != phydev->link)
+                               phydev->state = PHY_CHANGELINK;
+               }
++              /*
++               * Failsafe: check that nobody set phydev->link=0 between two
++               * poll cycles, otherwise we won't leave RUNNING state as long
++               * as link remains down.
++               */
++              if (!phydev->link && phydev->state == PHY_RUNNING) {
++                      phydev->state = PHY_CHANGELINK;
++                      phydev_err(phydev, "no link in PHY_RUNNING\n");
++              }
+               break;
+       case PHY_CHANGELINK:
+               err = phy_read_status(phydev);
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 9e7b7836774f..bf02f8e4648a 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1714,6 +1714,8 @@ static int phy_remove(struct device *dev)
+ {
+       struct phy_device *phydev = to_phy_device(dev);
+ 
++      cancel_delayed_work_sync(&phydev->state_queue);
++
+       mutex_lock(&phydev->lock);
+       phydev->state = PHY_DOWN;
+       mutex_unlock(&phydev->lock);
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+index 8744b9beda33..8e3c6f4bdaa0 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+@@ -4161,11 +4161,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
+               goto fail;
+       }
+ 
+-      /* allocate scatter-gather table. sg support
+-       * will be disabled upon allocation failure.
+-       */
+-      brcmf_sdiod_sgtable_alloc(bus->sdiodev);
+-
+       /* Query the F2 block size, set roundup accordingly */
+       bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+       bus->roundup = min(max_roundup, bus->blocksize);
+diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+index 4b97371c3b42..838946d17b59 100644
+--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+@@ -1190,11 +1190,11 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
+                               next_reclaimed;
+                       IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+                                                 next_reclaimed);
++                      iwlagn_check_ratid_empty(priv, sta_id, tid);
+               }
+ 
+               iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
+ 
+-              iwlagn_check_ratid_empty(priv, sta_id, tid);
+               freed = 0;
+ 
+               /* process frames */
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index 3ce1f7da8647..cb7365bdf6e0 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -199,6 +199,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
+       unsigned long   remaining_credit;
+       struct timer_list credit_timeout;
+       u64 credit_window_start;
++      bool rate_limited;
+ 
+       /* Statistics */
+       struct xenvif_stats stats;
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index b009d7966b46..5bfaf5578810 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -105,7 +105,11 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
+ 
+       if (work_done < budget) {
+               napi_complete(napi);
+-              xenvif_napi_schedule_or_enable_events(queue);
++              /* If the queue is rate-limited, it shall be
++               * rescheduled in the timer callback.
++               */
++              if (likely(!queue->rate_limited))
++                      xenvif_napi_schedule_or_enable_events(queue);
+       }
+ 
+       return work_done;
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 47b481095d77..d9b5b73c35a0 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -179,6 +179,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
+               max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
+ 
+       queue->remaining_credit = min(max_credit, max_burst);
++      queue->rate_limited = false;
+ }
+ 
+ void xenvif_tx_credit_callback(unsigned long data)
+@@ -685,8 +686,10 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
+               msecs_to_jiffies(queue->credit_usec / 1000);
+ 
+       /* Timer could already be pending in rare cases. */
+-      if (timer_pending(&queue->credit_timeout))
++      if (timer_pending(&queue->credit_timeout)) {
++              queue->rate_limited = true;
+               return true;
++      }
+ 
+       /* Passed the point where we can replenish credit? */
+       if (time_after_eq64(now, next_credit)) {
+@@ -701,6 +704,7 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
+               mod_timer(&queue->credit_timeout,
+                         next_credit);
+               queue->credit_window_start = next_credit;
++              queue->rate_limited = true;
+ 
+               return true;
+       }
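Taken together, the three xen-netback hunks form a small handshake: the credit check raises rate_limited whenever the replenish timer is pending or newly armed, tx_add_credit() clears it from the timer path, and xenvif_poll() declines to reschedule while it is set. A compact stand-alone model of that protocol, with the timer and NAPI machinery stubbed out:

    #include <stdbool.h>

    struct queue {
            unsigned long remaining_credit;
            bool rate_limited;                /* waiting on the credit timer */
    };

    static bool tx_credit_exceeded(struct queue *q, unsigned int size)
    {
            if (size > q->remaining_credit) {
                    q->rate_limited = true;   /* timer callback will reschedule */
                    return true;
            }
            q->remaining_credit -= size;
            return false;
    }

    static void credit_timer_cb(struct queue *q, unsigned long new_credit)
    {
            q->remaining_credit = new_credit;
            q->rate_limited = false;          /* polling may resume */
            /* ...kick the queue here... */
    }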
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index ad33238cef17..8c4641b518b5 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -243,12 +243,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
+       struct qla_hw_data *ha = vha->hw;
+       ssize_t rval = 0;
+ 
++      mutex_lock(&ha->optrom_mutex);
++
+       if (ha->optrom_state != QLA_SREADING)
+-              return 0;
++              goto out;
+ 
+-      mutex_lock(&ha->optrom_mutex);
+       rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
+           ha->optrom_region_size);
++
++out:
+       mutex_unlock(&ha->optrom_mutex);
+ 
+       return rval;
+@@ -263,14 +266,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
+           struct device, kobj)));
+       struct qla_hw_data *ha = vha->hw;
+ 
+-      if (ha->optrom_state != QLA_SWRITING)
++      mutex_lock(&ha->optrom_mutex);
++
++      if (ha->optrom_state != QLA_SWRITING) {
++              mutex_unlock(&ha->optrom_mutex);
+               return -EINVAL;
+-      if (off > ha->optrom_region_size)
++      }
++      if (off > ha->optrom_region_size) {
++              mutex_unlock(&ha->optrom_mutex);
+               return -ERANGE;
++      }
+       if (off + count > ha->optrom_region_size)
+               count = ha->optrom_region_size - off;
+ 
+-      mutex_lock(&ha->optrom_mutex);
+       memcpy(&ha->optrom_buffer[off], buf, count);
+       mutex_unlock(&ha->optrom_mutex);
+ 
+diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
+index 2b1456e5e221..c1eafbd7610a 100644
+--- a/drivers/spi/spi-axi-spi-engine.c
++++ b/drivers/spi/spi-axi-spi-engine.c
+@@ -494,7 +494,8 @@ static int spi_engine_probe(struct platform_device *pdev)
+                       SPI_ENGINE_VERSION_MAJOR(version),
+                       SPI_ENGINE_VERSION_MINOR(version),
+                       SPI_ENGINE_VERSION_PATCH(version));
+-              return -ENODEV;
++              ret = -ENODEV;
++              goto err_put_master;
+       }
+ 
+       spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
+index 6693d7c69f97..e8efb4299a95 100644
+--- a/drivers/target/iscsi/iscsi_target_nego.c
++++ b/drivers/target/iscsi/iscsi_target_nego.c
+@@ -490,14 +490,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
+ 
+ static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
+ 
+-static bool iscsi_target_sk_state_check(struct sock *sk)
++static bool __iscsi_target_sk_check_close(struct sock *sk)
+ {
+       if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
+-              pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE,"
++              pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
+                       "returning FALSE\n");
+-              return false;
++              return true;
+       }
+-      return true;
++      return false;
++}
++
++static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
++{
++      bool state = false;
++
++      if (conn->sock) {
++              struct sock *sk = conn->sock->sk;
++
++              read_lock_bh(&sk->sk_callback_lock);
++              state = (__iscsi_target_sk_check_close(sk) ||
++                       test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
++              read_unlock_bh(&sk->sk_callback_lock);
++      }
++      return state;
++}
++
++static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
++{
++      bool state = false;
++
++      if (conn->sock) {
++              struct sock *sk = conn->sock->sk;
++
++              read_lock_bh(&sk->sk_callback_lock);
++              state = test_bit(flag, &conn->login_flags);
++              read_unlock_bh(&sk->sk_callback_lock);
++      }
++      return state;
++}
++
++static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
++{
++      bool state = false;
++
++      if (conn->sock) {
++              struct sock *sk = conn->sock->sk;
++
++              write_lock_bh(&sk->sk_callback_lock);
++              state = (__iscsi_target_sk_check_close(sk) ||
++                       test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
++              if (!state)
++                      clear_bit(flag, &conn->login_flags);
++              write_unlock_bh(&sk->sk_callback_lock);
++      }
++      return state;
+ }
+ 
+ static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
+@@ -537,6 +583,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
+ 
+       pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
+                       conn, current->comm, current->pid);
++      /*
++       * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
++       * before initial PDU processing in iscsi_target_start_negotiation()
++       * has completed, go ahead and retry until it's cleared.
++       *
++       * Otherwise if the TCP connection drops while this is occurring,
++       * iscsi_target_start_negotiation() will detect the failure, call
++       * cancel_delayed_work_sync(&conn->login_work), and cleanup the
++       * remaining iscsi connection resources from iscsi_np process context.
++       */
++      if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
++              schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
++              return;
++      }
+ 
+       spin_lock(&tpg->tpg_state_lock);
+       state = (tpg->tpg_state == TPG_STATE_ACTIVE);
+@@ -544,26 +604,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
+ 
+       if (!state) {
+               pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
+-              iscsi_target_restore_sock_callbacks(conn);
+-              iscsi_target_login_drop(conn, login);
+-              iscsit_deaccess_np(np, tpg, tpg_np);
+-              return;
++              goto err;
+       }
+ 
+-      if (conn->sock) {
+-              struct sock *sk = conn->sock->sk;
+-
+-              read_lock_bh(&sk->sk_callback_lock);
+-              state = iscsi_target_sk_state_check(sk);
+-              read_unlock_bh(&sk->sk_callback_lock);
+-
+-              if (!state) {
+-                      pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
+-                      iscsi_target_restore_sock_callbacks(conn);
+-                      iscsi_target_login_drop(conn, login);
+-                      iscsit_deaccess_np(np, tpg, tpg_np);
+-                      return;
+-              }
++      if (iscsi_target_sk_check_close(conn)) {
++              pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
++              goto err;
+       }
+ 
+       conn->login_kworker = current;
+@@ -581,34 +627,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
+       flush_signals(current);
+       conn->login_kworker = NULL;
+ 
+-      if (rc < 0) {
+-              iscsi_target_restore_sock_callbacks(conn);
+-              iscsi_target_login_drop(conn, login);
+-              iscsit_deaccess_np(np, tpg, tpg_np);
+-              return;
+-      }
++      if (rc < 0)
++              goto err;
+ 
+       pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
+                       conn, current->comm, current->pid);
+ 
+       rc = iscsi_target_do_login(conn, login);
+       if (rc < 0) {
+-              iscsi_target_restore_sock_callbacks(conn);
+-              iscsi_target_login_drop(conn, login);
+-              iscsit_deaccess_np(np, tpg, tpg_np);
++              goto err;
+       } else if (!rc) {
+-              if (conn->sock) {
+-                      struct sock *sk = conn->sock->sk;
+-
+-                      write_lock_bh(&sk->sk_callback_lock);
+-                      clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
+-                      write_unlock_bh(&sk->sk_callback_lock);
+-              }
++              if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE))
++                      goto err;
+       } else if (rc == 1) {
+               iscsi_target_nego_release(conn);
+               iscsi_post_login_handler(np, conn, zero_tsih);
+               iscsit_deaccess_np(np, tpg, tpg_np);
+       }
++      return;
++
++err:
++      iscsi_target_restore_sock_callbacks(conn);
++      iscsi_target_login_drop(conn, login);
++      iscsit_deaccess_np(np, tpg, tpg_np);
+ }
+ 
+ static void iscsi_target_do_cleanup(struct work_struct *work)
+@@ -656,31 +697,54 @@ static void iscsi_target_sk_state_change(struct sock *sk)
+               orig_state_change(sk);
+               return;
+       }
++      state = __iscsi_target_sk_check_close(sk);
++      pr_debug("__iscsi_target_sk_close_change: state: %d\n", state);
++
+       if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
+               pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
+                        " conn: %p\n", conn);
++              if (state)
++                      set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
+               write_unlock_bh(&sk->sk_callback_lock);
+               orig_state_change(sk);
+               return;
+       }
+-      if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
++      if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+               pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
+                        conn);
+               write_unlock_bh(&sk->sk_callback_lock);
+               orig_state_change(sk);
+               return;
+       }
++      /*
++       * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
++       * but only queue conn->login_work -> iscsi_target_do_login_rx()
++       * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
++       *
++       * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
++       * will detect the dropped TCP connection from delayed workqueue context.
++       *
++       * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
++       * iscsi_target_start_negotiation() is running, iscsi_target_do_login()
++       * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
++       * via iscsi_target_sk_check_and_clear() is responsible for detecting the
++       * dropped TCP connection in iscsi_np process context, and cleaning up
++       * the remaining iscsi connection resources.
++       */
++      if (state) {
++              pr_debug("iscsi_target_sk_state_change got failed state\n");
++              set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
++              state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
++              write_unlock_bh(&sk->sk_callback_lock);
+ 
+-      state = iscsi_target_sk_state_check(sk);
+-      write_unlock_bh(&sk->sk_callback_lock);
+-
+-      pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
++              orig_state_change(sk);
+ 
+-      if (!state) {
+-              pr_debug("iscsi_target_sk_state_change got failed state\n");
+-              schedule_delayed_work(&conn->login_cleanup_work, 0);
++              if (!state)
++                      schedule_delayed_work(&conn->login_work, 0);
+               return;
+       }
++      write_unlock_bh(&sk->sk_callback_lock);
++
+       orig_state_change(sk);
+ }
+ 
+@@ -945,6 +1009,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
+                       if (iscsi_target_handle_csg_one(conn, login) < 0)
+                               return -1;
+                       if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
++                              /*
++                               * Check to make sure the TCP connection has not
++                               * dropped asynchronously while session reinstatement
++                               * was occurring in this kthread context, before
++                               * transitioning to full feature phase operation.
++                               */
++                              if (iscsi_target_sk_check_close(conn))
++                                      return -1;
++
+                               login->tsih = conn->sess->tsih;
+                               login->login_complete = 1;
+                               iscsi_target_restore_sock_callbacks(conn);
+@@ -971,21 +1044,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
+               break;
+       }
+ 
+-      if (conn->sock) {
+-              struct sock *sk = conn->sock->sk;
+-              bool state;
+-
+-              read_lock_bh(&sk->sk_callback_lock);
+-              state = iscsi_target_sk_state_check(sk);
+-              read_unlock_bh(&sk->sk_callback_lock);
+-
+-              if (!state) {
+-                      pr_debug("iscsi_target_do_login() failed state for"
+-                               " conn: %p\n", conn);
+-                      return -1;
+-              }
+-      }
+-
+       return 0;
+ }
+ 
+@@ -1252,13 +1310,25 @@ int iscsi_target_start_negotiation(
+        if (conn->sock) {
+                struct sock *sk = conn->sock->sk;
+ 
+-               write_lock_bh(&sk->sk_callback_lock);
+-               set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+-               write_unlock_bh(&sk->sk_callback_lock);
+-       }
++              write_lock_bh(&sk->sk_callback_lock);
++              set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
++              set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
++              write_unlock_bh(&sk->sk_callback_lock);
++      }
++      /*
++       * If iscsi_target_do_login returns zero to signal more PDU
++       * exchanges are required to complete the login, go ahead and
++       * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
++       * is still active.
++       *
++       * Otherwise if TCP connection dropped asynchronously, go ahead
++       * and perform connection cleanup now.
++       */
++      ret = iscsi_target_do_login(conn, login);
++      if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
++              ret = -1;
+ 
+-       ret = iscsi_target_do_login(conn, login);
+-       if (ret < 0) {
++      if (ret < 0) {
+               cancel_delayed_work_sync(&conn->login_work);
+               cancel_delayed_work_sync(&conn->login_cleanup_work);
+               iscsi_target_restore_sock_callbacks(conn);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 14a37ff0b9e3..705bb5f5a87f 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -4759,10 +4759,6 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
+               else
+                       flush = BTRFS_RESERVE_NO_FLUSH;
+               spin_lock(&space_info->lock);
+-              if (can_overcommit(root, space_info, orig, flush)) {
+-                      spin_unlock(&space_info->lock);
+-                      break;
+-              }
+               if (list_empty(&space_info->tickets) &&
+                   list_empty(&space_info->priority_tickets)) {
+                       spin_unlock(&space_info->lock);
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 9e77c089e8cb..d17d12ed6f73 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -469,6 +469,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
+                               lastoff = page_offset(page);
+                               bh = head = page_buffers(page);
+                               do {
++                                      if (lastoff + bh->b_size <= startoff)
++                                              goto next;
+                                       if (buffer_uptodate(bh) ||
+                                           buffer_unwritten(bh)) {
+                                               if (whence == SEEK_DATA)
+@@ -483,6 +485,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
+                                               unlock_page(page);
+                                               goto out;
+                                       }
++next:
+                                       lastoff += bh->b_size;
+                                       bh = bh->b_this_page;
+                               } while (bh != head);
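The added check skips buffer heads that end at or before the requested offset, so SEEK_DATA can no longer surface a position earlier than the one asked for. The contract is easy to probe from user space:

    #define _GNU_SOURCE               /* SEEK_DATA */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            int fd;
            off_t data;

            if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                    return 1;
            /* Must return an offset >= 1; with the bug fixed above, ext4
             * could report an earlier offset on files with unwritten buffers. */
            data = lseek(fd, 1, SEEK_DATA);
            printf("first data at/after offset 1: %lld\n", (long long)data);
            close(fd);
            return 0;
    }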
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index cf681004b196..95bf46654153 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1926,7 +1926,8 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
+                       n_desc_blocks = o_desc_blocks +
+                               le16_to_cpu(es->s_reserved_gdt_blocks);
+                       n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
+-                      n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb);
++                      n_blocks_count = (ext4_fsblk_t)n_group *
++                              EXT4_BLOCKS_PER_GROUP(sb);
+                       n_group--; /* set to last group number */
+               }
+ 
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 7e0c002c12e9..eb20b8767f3c 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1424,6 +1424,8 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
+       unsigned int total, fsmeta;
+       struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+       struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
++      unsigned int main_segs, blocks_per_seg;
++      int i;
+ 
+       total = le32_to_cpu(raw_super->segment_count);
+       fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
+@@ -1435,6 +1437,20 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
+       if (unlikely(fsmeta >= total))
+               return 1;
+ 
++      main_segs = le32_to_cpu(raw_super->segment_count_main);
++      blocks_per_seg = sbi->blocks_per_seg;
++
++      for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
++              if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
++                      le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
++                      return 1;
++      }
++      for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
++              if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
++                      le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
++                      return 1;
++      }
++
+       if (unlikely(f2fs_cp_error(sbi))) {
+               f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
+               return 1;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 46ca7881d80d..a53b8e0c896a 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -7410,7 +7410,7 @@ static void nfs4_exchange_id_done(struct rpc_task *task, void *data)
+                       cdata->res.server_scope = NULL;
+               }
+               /* Save the EXCHANGE_ID verifier session trunk tests */
+-              memcpy(clp->cl_confirm.data, cdata->args.verifier->data,
++              memcpy(clp->cl_confirm.data, cdata->args.verifier.data,
+                      sizeof(clp->cl_confirm.data));
+       }
+ out:
+@@ -7447,7 +7447,6 @@ static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
+ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
+                       u32 sp4_how, struct rpc_xprt *xprt)
+ {
+-      nfs4_verifier verifier;
+       struct rpc_message msg = {
+               .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
+               .rpc_cred = cred,
+@@ -7470,8 +7469,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
+       if (!calldata)
+               goto out;
+ 
+-      if (!xprt)
+-              nfs4_init_boot_verifier(clp, &verifier);
++      nfs4_init_boot_verifier(clp, &calldata->args.verifier);
+ 
+       status = nfs4_init_uniform_client_string(clp);
+       if (status)
+@@ -7516,9 +7514,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
+               task_setup_data.rpc_xprt = xprt;
+               task_setup_data.flags =
+                               RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC;
+-              calldata->args.verifier = &clp->cl_confirm;
+-      } else {
+-              calldata->args.verifier = &verifier;
++              memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
++                              sizeof(calldata->args.verifier.data));
+       }
+       calldata->args.client = clp;
+ #ifdef CONFIG_NFS_V4_1_MIGRATION
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index c9c4d9855976..5e2724a928ed 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -1761,7 +1761,7 @@ static void encode_exchange_id(struct xdr_stream *xdr,
+       int len = 0;
+ 
+       encode_op_hdr(xdr, OP_EXCHANGE_ID, decode_exchange_id_maxsz, hdr);
+-      encode_nfs4_verifier(xdr, args->verifier);
++      encode_nfs4_verifier(xdr, &args->verifier);
+ 
+       encode_string(xdr, strlen(args->client->cl_owner_id),
+                       args->client->cl_owner_id);
+diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
+index bfc204e70338..cd32a49ae81e 100644
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -16,6 +16,19 @@
+ 
+ #ifdef CONFIG_CPUSETS
+ 
++/*
++ * Static branch rewrites can happen in an arbitrary order for a given
++ * key. In code paths where we need to loop with read_mems_allowed_begin() and
++ * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
++ * to ensure that begin() always gets rewritten before retry() in the
++ * disabled -> enabled transition. If not, then if local irqs are disabled
++ * around the loop, we can deadlock since retry() would always be
++ * comparing the latest value of the mems_allowed seqcount against 0 as
++ * begin() still would see cpusets_enabled() as false. The enabled -> disabled
++ * transition should happen in reverse order for the same reasons (want to stop
++ * looking at real value of mems_allowed.sequence in retry() first).
++ */
++extern struct static_key_false cpusets_pre_enable_key;
+ extern struct static_key_false cpusets_enabled_key;
+ static inline bool cpusets_enabled(void)
+ {
+@@ -30,12 +43,14 @@ static inline int nr_cpusets(void)
+ 
+ static inline void cpuset_inc(void)
+ {
++      static_branch_inc(&cpusets_pre_enable_key);
+       static_branch_inc(&cpusets_enabled_key);
+ }
+ 
+ static inline void cpuset_dec(void)
+ {
+       static_branch_dec(&cpusets_enabled_key);
++      static_branch_dec(&cpusets_pre_enable_key);
+ }
+ 
+ extern int cpuset_init(void);
+@@ -113,7 +128,7 @@ extern void cpuset_print_current_mems_allowed(void);
+  */
+ static inline unsigned int read_mems_allowed_begin(void)
+ {
+-      if (!cpusets_enabled())
++      if (!static_branch_unlikely(&cpusets_pre_enable_key))
+               return 0;
+ 
+       return read_seqcount_begin(&current->mems_allowed_seq);
+@@ -127,7 +142,7 @@ static inline unsigned int read_mems_allowed_begin(void)
+  */
+ static inline bool read_mems_allowed_retry(unsigned int seq)
+ {
+-      if (!cpusets_enabled())
++      if (!static_branch_unlikely(&cpusets_enabled_key))
+               return false;
+ 
+       return read_seqcount_retry(&current->mems_allowed_seq, seq);
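The consumer pattern the two keys protect looks like this (it is the existing begin/retry idiom, not a new API): snapshot the seqcount, copy mems_allowed, and loop if an update raced in.

    unsigned int seq;
    nodemask_t nodes;

    do {
            seq = read_mems_allowed_begin();  /* keys off cpusets_pre_enable_key */
            nodes = current->mems_allowed;
    } while (read_mems_allowed_retry(seq));   /* keys off cpusets_enabled_key */

Flipping pre_enable first guarantees begin() hands out a live sequence number before retry() starts checking one, so the loop can never spin forever comparing a real seqcount against the disabled-path constant 0.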
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 08d947fc4c59..e8471c2ca83a 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -507,6 +507,10 @@ struct mm_struct {
+        * PROT_NONE or PROT_NUMA mapped page.
+        */
+       bool tlb_flush_pending;
++#endif
++#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
++      /* See flush_tlb_batched_pending() */
++      bool tlb_flush_batched;
+ #endif
+       struct uprobes_state uprobes_state;
+ #ifdef CONFIG_X86_INTEL_MPX
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index beb1e10f446e..3bf867a0c3b3 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -1199,7 +1199,7 @@ struct nfs41_state_protection {
+ 
+ struct nfs41_exchange_id_args {
+       struct nfs_client               *client;
+-      nfs4_verifier                   *verifier;
++      nfs4_verifier                   verifier;
+       u32                             flags;
+       struct nfs41_state_protection   state_protect;
+ };
+diff --git a/include/linux/property.h b/include/linux/property.h
+index 856e50b2140c..338f9b76914b 100644
+--- a/include/linux/property.h
++++ b/include/linux/property.h
+@@ -33,6 +33,8 @@ enum dev_dma_attr {
+       DEV_DMA_COHERENT,
+ };
+ 
++struct fwnode_handle *dev_fwnode(struct device *dev);
++
+ bool device_property_present(struct device *dev, const char *propname);
+ int device_property_read_u8_array(struct device *dev, const char *propname,
+                                 u8 *val, size_t nval);
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index f425eb3318ab..14f58cf06054 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -830,6 +830,16 @@ struct signal_struct {
+ 
+ #define SIGNAL_UNKILLABLE     0x00000040 /* for init: ignore fatal signals */
+ 
++#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
++                        SIGNAL_STOP_CONTINUED)
++
++static inline void signal_set_stop_flags(struct signal_struct *sig,
++                                       unsigned int flags)
++{
++      WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
++      sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
++}
++
+ /* If true, all threads except ->group_exit_task have pending SIGKILL */
+ static inline int signal_group_exit(const struct signal_struct *sig)
+ {
+diff --git a/include/linux/slab.h b/include/linux/slab.h
+index 084b12bad198..4c5363566815 100644
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -226,7 +226,7 @@ static inline const char *__check_heap_object(const void *ptr,
+  * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
+  */
+ #define KMALLOC_SHIFT_HIGH    (PAGE_SHIFT + 1)
+-#define KMALLOC_SHIFT_MAX     (MAX_ORDER + PAGE_SHIFT)
++#define KMALLOC_SHIFT_MAX     (MAX_ORDER + PAGE_SHIFT - 1)
+ #ifndef KMALLOC_SHIFT_LOW
+ #define KMALLOC_SHIFT_LOW     3
+ #endif
+@@ -239,7 +239,7 @@ static inline const char *__check_heap_object(const void *ptr,
+  * be allocated from the same page.
+  */
+ #define KMALLOC_SHIFT_HIGH    PAGE_SHIFT
+-#define KMALLOC_SHIFT_MAX     30
++#define KMALLOC_SHIFT_MAX     (MAX_ORDER + PAGE_SHIFT - 1)
+ #ifndef KMALLOC_SHIFT_LOW
+ #define KMALLOC_SHIFT_LOW     3
+ #endif
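Worked out for a common configuration (4 KiB pages, so PAGE_SHIFT = 12, and the default MAX_ORDER of 11): the old SLUB bound MAX_ORDER + PAGE_SHIFT = 23 advertised a 2^23 = 8 MiB kmalloc cache, but the page allocator's largest block is order MAX_ORDER - 1 = 10, i.e. 2^(10 + 12) = 4 MiB. The corrected bound 11 + 12 - 1 = 22 caps kmalloc at exactly what the page allocator can supply, and the SLAB branch now derives its limit the same way instead of hard-coding 30.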
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index fc6e22186405..733a21ef8da4 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -311,6 +311,7 @@ enum {
+ 
+       __WQ_DRAINING           = 1 << 16, /* internal: workqueue is draining */
+       __WQ_ORDERED            = 1 << 17, /* internal: workqueue is ordered */
++      __WQ_ORDERED_EXPLICIT   = 1 << 19, /* internal: alloc_ordered_workqueue() */
+       __WQ_LEGACY             = 1 << 18, /* internal: create*_workqueue() */
+ 
+       WQ_MAX_ACTIVE           = 512,    /* I like 512, better ideas? */
+@@ -409,7 +410,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
+  * Pointer to the allocated workqueue on success, %NULL on failure.
+  */
+ #define alloc_ordered_workqueue(fmt, flags, args...)                  \
+-      alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
++      alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |                \
++                      __WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
+ 
+ #define create_workqueue(name)                                                \
+       alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
+diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
+index e0f4109e64c6..c2aa73e5e6bb 100644
+--- a/include/net/iw_handler.h
++++ b/include/net/iw_handler.h
+@@ -556,7 +556,8 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends,
+               memcpy(stream + lcp_len,
+                      ((char *) &iwe->u) + IW_EV_POINT_OFF,
+                      IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
+-              memcpy(stream + point_len, extra, iwe->u.data.length);
++              if (iwe->u.data.length && extra)
++                      memcpy(stream + point_len, extra, iwe->u.data.length);
+               stream += event_len;
+       }
+       return stream;
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index 31acc3f4f132..61d9ce89d10d 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -460,6 +460,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
+ 
+ #define _sctp_walk_params(pos, chunk, end, member)\
+ for (pos.v = chunk->member;\
++     (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
++      (void *)chunk + end) &&\
+      pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
+      ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
+      pos.v += SCTP_PAD4(ntohs(pos.p->length)))
+@@ -470,6 +472,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
+ #define _sctp_walk_errors(err, chunk_hdr, end)\
+ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
+           sizeof(sctp_chunkhdr_t));\
++     ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\
++      (void *)chunk_hdr + end) &&\
+      (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
+      ntohs(err->length) >= sizeof(sctp_errhdr_t); \
+      err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length))))
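Both macros gain the same guard: verify that the length field itself lies within the chunk before dereferencing it, since the pre-existing bound used ntohs(pos.p->length) and so already trusted the very value it was supposed to validate. The same defensive TLV walk in stand-alone form (plain C, host byte order for brevity):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct tlv { uint16_t type; uint16_t length; };      /* payload follows */

    static void walk(const uint8_t *buf, size_t len)
    {
            size_t off = 0;

            while (off + sizeof(struct tlv) <= len) {    /* length field in bounds? */
                    struct tlv hdr;

                    memcpy(&hdr, buf + off, sizeof(hdr));
                    if (hdr.length < sizeof(struct tlv) || hdr.length > len - off)
                            break;                       /* truncated or forged */
                    printf("type %u, %u bytes\n",
                           (unsigned)hdr.type, (unsigned)hdr.length);
                    off += (hdr.length + 3u) & ~3u;      /* pad to 4, as SCTP does */
            }
    }

    int main(void)
    {
            uint8_t pkt[16] = { 0 };
            struct tlv ok = { .type = 1, .length = 8 };
            struct tlv bad = { .type = 2, .length = 64 };  /* claims too much */

            memcpy(pkt, &ok, sizeof(ok));
            memcpy(pkt + 8, &bad, sizeof(bad));
            walk(pkt, sizeof(pkt)); /* prints the first record, rejects the second */
            return 0;
    }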
+diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
+index 33b2e75bf2eb..c8132b419148 100644
+--- a/include/target/iscsi/iscsi_target_core.h
++++ b/include/target/iscsi/iscsi_target_core.h
+@@ -563,6 +563,7 @@ struct iscsi_conn {
+ #define LOGIN_FLAGS_READ_ACTIVE               1
+ #define LOGIN_FLAGS_CLOSED            2
+ #define LOGIN_FLAGS_READY             4
++#define LOGIN_FLAGS_INITIAL_PDU               8
+       unsigned long           login_flags;
+       struct delayed_work     login_work;
+       struct delayed_work     login_cleanup_work;
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 1fde8eec9529..4c233437ee1a 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -3487,11 +3487,11 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
+       cgrp->subtree_control &= ~disable;
+ 
+       ret = cgroup_apply_control(cgrp);
+-
+       cgroup_finalize_control(cgrp, ret);
++      if (ret)
++              goto out_unlock;
+ 
+       kernfs_activate(cgrp->kn);
+-      ret = 0;
+ out_unlock:
+       cgroup_kn_unlock(of->kn);
+       return ret ?: nbytes;
+@@ -5718,6 +5718,10 @@ int __init cgroup_init(void)
+ 
+               if (ss->bind)
+                       ss->bind(init_css_set.subsys[ssid]);
++
++              mutex_lock(&cgroup_mutex);
++              css_populate_dir(init_css_set.subsys[ssid]);
++              mutex_unlock(&cgroup_mutex);
+       }
+ 
+       /* init_css_set.subsys[] has been updated, re-hash */
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 24d175d2b62d..247afb108343 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -61,6 +61,7 @@
+ #include <linux/cgroup.h>
+ #include <linux/wait.h>
+ 
++DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
+ DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
+ 
+ /* See "Frequency meter" comments, below. */
+diff --git a/kernel/signal.c b/kernel/signal.c
+index deb04d5983ed..e48668c3c972 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -346,7 +346,7 @@ static bool task_participate_group_stop(struct task_struct *task)
+        * fresh group stop.  Read comment in do_signal_stop() for details.
+        */
+       if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
+-              sig->flags = SIGNAL_STOP_STOPPED;
++              signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
+               return true;
+       }
+       return false;
+@@ -845,7 +845,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
+                        * will take ->siglock, notice SIGNAL_CLD_MASK, and
+                        * notify its parent. See get_signal_to_deliver().
+                        */
+-                      signal->flags = why | SIGNAL_STOP_CONTINUED;
++                      signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
+                       signal->group_stop_count = 0;
+                       signal->group_exit_code = 0;
+               }
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index c611c47de884..944ad64277a6 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1536,7 +1536,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
+               base->is_idle = false;
+       } else {
+               if (!is_max_delta)
+-                      expires = basem + (nextevt - basej) * TICK_NSEC;
++                      expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
+               /*
+                * If we expect to sleep more than a tick, mark the base idle:
+                */
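The cast matters on 32-bit builds, where nextevt - basej is a 32-bit unsigned long: multiplying by TICK_NSEC before widening wraps for deltas beyond roughly 4.3 seconds at HZ=1000. The failure mode in miniature:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t delta_ticks = 5000;     /* ~5 s worth of 1 ms ticks */
            uint32_t tick_nsec = 1000000;

            uint64_t wrong = delta_ticks * tick_nsec;           /* wraps at 2^32 */
            uint64_t right = (uint64_t)delta_ticks * tick_nsec; /* as the fix does */

            printf("wrong: %llu ns\nright: %llu ns\n",
                   (unsigned long long)wrong, (unsigned long long)right);
            return 0;
    }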
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 479d840db286..776dda02e751 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3730,8 +3730,12 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
+               return -EINVAL;
+ 
+       /* creating multiple pwqs breaks ordering guarantee */
+-      if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
+-              return -EINVAL;
++      if (!list_empty(&wq->pwqs)) {
++              if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
++                      return -EINVAL;
++
++              wq->flags &= ~__WQ_ORDERED;
++      }
+ 
+       ctx = apply_wqattrs_prepare(wq, attrs);
+       if (!ctx)
+@@ -3915,6 +3919,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
+       struct workqueue_struct *wq;
+       struct pool_workqueue *pwq;
+ 
++      /*
++       * Unbound && max_active == 1 used to imply ordered, which is no
++       * longer the case on NUMA machines due to per-node pools.  While
++       * alloc_ordered_workqueue() is the right way to create an ordered
++       * workqueue, keep the previous behavior to avoid subtle breakages
++       * on NUMA.
++       */
++      if ((flags & WQ_UNBOUND) && max_active == 1)
++              flags |= __WQ_ORDERED;
++
+       /* see the comment above the definition of WQ_POWER_EFFICIENT */
+       if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
+               flags |= WQ_UNBOUND;
+@@ -4103,13 +4117,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
+       struct pool_workqueue *pwq;
+ 
+       /* disallow meddling with max_active for ordered workqueues */
+-      if (WARN_ON(wq->flags & __WQ_ORDERED))
++      if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+               return;
+ 
+       max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
+ 
+       mutex_lock(&wq->mutex);
+ 
++      wq->flags &= ~__WQ_ORDERED;
+       wq->saved_max_active = max_active;
+ 
+       for_each_pwq(pwq, wq)
+@@ -5214,7 +5229,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
+        * attributes breaks ordering guarantee.  Disallow exposing ordered
+        * workqueues.
+        */
+-      if (WARN_ON(wq->flags & __WQ_ORDERED))
++      if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+               return -EINVAL;
+ 
+       wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
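Net effect of the flag split: the hard ordering guarantee is now tied to alloc_ordered_workqueue() only, while unbound max_active == 1 queues stay implicitly ordered until someone tunes them. In driver terms (the queue names are illustrative):

    struct workqueue_struct *ordered, *single;

    /* __WQ_ORDERED_EXPLICIT set: max_active and sysfs attrs stay locked down */
    ordered = alloc_ordered_workqueue("mydrv-ordered", 0);

    /* unbound, max_active == 1: implicitly ordered for compatibility... */
    single = alloc_workqueue("mydrv-single", WQ_UNBOUND, 1);

    /* ...but tuning it now quietly drops the ordering instead of WARNing */
    workqueue_set_max_active(single, 4);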
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index a6c8db1d62f6..f60e67217f18 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -145,7 +145,7 @@ config DEBUG_INFO_REDUCED
+ 
+ config DEBUG_INFO_SPLIT
+       bool "Produce split debuginfo in .dwo files"
+-      depends on DEBUG_INFO
++      depends on DEBUG_INFO && !FRV
+       help
+         Generate debug info into separate .dwo files. This significantly
+         reduces the build directory size for builds with DEBUG_INFO,
+diff --git a/mm/internal.h b/mm/internal.h
+index 537ac9951f5f..34a5459e5989 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -472,6 +472,7 @@ struct tlbflush_unmap_batch;
+ #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+ void try_to_unmap_flush(void);
+ void try_to_unmap_flush_dirty(void);
++void flush_tlb_batched_pending(struct mm_struct *mm);
+ #else
+ static inline void try_to_unmap_flush(void)
+ {
+@@ -479,7 +480,9 @@ static inline void try_to_unmap_flush(void)
+ static inline void try_to_unmap_flush_dirty(void)
+ {
+ }
+-
++static inline void flush_tlb_batched_pending(struct mm_struct *mm)
++{
++}
+ #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
+ 
+ extern const struct trace_print_flags pageflag_names[];
+diff --git a/mm/madvise.c b/mm/madvise.c
+index 93fb63e88b5e..253b1533fba5 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -21,6 +21,7 @@
+ #include <linux/swap.h>
+ #include <linux/swapops.h>
+ #include <linux/mmu_notifier.h>
++#include "internal.h"
+ 
+ #include <asm/tlb.h>
+ 
+@@ -282,6 +283,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
+               return 0;
+ 
+       orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
++      flush_tlb_batched_pending(mm);
+       arch_enter_lazy_mmu_mode();
+       for (; addr != end; pte++, addr += PAGE_SIZE) {
+               ptent = *pte;
+diff --git a/mm/memory.c b/mm/memory.c
+index e6a5a1f20492..9bf3da0d0e14 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1124,6 +1124,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
+       init_rss_vec(rss);
+       start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+       pte = start_pte;
++      flush_tlb_batched_pending(mm);
+       arch_enter_lazy_mmu_mode();
+       do {
+               pte_t ptent = *pte;
+diff --git a/mm/mprotect.c b/mm/mprotect.c
+index 11936526b08b..ae740c9b1f9b 100644
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -74,6 +74,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+       if (!pte)
+               return 0;
+ 
++      flush_tlb_batched_pending(vma->vm_mm);
+       arch_enter_lazy_mmu_mode();
+       do {
+               oldpte = *pte;
+diff --git a/mm/mremap.c b/mm/mremap.c
+index 30d7d2482eea..15976716dd40 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -142,6 +142,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
+       new_ptl = pte_lockptr(mm, new_pmd);
+       if (new_ptl != old_ptl)
+               spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
++      flush_tlb_batched_pending(vma->vm_mm);
+       arch_enter_lazy_mmu_mode();
+ 
+       for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 56df8c24689d..77b797c2d094 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1875,14 +1875,14 @@ int move_freepages(struct zone *zone,
+ #endif
+ 
+       for (page = start_page; page <= end_page;) {
+-              /* Make sure we are not inadvertently changing nodes */
+-              VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
+-
+               if (!pfn_valid_within(page_to_pfn(page))) {
+                       page++;
+                       continue;
+               }
+ 
++              /* Make sure we are not inadvertently changing nodes */
++              VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
++
+               if (!PageBuddy(page)) {
+                       page++;
+                       continue;
+@@ -6445,8 +6445,8 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
+       }
+ 
+       if (pages && s)
+-              pr_info("Freeing %s memory: %ldK (%p - %p)\n",
+-                      s, pages << (PAGE_SHIFT - 10), start, end);
++              pr_info("Freeing %s memory: %ldK\n",
++                      s, pages << (PAGE_SHIFT - 10));
+ 
+       return pages;
+ }
+diff --git a/mm/rmap.c b/mm/rmap.c
+index cd37c1c7e21b..94488b0362f8 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -616,6 +616,13 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
+       cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
+       tlb_ubc->flush_required = true;
+ 
++      /*
++       * Ensure compiler does not re-order the setting of tlb_flush_batched
++       * before the PTE is cleared.
++       */
++      barrier();
++      mm->tlb_flush_batched = true;
++
+       /*
+        * If the PTE was dirty then it's best to assume it's writable. The
+        * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
+@@ -643,6 +650,35 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
+ 
+       return should_defer;
+ }
++
++/*
++ * Reclaim unmaps pages under the PTL but do not flush the TLB prior to
++ * releasing the PTL if TLB flushes are batched. It's possible for a parallel
++ * operation such as mprotect or munmap to race between reclaim unmapping
++ * the page and flushing the page. If this race occurs, it potentially allows
++ * access to data via a stale TLB entry. Tracking all mm's that have TLB
++ * batching in flight would be expensive during reclaim so instead track
++ * whether TLB batching occurred in the past and if so then do a flush here
++ * if required. This will cost one additional flush per reclaim cycle paid
++ * by the first operation at risk such as mprotect and munmap.
++ *
++ * This must be called under the PTL so that an access to tlb_flush_batched
++ * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
++ * via the PTL.
++ */
++void flush_tlb_batched_pending(struct mm_struct *mm)
++{
++      if (mm->tlb_flush_batched) {
++              flush_tlb_mm(mm);
++
++              /*
++               * Do not allow the compiler to re-order the clearing of
++               * tlb_flush_batched before the tlb is flushed.
++               */
++              barrier();
++              mm->tlb_flush_batched = false;
++      }
++}
+ #else
+ static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
+               struct page *page, bool writable)
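
For context on the mm/ hunks above: madvise.c, memory.c, mprotect.c and mremap.c all gain the same call in the same place, so the shared pattern is worth spelling out. A minimal sketch condensed from those hunks (not a new API, just the shape they share):

        pte = pte_offset_map_lock(mm, pmd, addr, &ptl); /* take the PTL   */
        flush_tlb_batched_pending(mm);  /* flush anything reclaim batched */
        arch_enter_lazy_mmu_mode();
        /* ... read and modify PTEs without risk of a stale TLB entry ... */
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(orig_pte, ptl);

Because flush_tlb_batched_pending() runs under the PTL, it serialises against reclaim's set_tlb_ubc_flush_pending() and closes the stale-TLB window the rmap.c comment above describes.
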
+diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
+index b94b1d293506..151e047ce072 100644
+--- a/net/core/dev_ioctl.c
++++ b/net/core/dev_ioctl.c
+@@ -28,6 +28,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
+ 
+       if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
+               return -EFAULT;
++      ifr.ifr_name[IFNAMSIZ-1] = 0;
+ 
+       error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
+       if (error)
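
The dev_ifname() fix above is the standard hardening for fixed-size strings copied from userspace: copy_from_user() gives no guarantee of NUL termination, so the kernel must terminate the buffer itself before handing it to str* helpers. Generic shape (a sketch, not code from this patch):

        char name[IFNAMSIZ];

        if (copy_from_user(name, ubuf, sizeof(name)))
                return -EFAULT;
        name[sizeof(name) - 1] = '\0';  /* userspace may omit the NUL */
        /* now safe for strlen()/strcmp()-style consumers */
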
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 9c6fd7f83a4a..4d2629781e8b 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1965,7 +1965,8 @@ static int do_setlink(const struct sk_buff *skb,
+               struct sockaddr *sa;
+               int len;
+ 
+-              len = sizeof(sa_family_t) + dev->addr_len;
++              len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
++                                                sizeof(*sa));
+               sa = kmalloc(len, GFP_KERNEL);
+               if (!sa) {
+                       err = -ENOMEM;
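
The do_setlink() change sizes the temporary buffer to whichever view of it is larger: the raw hardware address (dev->addr_len bytes) or struct sockaddr itself. On devices with very short addresses the old length could be smaller than sizeof(struct sockaddr), so code treating the buffer as a full sockaddr could touch memory past the allocation. The idiom, as used in the hunk above:

        len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len, sizeof(*sa));
        sa = kmalloc(len, GFP_KERNEL);
        if (!sa)
                return -ENOMEM;
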
+diff --git a/net/dccp/feat.c b/net/dccp/feat.c
+index 1704948e6a12..f227f002c73d 100644
+--- a/net/dccp/feat.c
++++ b/net/dccp/feat.c
+@@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk)
+        * singleton values (which always leads to failure).
+        * These settings can still (later) be overridden via sockopts.
+        */
+-      if (ccid_get_builtin_ccids(&tx.val, &tx.len) ||
+-          ccid_get_builtin_ccids(&rx.val, &rx.len))
++      if (ccid_get_builtin_ccids(&tx.val, &tx.len))
+               return -ENOBUFS;
++      if (ccid_get_builtin_ccids(&rx.val, &rx.len)) {
++              kfree(tx.val);
++              return -ENOBUFS;
++      }
+ 
+       if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) ||
+           !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len))
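
The dccp_feat_init() change splits a short-circuited error check because the old form leaked tx.val whenever the first allocation succeeded and the second failed. The general shape of this class of fix (a generic sketch with hypothetical helpers, not code from the patch):

        if (alloc_first(&a))            /* nothing allocated yet: just fail */
                return -ENOBUFS;
        if (alloc_second(&b)) {
                kfree(a);               /* release the first before failing */
                return -ENOBUFS;
        }
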
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 86b0933ecd45..8fc160098e11 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -637,6 +637,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+               goto drop_and_free;
+ 
+       inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
++      reqsk_put(req);
+       return 0;
+ 
+ drop_and_free:
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 2ac9d2a1aaab..28e8252cc5ea 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -380,6 +380,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+               goto drop_and_free;
+ 
+       inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
++      reqsk_put(req);
+       return 0;
+ 
+ drop_and_free:
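
Both DCCP hunks add the same line: after inet_csk_reqsk_queue_hash_add() the request-socket queue holds its own reference, and the reference the caller still holds must be dropped with reqsk_put() or the request sock is never freed. Pattern as used above (mirroring what the equivalent TCP path already does):

        inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
        reqsk_put(req);         /* drop the caller's ref; the queue keeps its own */
        return 0;
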
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 3d92534c4450..968d8e165e3d 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -1319,13 +1319,14 @@ static struct pernet_operations fib_net_ops = {
+ 
+ void __init ip_fib_init(void)
+ {
+-      rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
+-      rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
+-      rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
++      fib_trie_init();
+ 
+       register_pernet_subsys(&fib_net_ops);
++
+       register_netdevice_notifier(&fib_netdev_notifier);
+       register_inetaddr_notifier(&fib_inetaddr_notifier);
+ 
+-      fib_trie_init();
++      rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
++      rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
++      rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
+ }
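
The ip_fib_init() reordering follows the usual bring-up rule: build internal data structures first, then per-netns state and notifiers, and only then register the rtnetlink handlers that make the subsystem reachable from userspace. Registering handlers first left a window where a netlink request could arrive before the trie existed. The new order, taken directly from the hunk:

        fib_trie_init();                                /* core data structures */
        register_pernet_subsys(&fib_net_ops);           /* per-namespace state  */
        register_netdevice_notifier(&fib_netdev_notifier);
        register_inetaddr_notifier(&fib_inetaddr_notifier);
        rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
        /* ...remaining rtnl_register() calls last, once everything is ready */
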
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index e5c1dbef3626..06215ba88b93 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -936,7 +936,8 @@ static int __ip_append_data(struct sock *sk,
+               csummode = CHECKSUM_PARTIAL;
+ 
+       cork->length += length;
+-      if (((length > mtu) || (skb && skb_is_gso(skb))) &&
++      if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
++           (skb && skb_is_gso(skb))) &&
+           (sk->sk_protocol == IPPROTO_UDP) &&
+           (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+           (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
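
The reworked test now counts the data already queued on the last skb, not just the newly appended chunk, when deciding whether to take the UFO/fragmentation branch. A worked example under assumed numbers: with mtu = 1500, an existing skb of skb->len = 1400 bytes and a new length = 200, the old test (200 > 1500) stayed on the non-fragmenting path even though 1400 + 200 = 1600 exceeds the MTU; the new test (1400 + 200 > 1500) correctly chooses the fragmenting path.
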
+diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
+index fd8220213afc..146d86105183 100644
+--- a/net/ipv4/netfilter/nf_reject_ipv4.c
++++ b/net/ipv4/netfilter/nf_reject_ipv4.c
+@@ -126,6 +126,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
+       /* ip_route_me_harder expects skb->dst to be set */
+       skb_dst_set_noref(nskb, skb_dst(oldskb));
+ 
++      nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
++
+       skb_reserve(nskb, LL_MAX_HEADER);
+       niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
+                                  ip4_dst_hoplimit(skb_dst(nskb)));
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index e3c4043c27de..b6f710d515d0 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -334,6 +334,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
+       treq = tcp_rsk(req);
+       treq->rcv_isn           = ntohl(th->seq) - 1;
+       treq->snt_isn           = cookie;
++      treq->txhash            = net_tx_rndhash();
+       req->mss                = mss;
+       ireq->ir_num            = ntohs(th->dest);
+       ireq->ir_rmt_port       = th->source;
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 80bc36b25de2..566cfc50f7cf 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -958,7 +958,7 @@ static struct ctl_table ipv4_net_table[] = {
+               .data           = &init_net.ipv4.sysctl_tcp_notsent_lowat,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+-              .proc_handler   = proc_dointvec,
++              .proc_handler   = proc_douintvec,
+       },
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH
+       {
+diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
+index 0ea66c2c9344..cb8db347c680 100644
+--- a/net/ipv4/tcp_bbr.c
++++ b/net/ipv4/tcp_bbr.c
+@@ -83,7 +83,8 @@ struct bbr {
+               cwnd_gain:10,   /* current gain for setting cwnd */
+               full_bw_cnt:3,  /* number of rounds without large bw gains */
+               cycle_idx:3,    /* current index in pacing_gain cycle array */
+-              unused_b:6;
++              has_seen_rtt:1, /* have we seen an RTT sample yet? */
++              unused_b:5;
+       u32     prior_cwnd;     /* prior cwnd upon entering loss recovery */
+       u32     full_bw;        /* recent bw, to estimate if pipe is full */
+ };
+@@ -182,6 +183,35 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
+       return rate >> BW_SCALE;
+ }
+ 
++/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
++static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
++{
++      u64 rate = bw;
++
++      rate = bbr_rate_bytes_per_sec(sk, rate, gain);
++      rate = min_t(u64, rate, sk->sk_max_pacing_rate);
++      return rate;
++}
++
++/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
++static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
++{
++      struct tcp_sock *tp = tcp_sk(sk);
++      struct bbr *bbr = inet_csk_ca(sk);
++      u64 bw;
++      u32 rtt_us;
++
++      if (tp->srtt_us) {              /* any RTT sample yet? */
++              rtt_us = max(tp->srtt_us >> 3, 1U);
++              bbr->has_seen_rtt = 1;
++      } else {                         /* no RTT sample yet */
++              rtt_us = USEC_PER_MSEC;  /* use nominal default RTT */
++      }
++      bw = (u64)tp->snd_cwnd * BW_UNIT;
++      do_div(bw, rtt_us);
++      sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
++}
++
+ /* Pace using current bw estimate and a gain factor. In order to help drive the
+  * network toward lower queues while maintaining high utilization and low
+  * latency, the average pacing rate aims to be slightly (~1%) lower than the
+@@ -191,12 +221,13 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
+  */
+ static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
+ {
++      struct tcp_sock *tp = tcp_sk(sk);
+       struct bbr *bbr = inet_csk_ca(sk);
+-      u64 rate = bw;
++      u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain);
+ 
+-      rate = bbr_rate_bytes_per_sec(sk, rate, gain);
+-      rate = min_t(u64, rate, sk->sk_max_pacing_rate);
+-      if (bbr->mode != BBR_STARTUP || rate > sk->sk_pacing_rate)
++      if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
++              bbr_init_pacing_rate_from_rtt(sk);
++      if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
+               sk->sk_pacing_rate = rate;
+ }
+ 
+@@ -769,7 +800,6 @@ static void bbr_init(struct sock *sk)
+ {
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct bbr *bbr = inet_csk_ca(sk);
+-      u64 bw;
+ 
+       bbr->prior_cwnd = 0;
+       bbr->tso_segs_goal = 0;  /* default segs per skb until first ACK */
+@@ -785,11 +815,8 @@ static void bbr_init(struct sock *sk)
+ 
+       minmax_reset(&bbr->bw, bbr->rtt_cnt, 0);  /* init max bw to 0 */
+ 
+-      /* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
+-      bw = (u64)tp->snd_cwnd * BW_UNIT;
+-      do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC);
+-      sk->sk_pacing_rate = 0;         /* force an update of sk_pacing_rate */
+-      bbr_set_pacing_rate(sk, bw, bbr_high_gain);
++      bbr->has_seen_rtt = 0;
++      bbr_init_pacing_rate_from_rtt(sk);
+ 
+       bbr->restore_cwnd = 0;
+       bbr->round_start = 0;
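
The BBR refactor above moves the initial pacing computation into bbr_init_pacing_rate_from_rtt() so it can be re-run once a real RTT sample arrives; the new has_seen_rtt bit records whether that has happened. The rate it derives is roughly pacing_rate = high_gain * (snd_cwnd * mss) / RTT. Illustrative numbers (assumed, not from the patch): with snd_cwnd = 10 segments of 1448 bytes, srtt = 10 ms and high_gain ~= 2.89, the initial rate is about 2.89 * 10 * 1448 / 0.010 ~= 4.2 MB/s. Before the fix, a connection initialised with no RTT sample fell back to the nominal 1 ms default and kept the resulting overestimate; now bbr_set_pacing_rate() re-derives the rate from the first real srtt_us sample.
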
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 5a4b8e7bcedd..a5cdf2a23609 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -662,8 +662,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+               *prevhdr = NEXTHDR_FRAGMENT;
+               tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
+               if (!tmp_hdr) {
+-                      IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+-                                    IPSTATS_MIB_FRAGFAILS);
+                       err = -ENOMEM;
+                       goto fail;
+               }
+@@ -782,8 +780,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+               frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
+                                hroom + troom, GFP_ATOMIC);
+               if (!frag) {
+-                      IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+-                                    IPSTATS_MIB_FRAGFAILS);
+                       err = -ENOMEM;
+                       goto fail;
+               }
+@@ -1376,7 +1372,7 @@ static int __ip6_append_data(struct sock *sk,
+        */
+ 
+       cork->length += length;
+-      if ((((length + fragheaderlen) > mtu) ||
++      if ((((length + (skb ? skb->len : headersize)) > mtu) ||
+            (skb && skb_is_gso(skb))) &&
+           (sk->sk_protocol == IPPROTO_UDP) &&
+           (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
+index 10090400c72f..eedee5d108d9 100644
+--- a/net/ipv6/netfilter/nf_reject_ipv6.c
++++ b/net/ipv6/netfilter/nf_reject_ipv6.c
+@@ -157,6 +157,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
+       fl6.fl6_sport = otcph->dest;
+       fl6.fl6_dport = otcph->source;
+       fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
++      fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
+       security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
+       dst = ip6_route_output(net, NULL, &fl6);
+       if (dst->error) {
+@@ -180,6 +181,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
+ 
+       skb_dst_set(nskb, dst);
+ 
++      nskb->mark = fl6.flowi6_mark;
++
+       skb_reserve(nskb, hh_len + dst->header_len);
+       ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
+                                   ip6_dst_hoplimit(dst));
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index e9065b8d3af8..abb2c307fbe8 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -78,7 +78,7 @@ EXPORT_SYMBOL(ipv6_select_ident);
+ 
+ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+ {
+-      u16 offset = sizeof(struct ipv6hdr);
++      unsigned int offset = sizeof(struct ipv6hdr);
+       unsigned int packet_len = skb_tail_pointer(skb) -
+               skb_network_header(skb);
+       int found_rhdr = 0;
+@@ -86,6 +86,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+ 
+       while (offset <= packet_len) {
+               struct ipv6_opt_hdr *exthdr;
++              unsigned int len;
+ 
+               switch (**nexthdr) {
+ 
+@@ -111,7 +112,10 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+ 
+               exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
+                                                offset);
+-              offset += ipv6_optlen(exthdr);
++              len = ipv6_optlen(exthdr);
++              if (len + offset >= IPV6_MAXPLEN)
++                      return -EINVAL;
++              offset += len;
+               *nexthdr = &exthdr->nexthdr;
+       }
+ 
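
Why the type change in ip6_find_1stfragopt() matters: ipv6_optlen() can report up to (255 + 1) * 8 = 2048 bytes for a single extension header, and with a 16-bit offset a crafted chain of headers can wrap the counter back below packet_len so the loop keeps walking from a bogus position. Illustrative overflow (assumed values):

        u16 offset = 65528;     /* accumulated across many ext headers  */
        offset += 2048;         /* one more maximal-length header       */
        /* offset is now 2040: it wrapped at 65536, passed the
         * "offset <= packet_len" test again, and parsing continued
         * on stale data. The fix widens offset to unsigned int and
         * rejects any header where len + offset >= IPV6_MAXPLEN. */
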
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index 59c483937aec..7a86433d8896 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -209,6 +209,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
+       treq->snt_synack.v64    = 0;
+       treq->rcv_isn = ntohl(th->seq) - 1;
+       treq->snt_isn = cookie;
++      treq->txhash = net_tx_rndhash();
+ 
+       /*
+        * We need to lookup the dst_entry to get the correct window size.
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index 48386bff8b4e..b28e45b691de 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -1088,8 +1088,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
+ 
+       nla_for_each_nested(a, attr, rem) {
+               int type = nla_type(a);
+-              int maxlen = ovs_ct_attr_lens[type].maxlen;
+-              int minlen = ovs_ct_attr_lens[type].minlen;
++              int maxlen;
++              int minlen;
+ 
+               if (type > OVS_CT_ATTR_MAX) {
+                       OVS_NLERR(log,
+@@ -1097,6 +1097,9 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
+                                 type, OVS_CT_ATTR_MAX);
+                       return -EINVAL;
+               }
++
++              maxlen = ovs_ct_attr_lens[type].maxlen;
++              minlen = ovs_ct_attr_lens[type].minlen;
+               if (nla_len(a) < minlen || nla_len(a) > maxlen) {
+                       OVS_NLERR(log,
+                                 "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
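
The parse_ct() change is the classic validate-before-use reordering: the old code indexed ovs_ct_attr_lens[type] before checking type against OVS_CT_ATTR_MAX, an out-of-bounds read for attribute types beyond the table. Safe shape, as in the hunk above:

        int type = nla_type(a);

        if (type > OVS_CT_ATTR_MAX)             /* validate the index first */
                return -EINVAL;
        maxlen = ovs_ct_attr_lens[type].maxlen; /* ...then use it */
        minlen = ovs_ct_attr_lens[type].minlen;
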
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 6a563e6e24de..365c83fcee02 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -4322,7 +4322,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+               register_prot_hook(sk);
+       }
+       spin_unlock(&po->bind_lock);
+-      if (closing && (po->tp_version > TPACKET_V2)) {
++      if (pg_vec && (po->tp_version > TPACKET_V2)) {
+               /* Because we don't support block-based V3 on tx-ring */
+               if (!tx_ring)
+                       prb_shutdown_retire_blk_timer(po, rb_queue);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index bb1aad39d987..6f337f00ba58 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2233,6 +2233,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
+       SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+       SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
++      SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
+       SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+       SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
+ 
+diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
+index 10c2a564a715..1ac96ef9ee20 100644
+--- a/sound/soc/codecs/rt5645.c
++++ b/sound/soc/codecs/rt5645.c
+@@ -3833,6 +3833,9 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
+               }
+       }
+ 
++      regmap_update_bits(rt5645->regmap, RT5645_ADDA_CLK1,
++              RT5645_I2S_PD1_MASK, RT5645_I2S_PD1_2);
++
+       if (rt5645->pdata.jd_invert) {
+               regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
+                       RT5645_JD_1_1_MASK, RT5645_JD_1_1_INV);
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 21c3ef01c438..80088c98ce27 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -181,6 +181,10 @@ int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
+               dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n",
+                               be->dai_link->name, event, dir);
+ 
++              if ((event == SND_SOC_DAPM_STREAM_STOP) &&
++                  (be->dpcm[dir].users >= 1))
++                      continue;
++
+               snd_soc_dapm_stream_event(be, dir, event);
+       }
+ 
