commit:     c20ad5072b7e3f69f4ff535dd534453ad3d7b8ec
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 11 17:39:51 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Aug 11 17:39:51 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c20ad507

Linux patch 4.12.6

 0000_README             |    4 +
 1005_linux-4.12.6.patch | 3935 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3939 insertions(+)

diff --git a/0000_README b/0000_README
index 29e1ca2..b88e1e0 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-4.12.5.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.12.5
 
+Patch:  1005_linux-4.12.6.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.12.6
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-4.12.6.patch b/1005_linux-4.12.6.patch
new file mode 100644
index 0000000..461714c
--- /dev/null
+++ b/1005_linux-4.12.6.patch
@@ -0,0 +1,3935 @@
+diff --git a/Makefile b/Makefile
+index 382e967b0792..c8d80b50495a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
+index 895fa6cfa15a..563901e0ec07 100644
+--- a/arch/arm/boot/dts/armada-388-gp.dts
++++ b/arch/arm/boot/dts/armada-388-gp.dts
+@@ -75,7 +75,7 @@
+                                       pinctrl-names = "default";
+                                       pinctrl-0 = <&pca0_pins>;
+                                       interrupt-parent = <&gpio0>;
+-                                      interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
++                                      interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
+                                       gpio-controller;
+                                       #gpio-cells = <2>;
+                                       interrupt-controller;
+@@ -87,7 +87,7 @@
+                                       compatible = "nxp,pca9555";
+                                       pinctrl-names = "default";
+                                       interrupt-parent = <&gpio0>;
+-                                      interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
++                                      interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
+                                       gpio-controller;
+                                       #gpio-cells = <2>;
+                                       interrupt-controller;
+diff --git a/arch/arm/boot/dts/tango4-vantage-1172.dts b/arch/arm/boot/dts/tango4-vantage-1172.dts
+index 86d8df98802f..13bcc460bcb2 100644
+--- a/arch/arm/boot/dts/tango4-vantage-1172.dts
++++ b/arch/arm/boot/dts/tango4-vantage-1172.dts
+@@ -22,7 +22,7 @@
+ };
+ 
+ &eth0 {
+-      phy-connection-type = "rgmii";
++      phy-connection-type = "rgmii-id";
+       phy-handle = <&eth0_phy>;
+       #address-cells = <1>;
+       #size-cells = <0>;
+diff --git a/arch/arm/mach-mvebu/platsmp.c b/arch/arm/mach-mvebu/platsmp.c
+index e62273aacb43..4ffbbd217e82 100644
+--- a/arch/arm/mach-mvebu/platsmp.c
++++ b/arch/arm/mach-mvebu/platsmp.c
+@@ -211,7 +211,7 @@ static int mv98dx3236_resume_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
+               return PTR_ERR(base);
+ 
+       writel(0, base + MV98DX3236_CPU_RESUME_CTRL_REG);
+-      writel(virt_to_phys(boot_addr), base + MV98DX3236_CPU_RESUME_ADDR_REG);
++      writel(__pa_symbol(boot_addr), base + MV98DX3236_CPU_RESUME_ADDR_REG);
+ 
+       iounmap(base);
+ 
+diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+index bc179efb10ef..b69e4a4ecdd8 100644
+--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+@@ -219,7 +219,7 @@
+                               reg = <0x18800 0x100>, <0x18C00 0x20>;
+                               gpiosb: gpio {
+                                       #gpio-cells = <2>;
+-                                      gpio-ranges = <&pinctrl_sb 0 0 29>;
++                                      gpio-ranges = <&pinctrl_sb 0 0 30>;
+                                       gpio-controller;
+                                       interrupts =
+                                       <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/mips/include/asm/mach-ralink/ralink_regs.h b/arch/mips/include/asm/mach-ralink/ralink_regs.h
+index 9df1a53bcb36..b4e7dfa214eb 100644
+--- a/arch/mips/include/asm/mach-ralink/ralink_regs.h
++++ b/arch/mips/include/asm/mach-ralink/ralink_regs.h
+@@ -13,6 +13,8 @@
+ #ifndef _RALINK_REGS_H_
+ #define _RALINK_REGS_H_
+ 
++#include <linux/io.h>
++
+ enum ralink_soc_type {
+       RALINK_UNKNOWN = 0,
+       RT2880_SOC,
+diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
+index 88fe0aad4390..bc208136bbb2 100644
+--- a/arch/parisc/include/asm/thread_info.h
++++ b/arch/parisc/include/asm/thread_info.h
+@@ -34,7 +34,7 @@ struct thread_info {
+ 
+ /* thread information allocation */
+ 
+-#define THREAD_SIZE_ORDER     2 /* PA-RISC requires at least 16k stack */
++#define THREAD_SIZE_ORDER     3 /* PA-RISC requires at least 32k stack */
+ /* Be sure to hunt all references to this down when you change the size of
+  * the kernel stack */
+ #define THREAD_SIZE             (PAGE_SIZE << THREAD_SIZE_ORDER)
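
This hunk doubles the PA-RISC kernel stack from 16k to 32k. A minimal sketch of the macro arithmetic, assuming the usual 4 KiB PAGE_SIZE (the harness below is illustrative, not part of the patch):

#include <stdio.h>

#define PAGE_SIZE         4096UL /* assumed 4 KiB pages */
#define THREAD_SIZE_ORDER 3      /* was 2 (16k stack) before this patch */
#define THREAD_SIZE       (PAGE_SIZE << THREAD_SIZE_ORDER)

int main(void)
{
        printf("kernel stack: %lu KiB\n", THREAD_SIZE / 1024); /* prints 32 */
        return 0;
}
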
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index 85a92db70afc..19c0c141bc3f 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -587,13 +587,12 @@ void flush_cache_range(struct vm_area_struct *vma,
+       if (parisc_requires_coherency())
+               flush_tlb_range(vma, start, end);
+ 
+-      if ((end - start) >= parisc_cache_flush_threshold) {
++      if ((end - start) >= parisc_cache_flush_threshold
++          || vma->vm_mm->context != mfsp(3)) {
+               flush_cache_all();
+               return;
+       }
+ 
+-      BUG_ON(vma->vm_mm->context != mfsp(3));
+-
+       flush_user_dcache_range_asm(start, end);
+       if (vma->vm_flags & VM_EXEC)
+               flush_user_icache_range_asm(start, end);
+diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
+index ba5e1c7b1f17..ef9a4eea662f 100644
+--- a/arch/parisc/kernel/irq.c
++++ b/arch/parisc/kernel/irq.c
+@@ -380,7 +380,7 @@ static inline int eirr_to_irq(unsigned long eirr)
+ /*
+  * IRQ STACK - used for irq handler
+  */
+-#define IRQ_STACK_SIZE      (4096 << 2) /* 16k irq stack size */
++#define IRQ_STACK_SIZE      (4096 << 3) /* 32k irq stack size */
+ 
+ union irq_stack_union {
+       unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 5c291df30fe3..40d8b552d15a 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -145,6 +145,19 @@ notrace unsigned int __check_irq_replay(void)
+ 
+       /* Clear bit 0 which we wouldn't clear otherwise */
+       local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
++      if (happened & PACA_IRQ_HARD_DIS) {
++              /*
++               * We may have missed a decrementer interrupt if hard disabled.
++               * Check the decrementer register in case we had a rollover
++               * while hard disabled.
++               */
++              if (!(happened & PACA_IRQ_DEC)) {
++                      if (decrementer_check_overflow()) {
++                              local_paca->irq_happened |= PACA_IRQ_DEC;
++                              happened |= PACA_IRQ_DEC;
++                      }
++              }
++      }
+ 
+       /*
+        * Force the delivery of pending soft-disabled interrupts on PS3.
+@@ -170,7 +183,7 @@ notrace unsigned int __check_irq_replay(void)
+        * in case we also had a rollover while hard disabled
+        */
+       local_paca->irq_happened &= ~PACA_IRQ_DEC;
+-      if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
++      if (happened & PACA_IRQ_DEC)
+               return 0x900;
+ 
+       /* Finally check if an external interrupt happened */
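
A condensed, hypothetical sketch of the replay logic after this hunk: the decrementer register is probed for a rollover only when interrupts had been hard-disabled, and the later dispatch relies purely on the accumulated happened bits (paca locking details elided):

/* sketch only, not the verbatim kernel code */
if (happened & PACA_IRQ_HARD_DIS) {
        /* a decrementer rollover may have been missed while hard-disabled */
        if (!(happened & PACA_IRQ_DEC) && decrementer_check_overflow())
                happened |= PACA_IRQ_DEC;
}
if (happened & PACA_IRQ_DEC)
        return 0x900;   /* replay the decrementer interrupt vector */
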
+diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
+index 925a4ef90559..660ed39e9c9a 100644
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -127,12 +127,19 @@ static void flush_tmregs_to_thread(struct task_struct *tsk)
+        * If task is not current, it will have been flushed already to
+        * it's thread_struct during __switch_to().
+        *
+-       * A reclaim flushes ALL the state.
++       * A reclaim flushes ALL the state or, if not in TM, saves the live
++       * TM SPRs into the appropriate thread structures.
+        */
+ 
+-      if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
+-              tm_reclaim_current(TM_CAUSE_SIGNAL);
++      if (tsk != current)
++              return;
+ 
++      if (MSR_TM_SUSPENDED(mfmsr())) {
++              tm_reclaim_current(TM_CAUSE_SIGNAL);
++      } else {
++              tm_enable();
++              tm_save_sprs(&(tsk->thread));
++      }
+ }
+ #else
+ static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
+diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
+index 2cddcda4f85f..87841d687f8d 100644
+--- a/arch/sparc/include/asm/mmu_context_64.h
++++ b/arch/sparc/include/asm/mmu_context_64.h
+@@ -27,9 +27,11 @@ void destroy_context(struct mm_struct *mm);
+ void __tsb_context_switch(unsigned long pgd_pa,
+                         struct tsb_config *tsb_base,
+                         struct tsb_config *tsb_huge,
+-                        unsigned long tsb_descr_pa);
++                        unsigned long tsb_descr_pa,
++                        unsigned long secondary_ctx);
+ 
+-static inline void tsb_context_switch(struct mm_struct *mm)
++static inline void tsb_context_switch_ctx(struct mm_struct *mm,
++                                        unsigned long ctx)
+ {
+       __tsb_context_switch(__pa(mm->pgd),
+                            &mm->context.tsb_block[MM_TSB_BASE],
+@@ -40,9 +42,12 @@ static inline void tsb_context_switch(struct mm_struct *mm)
+ #else
+                            NULL
+ #endif
+-                           , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
++                           , __pa(&mm->context.tsb_descr[MM_TSB_BASE]),
++                           ctx);
+ }
+ 
++#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
++
+ void tsb_grow(struct mm_struct *mm,
+             unsigned long tsb_index,
+             unsigned long mm_rss);
+@@ -112,8 +117,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
+        * cpu0 to update it's TSB because at that point the cpu_vm_mask
+        * only had cpu1 set in it.
+        */
+-      load_secondary_context(mm);
+-      tsb_context_switch(mm);
++      tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
+ 
+       /* Any time a processor runs a context on an address space
+        * for the first time, we must flush that context out of the
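
Callers that used to pair load_secondary_context() with tsb_context_switch() now hand the hardware context bits down in a single call, which lets the __tsb_context_switch assembly (patched further below in arch/sparc/kernel/tsb.S) write SECONDARY_CONTEXT and the TSB registers inside one interrupts-disabled region. A before/after sketch of the call site:

/* before: two steps, with a window between them */
load_secondary_context(mm);
tsb_context_switch(mm);

/* after: one step; the asm loads the context when ctx is non-zero */
tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
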
+diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
+index ec9c04de3664..ff05992dae7a 100644
+--- a/arch/sparc/include/asm/trap_block.h
++++ b/arch/sparc/include/asm/trap_block.h
+@@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
+ void init_cur_cpu_trap(struct thread_info *);
+ void setup_tba(void);
+ extern int ncpus_probed;
++extern u64 cpu_mondo_counter[NR_CPUS];
+ 
+ unsigned long real_hard_smp_processor_id(void);
+ 
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index fdf31040a7dc..3218bc43302e 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -622,22 +622,48 @@ static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
+       }
+ }
+ 
+-/* Multi-cpu list version.  */
++#define       CPU_MONDO_COUNTER(cpuid)        (cpu_mondo_counter[cpuid])
++#define       MONDO_USEC_WAIT_MIN             2
++#define       MONDO_USEC_WAIT_MAX             100
++#define       MONDO_RETRY_LIMIT               500000
++
++/* Multi-cpu list version.
++ *
++ * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
++ * Sometimes not all cpus receive the mondo, requiring us to re-send
++ * the mondo until all cpus have received, or cpus are truly stuck
++ * unable to receive mondo, and we timeout.
++ * Occasionally a target cpu strand is borrowed briefly by the hypervisor to
++ * perform guest service, such as PCIe error handling.  Considering the
++ * service time, a 1 second overall wait is reasonable for 1 cpu.
++ * Two in-between mondo check wait times are defined here: 2 usec for a
++ * single-cpu quick turnaround, and up to 100 usec for a large cpu count.
++ * Delivering a mondo to a large number of cpus can take longer, so we
++ * adjust the retry count as long as target cpus are making forward progress.
++ */
+ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
+ {
+-      int retries, this_cpu, prev_sent, i, saw_cpu_error;
++      int this_cpu, tot_cpus, prev_sent, i, rem;
++      int usec_wait, retries, tot_retries;
++      u16 first_cpu = 0xffff;
++      unsigned long xc_rcvd = 0;
+       unsigned long status;
++      int ecpuerror_id = 0;
++      int enocpu_id = 0;
+       u16 *cpu_list;
++      u16 cpu;
+ 
+       this_cpu = smp_processor_id();
+-
+       cpu_list = __va(tb->cpu_list_pa);
+-
+-      saw_cpu_error = 0;
+-      retries = 0;
++      usec_wait = cnt * MONDO_USEC_WAIT_MIN;
++      if (usec_wait > MONDO_USEC_WAIT_MAX)
++              usec_wait = MONDO_USEC_WAIT_MAX;
++      retries = tot_retries = 0;
++      tot_cpus = cnt;
+       prev_sent = 0;
++
+       do {
+-              int forward_progress, n_sent;
++              int n_sent, mondo_delivered, target_cpu_busy;
+ 
+               status = sun4v_cpu_mondo_send(cnt,
+                                             tb->cpu_list_pa,
+@@ -645,94 +671,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
+ 
+               /* HV_EOK means all cpus received the xcall, we're done.  */
+               if (likely(status == HV_EOK))
+-                      break;
++                      goto xcall_done;
++
++              /* If not these non-fatal errors, panic */
++              if (unlikely((status != HV_EWOULDBLOCK) &&
++                      (status != HV_ECPUERROR) &&
++                      (status != HV_ENOCPU)))
++                      goto fatal_errors;
+ 
+               /* First, see if we made any forward progress.
++               *
++               * Go through the cpu_list, count the target cpus that have
++               * received our mondo (n_sent), and those that did not (rem).
++               * Re-pack cpu_list with the cpus that remain to be retried
++               * at the front - this simplifies tracking the truly stalled cpus.
+                *
+                * The hypervisor indicates successful sends by setting
+                * cpu list entries to the value 0xffff.
++               *
++               * EWOULDBLOCK means some target cpus did not receive the
++               * mondo and retry usually helps.
++               *
++               * ECPUERROR means at least one target cpu is in error state,
++               * it's usually safe to skip the faulty cpu and retry.
++               *
++               * ENOCPU means one of the target cpus doesn't belong to the
++               * domain, perhaps offlined, which is unexpected but not
++               * fatal; it's okay to skip the offlined cpu.
+                */
++              rem = 0;
+               n_sent = 0;
+               for (i = 0; i < cnt; i++) {
+-                      if (likely(cpu_list[i] == 0xffff))
++                      cpu = cpu_list[i];
++                      if (likely(cpu == 0xffff)) {
+                               n_sent++;
++                      } else if ((status == HV_ECPUERROR) &&
++                              (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
++                              ecpuerror_id = cpu + 1;
++                      } else if (status == HV_ENOCPU && !cpu_online(cpu)) {
++                              enocpu_id = cpu + 1;
++                      } else {
++                              cpu_list[rem++] = cpu;
++                      }
+               }
+ 
+-              forward_progress = 0;
+-              if (n_sent > prev_sent)
+-                      forward_progress = 1;
++              /* No cpu remained, we're done. */
++              if (rem == 0)
++                      break;
+ 
+-              prev_sent = n_sent;
++              /* Otherwise, update the cpu count for retry. */
++              cnt = rem;
+ 
+-              /* If we get a HV_ECPUERROR, then one or more of the cpus
+-               * in the list are in error state.  Use the cpu_state()
+-               * hypervisor call to find out which cpus are in error state.
++              /* Record the overall number of mondos received by the
++               * first of the remaining cpus.
+                */
+-              if (unlikely(status == HV_ECPUERROR)) {
+-                      for (i = 0; i < cnt; i++) {
+-                              long err;
+-                              u16 cpu;
++              if (first_cpu != cpu_list[0]) {
++                      first_cpu = cpu_list[0];
++                      xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
++              }
+ 
+-                              cpu = cpu_list[i];
+-                              if (cpu == 0xffff)
+-                                      continue;
++              /* Was any mondo delivered successfully? */
++              mondo_delivered = (n_sent > prev_sent);
++              prev_sent = n_sent;
+ 
+-                              err = sun4v_cpu_state(cpu);
+-                              if (err == HV_CPU_STATE_ERROR) {
+-                                      saw_cpu_error = (cpu + 1);
+-                                      cpu_list[i] = 0xffff;
+-                              }
+-                      }
+-              } else if (unlikely(status != HV_EWOULDBLOCK))
+-                      goto fatal_mondo_error;
++              /* or, was any target cpu busy processing other mondos? */
++              target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
++              xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+ 
+-              /* Don't bother rewriting the CPU list, just leave the
+-               * 0xffff and non-0xffff entries in there and the
+-               * hypervisor will do the right thing.
+-               *
+-               * Only advance timeout state if we didn't make any
+-               * forward progress.
++              /* Retry count is for no progress. If we're making progress,
++               * reset the retry count.
+                */
+-              if (unlikely(!forward_progress)) {
+-                      if (unlikely(++retries > 10000))
+-                              goto fatal_mondo_timeout;
+-
+-                      /* Delay a little bit to let other cpus catch up
+-                       * on their cpu mondo queue work.
+-                       */
+-                      udelay(2 * cnt);
++              if (likely(mondo_delivered || target_cpu_busy)) {
++                      tot_retries += retries;
++                      retries = 0;
++              } else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
++                      goto fatal_mondo_timeout;
+               }
+-      } while (1);
+ 
+-      if (unlikely(saw_cpu_error))
+-              goto fatal_mondo_cpu_error;
++              /* Delay a little bit to let other cpus catch up on
++               * their cpu mondo queue work.
++               */
++              if (!mondo_delivered)
++                      udelay(usec_wait);
+ 
+-      return;
++              retries++;
++      } while (1);
+ 
+-fatal_mondo_cpu_error:
+-      printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
+-             "(including %d) were in error state\n",
+-             this_cpu, saw_cpu_error - 1);
++xcall_done:
++      if (unlikely(ecpuerror_id > 0)) {
++              pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
++                     this_cpu, ecpuerror_id - 1);
++      } else if (unlikely(enocpu_id > 0)) {
++              pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
++                     this_cpu, enocpu_id - 1);
++      }
+       return;
+ 
++fatal_errors:
++      /* fatal errors include bad alignment, etc */
++      pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
++             this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
++      panic("Unexpected SUN4V mondo error %lu\n", status);
++
+ fatal_mondo_timeout:
+-      printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
+-             " progress after %d retries.\n",
+-             this_cpu, retries);
+-      goto dump_cpu_list_and_out;
+-
+-fatal_mondo_error:
+-      printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
+-             this_cpu, status);
+-      printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
+-             "mondo_block_pa(%lx)\n",
+-             this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+-
+-dump_cpu_list_and_out:
+-      printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
+-      for (i = 0; i < cnt; i++)
+-              printk("%u ", cpu_list[i]);
+-      printk("]\n");
++      /* some cpus being non-responsive to the cpu mondo */
++      pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
++             this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
++      panic("SUN4V mondo timeout panic\n");
+ }
+ 
+ static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
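
A small worked example of the wait-time clamp described in the comment above, using only the constants this hunk introduces:

/* usec_wait = min(cnt * MONDO_USEC_WAIT_MIN, MONDO_USEC_WAIT_MAX) */
int usec_wait = cnt * 2;        /* MONDO_USEC_WAIT_MIN */
if (usec_wait > 100)            /* MONDO_USEC_WAIT_MAX */
        usec_wait = 100;
/* cnt = 8  -> wait 16 usec between retries
 * cnt = 64 -> capped at 100 usec; up to MONDO_RETRY_LIMIT (500000)
 * zero-progress retries are tolerated before the timeout panic
 */
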
+diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S
+index 559bc5e9c199..34631995859a 100644
+--- a/arch/sparc/kernel/sun4v_ivec.S
++++ b/arch/sparc/kernel/sun4v_ivec.S
+@@ -26,6 +26,21 @@ sun4v_cpu_mondo:
+       ldxa    [%g0] ASI_SCRATCHPAD, %g4
+       sub     %g4, TRAP_PER_CPU_FAULT_INFO, %g4
+ 
++      /* Get smp_processor_id() into %g3 */
++      sethi   %hi(trap_block), %g5
++      or      %g5, %lo(trap_block), %g5
++      sub     %g4, %g5, %g3
++      srlx    %g3, TRAP_BLOCK_SZ_SHIFT, %g3
++
++      /* Increment cpu_mondo_counter[smp_processor_id()] */
++      sethi   %hi(cpu_mondo_counter), %g5
++      or      %g5, %lo(cpu_mondo_counter), %g5
++      sllx    %g3, 3, %g3
++      add     %g5, %g3, %g5
++      ldx     [%g5], %g3
++      add     %g3, 1, %g3
++      stx     %g3, [%g5]
++
+       /* Get CPU mondo queue base phys address into %g7.  */
+       ldx     [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
+ 
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index 196ee5eb4d48..ad31af1dd726 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -2733,6 +2733,7 @@ void do_getpsr(struct pt_regs *regs)
+       }
+ }
+ 
++u64 cpu_mondo_counter[NR_CPUS] = {0};
+ struct trap_per_cpu trap_block[NR_CPUS];
+ EXPORT_SYMBOL(trap_block);
+ 
+diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
+index 07c0df924960..db872dbfafe9 100644
+--- a/arch/sparc/kernel/tsb.S
++++ b/arch/sparc/kernel/tsb.S
+@@ -360,6 +360,7 @@ tsb_flush:
+        * %o1: TSB base config pointer
+        * %o2: TSB huge config pointer, or NULL if none
+        * %o3: Hypervisor TSB descriptor physical address
++       * %o4: Secondary context to load, if non-zero
+        *
+        * We have to run this whole thing with interrupts
+        * disabled so that the current cpu doesn't change
+@@ -372,6 +373,17 @@ __tsb_context_switch:
+       rdpr    %pstate, %g1
+       wrpr    %g1, PSTATE_IE, %pstate
+ 
++      brz,pn  %o4, 1f
++       mov    SECONDARY_CONTEXT, %o5
++
++661:  stxa    %o4, [%o5] ASI_DMMU
++      .section .sun4v_1insn_patch, "ax"
++      .word   661b
++      stxa    %o4, [%o5] ASI_MMU
++      .previous
++      flush   %g6
++
++1:
+       TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
+ 
+       stx     %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
+diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
+index 54f98706b03b..5a8cb37f0a3b 100644
+--- a/arch/sparc/lib/U3memcpy.S
++++ b/arch/sparc/lib/U3memcpy.S
+@@ -145,13 +145,13 @@ ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
+ ENTRY(U3_retl_o2_and_7_plus_GS)
+       and     %o2, 7, %o2
+       retl
+-       add    %o2, GLOBAL_SPARE, %o2
++       add    %o2, GLOBAL_SPARE, %o0
+ ENDPROC(U3_retl_o2_and_7_plus_GS)
+ ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
+       add     GLOBAL_SPARE, 8, GLOBAL_SPARE
+       and     %o2, 7, %o2
+       retl
+-       add    %o2, GLOBAL_SPARE, %o2
++       add    %o2, GLOBAL_SPARE, %o0
+ ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
+ #endif
+ 
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 3c40ebd50f92..fed73f14aa49 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -325,6 +325,29 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
+ }
+ 
+ #ifdef CONFIG_HUGETLB_PAGE
++static void __init add_huge_page_size(unsigned long size)
++{
++      unsigned int order;
++
++      if (size_to_hstate(size))
++              return;
++
++      order = ilog2(size) - PAGE_SHIFT;
++      hugetlb_add_hstate(order);
++}
++
++static int __init hugetlbpage_init(void)
++{
++      add_huge_page_size(1UL << HPAGE_64K_SHIFT);
++      add_huge_page_size(1UL << HPAGE_SHIFT);
++      add_huge_page_size(1UL << HPAGE_256MB_SHIFT);
++      add_huge_page_size(1UL << HPAGE_2GB_SHIFT);
++
++      return 0;
++}
++
++arch_initcall(hugetlbpage_init);
++
+ static int __init setup_hugepagesz(char *string)
+ {
+       unsigned long long hugepage_size;
+@@ -364,7 +387,7 @@ static int __init setup_hugepagesz(char *string)
+               goto out;
+       }
+ 
+-      hugetlb_add_hstate(hugepage_shift - PAGE_SHIFT);
++      add_huge_page_size(hugepage_size);
+       rc = 1;
+ 
+ out:
+diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c
+index 17bd2e167e07..df707a8ad311 100644
+--- a/arch/sparc/power/hibernate.c
++++ b/arch/sparc/power/hibernate.c
+@@ -35,6 +35,5 @@ void restore_processor_state(void)
+ {
+       struct mm_struct *mm = current->active_mm;
+ 
+-      load_secondary_context(mm);
+-      tsb_context_switch(mm);
++      tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
+ }
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 43e10d6fdbed..44adcde7a0ca 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -151,6 +151,8 @@ void kvm_async_pf_task_wait(u32 token)
+               if (hlist_unhashed(&n.link))
+                       break;
+ 
++              rcu_irq_exit();
++
+               if (!n.halted) {
+                       local_irq_enable();
+                       schedule();
+@@ -159,11 +161,11 @@ void kvm_async_pf_task_wait(u32 token)
+                       /*
+                        * We cannot reschedule. So halt.
+                        */
+-                      rcu_irq_exit();
+                       native_safe_halt();
+                       local_irq_disable();
+-                      rcu_irq_enter();
+               }
++
++              rcu_irq_enter();
+       }
+       if (!n.halted)
+               finish_swait(&n.wq, &wait);
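
A condensed sketch of the wait loop after this hunk (swait-queue setup elided): rcu_irq_exit()/rcu_irq_enter() now bracket both the schedule() path and the halt path, rather than only the halt path:

/* hypothetical condensation, not the verbatim kernel code */
for (;;) {
        if (hlist_unhashed(&n.link))
                break;
        rcu_irq_exit();         /* leave RCU's irq context in both paths */
        if (!n.halted) {
                local_irq_enable();
                schedule();
                local_irq_disable();
        } else {
                native_safe_halt();
                local_irq_disable();
        }
        rcu_irq_enter();
}
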
+diff --git a/block/blk-core.c b/block/blk-core.c
+index a7421b772d0e..56a7fac71439 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -3307,6 +3307,10 @@ EXPORT_SYMBOL(blk_finish_plug);
+  */
+ void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
+ {
++      /* not support for RQF_PM and ->rpm_status in blk-mq yet */
++      if (q->mq_ops)
++              return;
++
+       q->dev = dev;
+       q->rpm_status = RPM_ACTIVE;
+       pm_runtime_set_autosuspend_delay(q->dev, -1);
+diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
+index 8e61e8640e17..5eaecd40f701 100644
+--- a/block/blk-mq-cpumap.c
++++ b/block/blk-mq-cpumap.c
+@@ -35,7 +35,6 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set)
+ {
+       unsigned int *map = set->mq_map;
+       unsigned int nr_queues = set->nr_hw_queues;
+-      const struct cpumask *online_mask = cpu_online_mask;
+       unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
+       cpumask_var_t cpus;
+ 
+@@ -44,7 +43,7 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set)
+ 
+       cpumask_clear(cpus);
+       nr_cpus = nr_uniq_cpus = 0;
+-      for_each_cpu(i, online_mask) {
++      for_each_present_cpu(i) {
+               nr_cpus++;
+               first_sibling = get_first_sibling(i);
+               if (!cpumask_test_cpu(first_sibling, cpus))
+@@ -54,7 +53,7 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set)
+ 
+       queue = 0;
+       for_each_possible_cpu(i) {
+-              if (!cpumask_test_cpu(i, online_mask)) {
++              if (!cpumask_test_cpu(i, cpu_present_mask)) {
+                       map[i] = 0;
+                       continue;
+               }
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 958cedaff8b8..7353e0080062 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -37,9 +37,6 @@
+ #include "blk-wbt.h"
+ #include "blk-mq-sched.h"
+ 
+-static DEFINE_MUTEX(all_q_mutex);
+-static LIST_HEAD(all_q_list);
+-
+ static void blk_mq_poll_stats_start(struct request_queue *q);
+ static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
+ static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync);
+@@ -1975,8 +1972,8 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
+               INIT_LIST_HEAD(&__ctx->rq_list);
+               __ctx->queue = q;
+ 
+-              /* If the cpu isn't online, the cpu is mapped to first hctx */
+-              if (!cpu_online(i))
++              /* If the cpu isn't present, the cpu is mapped to first hctx */
++              if (!cpu_present(i))
+                       continue;
+ 
+               hctx = blk_mq_map_queue(q, i);
+@@ -2019,8 +2016,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
+       }
+ }
+ 
+-static void blk_mq_map_swqueue(struct request_queue *q,
+-                             const struct cpumask *online_mask)
++static void blk_mq_map_swqueue(struct request_queue *q)
+ {
+       unsigned int i, hctx_idx;
+       struct blk_mq_hw_ctx *hctx;
+@@ -2038,13 +2034,11 @@ static void blk_mq_map_swqueue(struct request_queue *q,
+       }
+ 
+       /*
+-       * Map software to hardware queues
++       * Map software to hardware queues.
++       *
++       * If the cpu isn't present, the cpu is mapped to first hctx.
+        */
+-      for_each_possible_cpu(i) {
+-              /* If the cpu isn't online, the cpu is mapped to first hctx */
+-              if (!cpumask_test_cpu(i, online_mask))
+-                      continue;
+-
++      for_each_present_cpu(i) {
+               hctx_idx = q->mq_map[i];
+               /* unmapped hw queue can be remapped after CPU topo changed */
+               if (!set->tags[hctx_idx] &&
+@@ -2340,16 +2334,8 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+               blk_queue_softirq_done(q, set->ops->complete);
+ 
+       blk_mq_init_cpu_queues(q, set->nr_hw_queues);
+-
+-      get_online_cpus();
+-      mutex_lock(&all_q_mutex);
+-
+-      list_add_tail(&q->all_q_node, &all_q_list);
+       blk_mq_add_queue_tag_set(set, q);
+-      blk_mq_map_swqueue(q, cpu_online_mask);
+-
+-      mutex_unlock(&all_q_mutex);
+-      put_online_cpus();
++      blk_mq_map_swqueue(q);
+ 
+       if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
+               int ret;
+@@ -2375,18 +2361,12 @@ void blk_mq_free_queue(struct request_queue *q)
+ {
+       struct blk_mq_tag_set   *set = q->tag_set;
+ 
+-      mutex_lock(&all_q_mutex);
+-      list_del_init(&q->all_q_node);
+-      mutex_unlock(&all_q_mutex);
+-
+       blk_mq_del_queue_tag_set(q);
+-
+       blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
+ }
+ 
+ /* Basically redo blk_mq_init_queue with queue frozen */
+-static void blk_mq_queue_reinit(struct request_queue *q,
+-                              const struct cpumask *online_mask)
++static void blk_mq_queue_reinit(struct request_queue *q)
+ {
+       WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
+ 
+@@ -2399,76 +2379,12 @@ static void blk_mq_queue_reinit(struct request_queue *q,
+        * involves free and re-allocate memory, worthy doing?)
+        */
+ 
+-      blk_mq_map_swqueue(q, online_mask);
++      blk_mq_map_swqueue(q);
+ 
+       blk_mq_sysfs_register(q);
+       blk_mq_debugfs_register_hctxs(q);
+ }
+ 
+-/*
+- * New online cpumask which is going to be set in this hotplug event.
+- * Declare this cpumasks as global as cpu-hotplug operation is invoked
+- * one-by-one and dynamically allocating this could result in a failure.
+- */
+-static struct cpumask cpuhp_online_new;
+-
+-static void blk_mq_queue_reinit_work(void)
+-{
+-      struct request_queue *q;
+-
+-      mutex_lock(&all_q_mutex);
+-      /*
+-       * We need to freeze and reinit all existing queues.  Freezing
+-       * involves synchronous wait for an RCU grace period and doing it
+-       * one by one may take a long time.  Start freezing all queues in
+-       * one swoop and then wait for the completions so that freezing can
+-       * take place in parallel.
+-       */
+-      list_for_each_entry(q, &all_q_list, all_q_node)
+-              blk_freeze_queue_start(q);
+-      list_for_each_entry(q, &all_q_list, all_q_node)
+-              blk_mq_freeze_queue_wait(q);
+-
+-      list_for_each_entry(q, &all_q_list, all_q_node)
+-              blk_mq_queue_reinit(q, &cpuhp_online_new);
+-
+-      list_for_each_entry(q, &all_q_list, all_q_node)
+-              blk_mq_unfreeze_queue(q);
+-
+-      mutex_unlock(&all_q_mutex);
+-}
+-
+-static int blk_mq_queue_reinit_dead(unsigned int cpu)
+-{
+-      cpumask_copy(&cpuhp_online_new, cpu_online_mask);
+-      blk_mq_queue_reinit_work();
+-      return 0;
+-}
+-
+-/*
+- * Before hotadded cpu starts handling requests, new mappings must be
+- * established.  Otherwise, these requests in hw queue might never be
+- * dispatched.
+- *
+- * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
+- * for CPU0, and ctx1 for CPU1).
+- *
+- * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list
+- * and set bit0 in pending bitmap as ctx1->index_hw is still zero.
+- *
+- * And then while running hw queue, blk_mq_flush_busy_ctxs() finds bit0 is set
+- * in pending bitmap and tries to retrieve requests in hctx->ctxs[0]->rq_list.
+- * But htx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list is
+- * ignored.
+- */
+-static int blk_mq_queue_reinit_prepare(unsigned int cpu)
+-{
+-      cpumask_copy(&cpuhp_online_new, cpu_online_mask);
+-      cpumask_set_cpu(cpu, &cpuhp_online_new);
+-      blk_mq_queue_reinit_work();
+-      return 0;
+-}
+-
+ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+ {
+       int i;
+@@ -2679,7 +2595,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+       blk_mq_update_queue_map(set);
+       list_for_each_entry(q, &set->tag_list, tag_set_list) {
+               blk_mq_realloc_hw_ctxs(set, q);
+-              blk_mq_queue_reinit(q, cpu_online_mask);
++              blk_mq_queue_reinit(q);
+       }
+ 
+       list_for_each_entry(q, &set->tag_list, tag_set_list)
+@@ -2895,24 +2811,10 @@ bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
+ }
+ EXPORT_SYMBOL_GPL(blk_mq_poll);
+ 
+-void blk_mq_disable_hotplug(void)
+-{
+-      mutex_lock(&all_q_mutex);
+-}
+-
+-void blk_mq_enable_hotplug(void)
+-{
+-      mutex_unlock(&all_q_mutex);
+-}
+-
+ static int __init blk_mq_init(void)
+ {
+       cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
+                               blk_mq_hctx_notify_dead);
+-
+-      cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
+-                                blk_mq_queue_reinit_prepare,
+-                                blk_mq_queue_reinit_dead);
+       return 0;
+ }
+ subsys_initcall(blk_mq_init);
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index cc67b48e3551..558df56544d2 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -56,11 +56,6 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+                               bool at_head);
+ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
+                               struct list_head *list);
+-/*
+- * CPU hotplug helpers
+- */
+-void blk_mq_enable_hotplug(void);
+-void blk_mq_disable_hotplug(void);
+ 
+ /*
+  * CPU -> queue mappings
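
The blk-mq hunks above switch the ctx-to-hctx mapping from cpu_online_mask to the present mask, which is what allows the freeze/reinit hotplug machinery to be deleted: a present-but-offline cpu already owns a valid mapping by the time it comes online. A hypothetical one-loop summary of the new rule in blk_mq_map_queues(), where queue_index_for() is an illustrative stand-in for the sibling-aware assignment:

/* sketch: present cpus get a real queue; everything else falls back to 0 */
for_each_possible_cpu(i)
        map[i] = cpu_present(i) ? queue_index_for(i) : 0;
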
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index 10347e3d73ad..5bd58bd4ab05 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -85,6 +85,7 @@ static const struct lpss_device_desc lpss_dma_desc = {
+ };
+ 
+ struct lpss_private_data {
++      struct acpi_device *adev;
+       void __iomem *mmio_base;
+       resource_size_t mmio_size;
+       unsigned int fixed_clk_rate;
+@@ -155,6 +156,12 @@ static struct pwm_lookup byt_pwm_lookup[] = {
+ 
+ static void byt_pwm_setup(struct lpss_private_data *pdata)
+ {
++      struct acpi_device *adev = pdata->adev;
++
++      /* Only call pwm_add_table for the first PWM controller */
++      if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
++              return;
++
+       if (!acpi_dev_present("INT33FD", NULL, -1))
+               pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
+ }
+@@ -180,6 +187,12 @@ static struct pwm_lookup bsw_pwm_lookup[] = {
+ 
+ static void bsw_pwm_setup(struct lpss_private_data *pdata)
+ {
++      struct acpi_device *adev = pdata->adev;
++
++      /* Only call pwm_add_table for the first PWM controller */
++      if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
++              return;
++
+       pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
+ }
+ 
+@@ -456,6 +469,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
+               goto err_out;
+       }
+ 
++      pdata->adev = adev;
+       pdata->dev_desc = dev_desc;
+ 
+       if (dev_desc->setup)
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 49ba9834c715..12d59968020f 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -3028,10 +3028,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
+ static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
+ {
+       if (!sata_pmp_attached(ap)) {
+-              if (likely(devno < ata_link_max_devices(&ap->link)))
++              if (likely(devno >= 0 &&
++                         devno < ata_link_max_devices(&ap->link)))
+                       return &ap->link.device[devno];
+       } else {
+-              if (likely(devno < ap->nr_pmp_links))
++              if (likely(devno >= 0 &&
++                         devno < ap->nr_pmp_links))
+                       return &ap->pmp_link[devno].device[0];
+       }
+ 
+diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
+index 5372bf8be5e6..31d7ffda9aab 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
++++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
+@@ -184,7 +184,7 @@ static struct ccu_mux cpu_clk = {
+               .hw.init        = CLK_HW_INIT_PARENTS("cpu",
+                                                     cpu_parents,
+                                                     &ccu_mux_ops,
+-                                                    CLK_IS_CRITICAL),
++                                                    CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
+       }
+ };
+ 
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index a42a1eea5714..2e96b3d46e0c 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -704,24 +704,23 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
+ {
+       struct lineevent_state *le = p;
+       struct gpioevent_data ge;
+-      int ret;
++      int ret, level;
+ 
+       ge.timestamp = ktime_get_real_ns();
++      level = gpiod_get_value_cansleep(le->desc);
+ 
+       if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
+           && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
+-              int level = gpiod_get_value_cansleep(le->desc);
+-
+               if (level)
+                       /* Emit low-to-high event */
+                       ge.id = GPIOEVENT_EVENT_RISING_EDGE;
+               else
+                       /* Emit high-to-low event */
+                       ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
+-      } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
++      } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && level) {
+               /* Emit low-to-high event */
+               ge.id = GPIOEVENT_EVENT_RISING_EDGE;
+-      } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
++      } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE && !level) {
+               /* Emit high-to-low event */
+               ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
+       } else {
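
A hypothetical condensation of the filtering this hunk adds, where rising_requested/falling_requested stand in for the le->eflags tests: the line level is sampled once per interrupt, and a single-edge listener only receives an event whose direction matches that level:

level = gpiod_get_value_cansleep(le->desc);
if (rising_requested && falling_requested)
        ge.id = level ? GPIOEVENT_EVENT_RISING_EDGE
                      : GPIOEVENT_EVENT_FALLING_EDGE;
else if (rising_requested && level)
        ge.id = GPIOEVENT_EVENT_RISING_EDGE;
else if (falling_requested && !level)
        ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
else
        return IRQ_HANDLED;     /* direction doesn't match: drop the event */
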
+diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
+index 18fd01f3e4b2..003a131bad47 100644
+--- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
++++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
+@@ -1,24 +1,25 @@
+-
+ /*
+-***************************************************************************************************
+-*
+-*  Trade secret of Advanced Micro Devices, Inc.
+-*  Copyright (c) 2010 Advanced Micro Devices, Inc. (unpublished)
+-*
+-*  All rights reserved.  This notice is intended as a precaution against inadvertent publication and
+-*  does not imply publication or any waiver of confidentiality.  The year included in the foregoing
+-*  notice is the year of creation of the work.
+-*
+-***************************************************************************************************
+-*/
+-/**
+-***************************************************************************************************
+-* @brief gfx9 Clearstate Definitions
+-***************************************************************************************************
+-*
+-*   Do not edit! This is a machine-generated file!
+-*
+-*/
++ * Copyright 2017 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
+ 
+ static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
+ {
+diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
+index c0b1aabf282f..7dbb7cf47986 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si.c
++++ b/drivers/gpu/drm/amd/amdgpu/si.c
+@@ -1385,6 +1385,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
+               amdgpu_program_register_sequence(adev,
+                                                pitcairn_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
++              break;
+       case CHIP_VERDE:
+               amdgpu_program_register_sequence(adev,
+                                                verde_golden_registers,
+@@ -1409,6 +1410,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
+               amdgpu_program_register_sequence(adev,
+                                                oland_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
++              break;
+       case CHIP_HAINAN:
+               amdgpu_program_register_sequence(adev,
+                                                hainan_golden_registers,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 1d2db5d912b0..f8a977f86ec7 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -384,6 +384,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
+ 
+       hotspot_x = du->hotspot_x;
+       hotspot_y = du->hotspot_y;
++
++      if (plane->fb) {
++              hotspot_x += plane->fb->hot_x;
++              hotspot_y += plane->fb->hot_y;
++      }
++
+       du->cursor_surface = vps->surf;
+       du->cursor_dmabuf = vps->dmabuf;
+ 
+@@ -411,6 +417,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
+               vmw_cursor_update_position(dev_priv, true,
+                                          du->cursor_x + hotspot_x,
+                                          du->cursor_y + hotspot_y);
++
++              du->core_hotspot_x = hotspot_x - du->hotspot_x;
++              du->core_hotspot_y = hotspot_y - du->hotspot_y;
+       } else {
+               DRM_ERROR("Failed to update cursor image\n");
+       }
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 0f1219fa8561..28fbc81c6e9e 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -4316,6 +4316,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
+               /* Setting */
+               irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
+               irte->hi.fields.vector = vcpu_pi_info->vector;
++              irte->lo.fields_vapic.ga_log_intr = 1;
+               irte->lo.fields_vapic.guest_mode = 1;
+               irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
+ 
+diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
+index b2ff82fa7116..ecfeac5cdbed 100644
+--- a/drivers/media/pci/saa7164/saa7164-bus.c
++++ b/drivers/media/pci/saa7164/saa7164-bus.c
+@@ -389,11 +389,11 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
+       msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
+       msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
+       msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
++      memcpy(msg, &msg_tmp, sizeof(*msg));
+ 
+       /* No need to update the read positions, because this was a peek */
+       /* If the caller specifically want to peek, return */
+       if (peekonly) {
+-              memcpy(msg, &msg_tmp, sizeof(*msg));
+               goto peekout;
+       }
+ 
+@@ -438,21 +438,15 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
+               space_rem = bus->m_dwSizeGetRing - curr_grp;
+ 
+               if (space_rem < sizeof(*msg)) {
+-                      /* msg wraps around the ring */
+-                      memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem);
+-                      memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing,
+-                              sizeof(*msg) - space_rem);
+                       if (buf)
+                               memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
+                                       space_rem, buf_size);
+ 
+               } else if (space_rem == sizeof(*msg)) {
+-                      memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
+                       if (buf)
+                               memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
+               } else {
+                       /* Additional data wraps around the ring */
+-                      memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
+                       if (buf) {
+                               memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
+                                       sizeof(*msg), space_rem - sizeof(*msg));
+@@ -465,15 +459,10 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
+ 
+       } else {
+               /* No wrapping */
+-              memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
+               if (buf)
+                       memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
+                               buf_size);
+       }
+-      /* Convert from little endian to CPU */
+-      msg->size = le16_to_cpu((__force __le16)msg->size);
+-      msg->command = le32_to_cpu((__force __le32)msg->command);
+-      msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
+ 
+       /* Update the read positions, adjusting the ring */
+       saa7164_writel(bus->m_dwGetReadPos, new_grp);
+diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
+index e3fe3e0635aa..1831bf5ccca5 100644
+--- a/drivers/media/platform/davinci/vpfe_capture.c
++++ b/drivers/media/platform/davinci/vpfe_capture.c
+@@ -1719,27 +1719,9 @@ static long vpfe_param_handler(struct file *file, void *priv,
+ 
+       switch (cmd) {
+       case VPFE_CMD_S_CCDC_RAW_PARAMS:
++              ret = -EINVAL;
+               v4l2_warn(&vpfe_dev->v4l2_dev,
+-                        "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
+-              if (ccdc_dev->hw_ops.set_params) {
+-                      ret = ccdc_dev->hw_ops.set_params(param);
+-                      if (ret) {
+-                              v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+-                                      "Error setting parameters in CCDC\n");
+-                              goto unlock_out;
+-                      }
+-                      ret = vpfe_get_ccdc_image_format(vpfe_dev,
+-                                                       &vpfe_dev->fmt);
+-                      if (ret < 0) {
+-                              v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+-                                      "Invalid image format at CCDC\n");
+-                              goto unlock_out;
+-                      }
+-              } else {
+-                      ret = -EINVAL;
+-                      v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+-                              "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
+-              }
++                      "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
+               break;
+       default:
+               ret = -ENOTTY;
+diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
+index de85f1d7ce43..c01b655571a2 100644
+--- a/drivers/media/rc/ir-lirc-codec.c
++++ b/drivers/media/rc/ir-lirc-codec.c
+@@ -266,7 +266,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
+               if (!dev->rx_resolution)
+                       return -ENOTTY;
+ 
+-              val = dev->rx_resolution;
++              val = dev->rx_resolution / 1000;
+               break;
+ 
+       case LIRC_SET_WIDEBAND_RECEIVER:
+diff --git a/drivers/media/rc/ir-spi.c b/drivers/media/rc/ir-spi.c
+index c8863f36686a..f39cf8cb639f 100644
+--- a/drivers/media/rc/ir-spi.c
++++ b/drivers/media/rc/ir-spi.c
+@@ -57,10 +57,13 @@ static int ir_spi_tx(struct rc_dev *dev,
+ 
+       /* convert the pulse/space signal to raw binary signal */
+       for (i = 0; i < count; i++) {
++              unsigned int periods;
+               int j;
+               u16 val = ((i + 1) % 2) ? idata->pulse : idata->space;
+ 
+-              if (len + buffer[i] >= IR_SPI_MAX_BUFSIZE)
++              periods = DIV_ROUND_CLOSEST(buffer[i] * idata->freq, 1000000);
++
++              if (len + periods >= IR_SPI_MAX_BUFSIZE)
+                       return -EINVAL;
+ 
+               /*
+@@ -69,13 +72,13 @@ static int ir_spi_tx(struct rc_dev *dev,
+                * contain a space duration.
+                */
+               val = (i % 2) ? idata->space : idata->pulse;
+-              for (j = 0; j < buffer[i]; j++)
++              for (j = 0; j < periods; j++)
+                       idata->tx_buf[len++] = val;
+       }
+ 
+       memset(&xfer, 0, sizeof(xfer));
+ 
+-      xfer.speed_hz = idata->freq;
++      xfer.speed_hz = idata->freq * 16;
+       xfer.len = len * sizeof(*idata->tx_buf);
+       xfer.tx_buf = idata->tx_buf;
+ 
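
The arithmetic behind this hunk, as a standalone sketch: buffer[] holds pulse/space durations in microseconds, each carrier period becomes one 16-bit word on the SPI wire, so the word count is duration * freq / 10^6 and the bit clock must run at 16x the carrier. The 38 kHz carrier and NEC-style pulse below are assumed values for illustration:

#include <stdio.h>

static unsigned int div_round_closest(unsigned long long n, unsigned long long d)
{
        return (unsigned int)((n + d / 2) / d);
}

int main(void)
{
        unsigned int freq = 38000;   /* assumed carrier, Hz */
        unsigned int duration = 560; /* a typical NEC pulse, usec */
        unsigned int periods =
                div_round_closest((unsigned long long)duration * freq, 1000000);

        printf("%u periods, SPI clock %u Hz\n", periods, freq * 16); /* 21, 608000 */
        return 0;
}
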
+diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
+index 1dfc2de1fe77..4767f4341ba9 100644
+--- a/drivers/media/usb/pulse8-cec/pulse8-cec.c
++++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
+@@ -51,7 +51,7 @@ MODULE_DESCRIPTION("Pulse Eight HDMI CEC driver");
+ MODULE_LICENSE("GPL");
+ 
+ static int debug;
+-static int persistent_config = 1;
++static int persistent_config;
+ module_param(debug, int, 0644);
+ module_param(persistent_config, int, 0644);
+ MODULE_PARM_DESC(debug, "debug level (0-1)");
+diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
+index 3f8c85d5aa09..88fa03142e92 100644
+--- a/drivers/mmc/core/host.c
++++ b/drivers/mmc/core/host.c
+@@ -176,19 +176,17 @@ static void mmc_retune_timer(unsigned long data)
+  */
+ int mmc_of_parse(struct mmc_host *host)
+ {
+-      struct device_node *np;
++      struct device *dev = host->parent;
+       u32 bus_width;
+       int ret;
+       bool cd_cap_invert, cd_gpio_invert = false;
+       bool ro_cap_invert, ro_gpio_invert = false;
+ 
+-      if (!host->parent || !host->parent->of_node)
++      if (!dev || !dev_fwnode(dev))
+               return 0;
+ 
+-      np = host->parent->of_node;
+-
+       /* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
+-      if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
++      if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
+               dev_dbg(host->parent,
+                       "\"bus-width\" property is missing, assuming 1 bit.\n");
+               bus_width = 1;
+@@ -210,7 +208,7 @@ int mmc_of_parse(struct mmc_host *host)
+       }
+ 
+       /* f_max is obtained from the optional "max-frequency" property */
+-      of_property_read_u32(np, "max-frequency", &host->f_max);
++      device_property_read_u32(dev, "max-frequency", &host->f_max);
+ 
+       /*
+        * Configure CD and WP pins. They are both by default active low to
+@@ -225,12 +223,12 @@ int mmc_of_parse(struct mmc_host *host)
+        */
+ 
+       /* Parse Card Detection */
+-      if (of_property_read_bool(np, "non-removable")) {
++      if (device_property_read_bool(dev, "non-removable")) {
+               host->caps |= MMC_CAP_NONREMOVABLE;
+       } else {
+-              cd_cap_invert = of_property_read_bool(np, "cd-inverted");
++              cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
+ 
+-              if (of_property_read_bool(np, "broken-cd"))
++              if (device_property_read_bool(dev, "broken-cd"))
+                       host->caps |= MMC_CAP_NEEDS_POLL;
+ 
+               ret = mmc_gpiod_request_cd(host, "cd", 0, true,
+@@ -256,7 +254,7 @@ int mmc_of_parse(struct mmc_host *host)
+       }
+ 
+       /* Parse Write Protection */
+-      ro_cap_invert = of_property_read_bool(np, "wp-inverted");
++      ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
+ 
+       ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
+       if (!ret)
+@@ -264,64 +262,64 @@ int mmc_of_parse(struct mmc_host *host)
+       else if (ret != -ENOENT && ret != -ENOSYS)
+               return ret;
+ 
+-      if (of_property_read_bool(np, "disable-wp"))
++      if (device_property_read_bool(dev, "disable-wp"))
+               host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
+ 
+       /* See the comment on CD inversion above */
+       if (ro_cap_invert ^ ro_gpio_invert)
+               host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+ 
+-      if (of_property_read_bool(np, "cap-sd-highspeed"))
++      if (device_property_read_bool(dev, "cap-sd-highspeed"))
+               host->caps |= MMC_CAP_SD_HIGHSPEED;
+-      if (of_property_read_bool(np, "cap-mmc-highspeed"))
++      if (device_property_read_bool(dev, "cap-mmc-highspeed"))
+               host->caps |= MMC_CAP_MMC_HIGHSPEED;
+-      if (of_property_read_bool(np, "sd-uhs-sdr12"))
++      if (device_property_read_bool(dev, "sd-uhs-sdr12"))
+               host->caps |= MMC_CAP_UHS_SDR12;
+-      if (of_property_read_bool(np, "sd-uhs-sdr25"))
++      if (device_property_read_bool(dev, "sd-uhs-sdr25"))
+               host->caps |= MMC_CAP_UHS_SDR25;
+-      if (of_property_read_bool(np, "sd-uhs-sdr50"))
++      if (device_property_read_bool(dev, "sd-uhs-sdr50"))
+               host->caps |= MMC_CAP_UHS_SDR50;
+-      if (of_property_read_bool(np, "sd-uhs-sdr104"))
++      if (device_property_read_bool(dev, "sd-uhs-sdr104"))
+               host->caps |= MMC_CAP_UHS_SDR104;
+-      if (of_property_read_bool(np, "sd-uhs-ddr50"))
++      if (device_property_read_bool(dev, "sd-uhs-ddr50"))
+               host->caps |= MMC_CAP_UHS_DDR50;
+-      if (of_property_read_bool(np, "cap-power-off-card"))
++      if (device_property_read_bool(dev, "cap-power-off-card"))
+               host->caps |= MMC_CAP_POWER_OFF_CARD;
+-      if (of_property_read_bool(np, "cap-mmc-hw-reset"))
++      if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
+               host->caps |= MMC_CAP_HW_RESET;
+-      if (of_property_read_bool(np, "cap-sdio-irq"))
++      if (device_property_read_bool(dev, "cap-sdio-irq"))
+               host->caps |= MMC_CAP_SDIO_IRQ;
+-      if (of_property_read_bool(np, "full-pwr-cycle"))
++      if (device_property_read_bool(dev, "full-pwr-cycle"))
+               host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
+-      if (of_property_read_bool(np, "keep-power-in-suspend"))
++      if (device_property_read_bool(dev, "keep-power-in-suspend"))
+               host->pm_caps |= MMC_PM_KEEP_POWER;
+-      if (of_property_read_bool(np, "wakeup-source") ||
+-          of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
++      if (device_property_read_bool(dev, "wakeup-source") ||
++          device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
+               host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+-      if (of_property_read_bool(np, "mmc-ddr-3_3v"))
++      if (device_property_read_bool(dev, "mmc-ddr-3_3v"))
+               host->caps |= MMC_CAP_3_3V_DDR;
+-      if (of_property_read_bool(np, "mmc-ddr-1_8v"))
++      if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
+               host->caps |= MMC_CAP_1_8V_DDR;
+-      if (of_property_read_bool(np, "mmc-ddr-1_2v"))
++      if (device_property_read_bool(dev, "mmc-ddr-1_2v"))
+               host->caps |= MMC_CAP_1_2V_DDR;
+-      if (of_property_read_bool(np, "mmc-hs200-1_8v"))
++      if (device_property_read_bool(dev, "mmc-hs200-1_8v"))
+               host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+-      if (of_property_read_bool(np, "mmc-hs200-1_2v"))
++      if (device_property_read_bool(dev, "mmc-hs200-1_2v"))
+               host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+-      if (of_property_read_bool(np, "mmc-hs400-1_8v"))
++      if (device_property_read_bool(dev, "mmc-hs400-1_8v"))
+               host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
+-      if (of_property_read_bool(np, "mmc-hs400-1_2v"))
++      if (device_property_read_bool(dev, "mmc-hs400-1_2v"))
+               host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
+-      if (of_property_read_bool(np, "mmc-hs400-enhanced-strobe"))
++      if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe"))
+               host->caps2 |= MMC_CAP2_HS400_ES;
+-      if (of_property_read_bool(np, "no-sdio"))
++      if (device_property_read_bool(dev, "no-sdio"))
+               host->caps2 |= MMC_CAP2_NO_SDIO;
+-      if (of_property_read_bool(np, "no-sd"))
++      if (device_property_read_bool(dev, "no-sd"))
+               host->caps2 |= MMC_CAP2_NO_SD;
+-      if (of_property_read_bool(np, "no-mmc"))
++      if (device_property_read_bool(dev, "no-mmc"))
+               host->caps2 |= MMC_CAP2_NO_MMC;
+ 
+-      host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
++      host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
+       if (host->dsr_req && (host->dsr & ~0xffff)) {
+               dev_err(host->parent,
+                       "device tree specified broken value for DSR: 0x%x, ignoring\n",
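
The mmc_of_parse() hunk above swaps the OF-only of_property_* readers for the unified device property API: device_property_*() resolves the same keys through dev_fwnode(), whether the description comes from a device tree node or an ACPI companion, so one parser now serves both firmware types. A minimal sketch of the pattern, assuming a hypothetical driver (example_parse and its default are illustrative, not kernel code):

    #include <linux/device.h>
    #include <linux/property.h>

    /* Hypothetical parser: one code path for DT and ACPI descriptions. */
    static int example_parse(struct device *dev)
    {
            u32 bus_width = 1;      /* assumed default when the key is absent */
            bool nonremovable;

            if (!dev || !dev_fwnode(dev))   /* no firmware description at all */
                    return 0;

            device_property_read_u32(dev, "bus-width", &bus_width);
            nonremovable = device_property_read_bool(dev, "non-removable");

            dev_dbg(dev, "bus-width=%u non-removable=%d\n", bus_width, nonremovable);
            return 0;
    }
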
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index e45129f48174..efde0f20dd24 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -2707,8 +2707,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
+       host->slot[id] = slot;
+ 
+       mmc->ops = &dw_mci_ops;
+-      if (of_property_read_u32_array(host->dev->of_node,
+-                                     "clock-freq-min-max", freq, 2)) {
++      if (device_property_read_u32_array(host->dev, "clock-freq-min-max",
++                                         freq, 2)) {
+               mmc->f_min = DW_MCI_FREQ_MIN;
+               mmc->f_max = DW_MCI_FREQ_MAX;
+       } else {
+@@ -2808,7 +2808,6 @@ static void dw_mci_init_dma(struct dw_mci *host)
+ {
+       int addr_config;
+       struct device *dev = host->dev;
+-      struct device_node *np = dev->of_node;
+ 
+       /*
+       * Check transfer mode from HCON[17:16]
+@@ -2869,8 +2868,9 @@ static void dw_mci_init_dma(struct dw_mci *host)
+               dev_info(host->dev, "Using internal DMA controller.\n");
+       } else {
+               /* TRANS_MODE_EDMAC: check dma bindings again */
+-              if ((of_property_count_strings(np, "dma-names") < 0) ||
+-                  (!of_find_property(np, "dmas", NULL))) {
++              if ((device_property_read_string_array(dev, "dma-names",
++                                                     NULL, 0) < 0) ||
++                  !device_property_present(dev, "dmas")) {
+                       goto no_dma;
+               }
+               host->dma_ops = &dw_mci_edmac_ops;
+@@ -2937,7 +2937,6 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
+ {
+       struct dw_mci_board *pdata;
+       struct device *dev = host->dev;
+-      struct device_node *np = dev->of_node;
+       const struct dw_mci_drv_data *drv_data = host->drv_data;
+       int ret;
+       u32 clock_frequency;
+@@ -2954,20 +2953,21 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
+       }
+ 
+       /* find out number of slots supported */
+-      of_property_read_u32(np, "num-slots", &pdata->num_slots);
++      device_property_read_u32(dev, "num-slots", &pdata->num_slots);
+ 
+-      if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
++      if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
+               dev_info(dev,
+                        "fifo-depth property not found, using value of FIFOTH register as default\n");
+ 
+-      of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
++      device_property_read_u32(dev, "card-detect-delay",
++                               &pdata->detect_delay_ms);
+ 
+-      of_property_read_u32(np, "data-addr", &host->data_addr_override);
++      device_property_read_u32(dev, "data-addr", &host->data_addr_override);
+ 
+-      if (of_get_property(np, "fifo-watermark-aligned", NULL))
++      if (device_property_present(dev, "fifo-watermark-aligned"))
+               host->wm_aligned = true;
+ 
+-      if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
++      if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
+               pdata->bus_hz = clock_frequency;
+ 
+       if (drv_data && drv_data->parse_dt) {
+diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
+index 7611fd679f1a..1485530c3592 100644
+--- a/drivers/mmc/host/sdhci-of-at91.c
++++ b/drivers/mmc/host/sdhci-of-at91.c
+@@ -31,6 +31,7 @@
+ 
+ #define SDMMC_MC1R    0x204
+ #define               SDMMC_MC1R_DDR          BIT(3)
++#define               SDMMC_MC1R_FCD          BIT(7)
+ #define SDMMC_CACR    0x230
+ #define               SDMMC_CACR_CAPWREN      BIT(0)
+ #define               SDMMC_CACR_KEY          (0x46 << 8)
+@@ -43,6 +44,15 @@ struct sdhci_at91_priv {
+       struct clk *mainck;
+ };
+ 
++static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
++{
++      u8 mc1r;
++
++      mc1r = readb(host->ioaddr + SDMMC_MC1R);
++      mc1r |= SDMMC_MC1R_FCD;
++      writeb(mc1r, host->ioaddr + SDMMC_MC1R);
++}
++
+ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
+ {
+       u16 clk;
+@@ -110,10 +120,18 @@ void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
+       sdhci_set_uhs_signaling(host, timing);
+ }
+ 
++static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
++{
++      sdhci_reset(host, mask);
++
++      if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
++              sdhci_at91_set_force_card_detect(host);
++}
++
+ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
+       .set_clock              = sdhci_at91_set_clock,
+       .set_bus_width          = sdhci_set_bus_width,
+-      .reset                  = sdhci_reset,
++      .reset                  = sdhci_at91_reset,
+       .set_uhs_signaling      = sdhci_at91_set_uhs_signaling,
+       .set_power              = sdhci_at91_set_power,
+ };
+@@ -324,6 +342,21 @@ static int sdhci_at91_probe(struct platform_device *pdev)
+               host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+       }
+ 
++      /*
++       * If the device attached to the MMC bus is not removable, it is safer
++       * to set the Force Card Detect bit. People often don't connect the
++       * card detect signal and use this pin for another purpose. If the card
++       * detect pin is not muxed to SDHCI controller, a default value is
++       * used. This value can be different from a SoC revision to another
++       * one. Problems come when this default value is not card present. To
++       * avoid this case, if the device is non removable then the card
++       * detection procedure using the SDMCC_CD signal is bypassed.
++       * This bit is reset when a software reset for all command is performed
++       * so we need to implement our own reset function to set back this bit.
++       */
++      if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
++              sdhci_at91_set_force_card_detect(host);
++
+       pm_runtime_put_autosuspend(&pdev->dev);
+ 
+       return 0;
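
The sdhci-of-at91 hunks work because the SDMMC_MC1R force-card-detect bit is cleared by an SDHCI software reset for all commands, so setting it once at probe is not enough; the driver therefore wraps the core reset op and reasserts the bit whenever the card is flagged non-removable. Condensed into one function for illustration (register and bit names are the ones this patch defines; the wrapper shape is the generic reapply-after-reset pattern):

    static void example_reset(struct sdhci_host *host, u8 mask)
    {
            u8 mc1r;

            sdhci_reset(host, mask);        /* the core reset clears MC1R.FCD */

            if (host->mmc->caps & MMC_CAP_NONREMOVABLE) {
                    mc1r = readb(host->ioaddr + SDMMC_MC1R);
                    writeb(mc1r | SDMMC_MC1R_FCD, host->ioaddr + SDMMC_MC1R);
            }
    }
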
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 8ab6bdbe1682..224e93aa6d23 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2047,6 +2047,7 @@ static int bond_miimon_inspect(struct bonding *bond)
+                               continue;
+ 
+                       bond_propose_link_state(slave, BOND_LINK_FAIL);
++                      commit++;
+                       slave->delay = bond->params.downdelay;
+                       if (slave->delay) {
+                               netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
+@@ -2085,6 +2086,7 @@ static int bond_miimon_inspect(struct bonding *bond)
+                               continue;
+ 
+                       bond_propose_link_state(slave, BOND_LINK_BACK);
++                      commit++;
+                       slave->delay = bond->params.updelay;
+ 
+                       if (slave->delay) {
+@@ -4598,7 +4600,7 @@ static int bond_check_params(struct bond_params *params)
+       }
+       ad_user_port_key = valptr->value;
+ 
+-      if (bond_mode == BOND_MODE_TLB) {
++      if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) {
+               bond_opt_initstr(&newval, "default");
+               valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB),
+                                       &newval);
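
Both bond_miimon_inspect() hunks pair commit++ with bond_propose_link_state(): the inspect pass runs outside RTNL and only proposes link-state changes, and the caller takes RTNL and runs the commit pass only when the returned count is non-zero. Before this fix, a proposal made with a zero updelay/downdelay fell through the delay branch without bumping the counter, so it was never committed. A condensed sketch of that contract (driver names, heavily simplified control flow):

    /* Inspect one slave; the caller commits iff the returned count != 0. */
    static int example_inspect_one(struct bonding *bond, struct slave *slave,
                                   bool link_up)
    {
            int commit = 0;

            if (!link_up && slave->link == BOND_LINK_UP) {
                    bond_propose_link_state(slave, BOND_LINK_FAIL);
                    commit++;       /* count every proposal, delayed or not */
                    slave->delay = bond->params.downdelay;
            }
            return commit;
    }
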
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index fa0eece21eef..d9cc94a7d44e 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1668,6 +1668,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
+               .dev_name = "BCM53125",
+               .vlans = 4096,
+               .enabled_ports = 0xff,
++              .arl_entries = 4,
+               .cpu_port = B53_CPU_PORT,
+               .vta_regs = B53_VTA_REGS,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index d034d8cd7d22..32864a47c4c1 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3377,6 +3377,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
+       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+       .port_pause_config = mv88e6390_port_pause_config,
++      .port_set_cmode = mv88e6390x_port_set_cmode,
+       .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+       .stats_snapshot = mv88e6390_g1_stats_snapshot,
+diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
+index 5711fbbd6ae3..878cffd37e1f 100644
+--- a/drivers/net/ethernet/aurora/nb8800.c
++++ b/drivers/net/ethernet/aurora/nb8800.c
+@@ -609,7 +609,7 @@ static void nb8800_mac_config(struct net_device *dev)
+               mac_mode |= HALF_DUPLEX;
+ 
+       if (gigabit) {
+-              if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
++              if (phy_interface_is_rgmii(dev->phydev))
+                       mac_mode |= RGMII_MODE;
+ 
+               mac_mode |= GMAC_MODE;
+@@ -1268,11 +1268,10 @@ static int nb8800_tangox_init(struct net_device *dev)
+               break;
+ 
+       case PHY_INTERFACE_MODE_RGMII:
+-              pad_mode = PAD_MODE_RGMII;
+-              break;
+-
++      case PHY_INTERFACE_MODE_RGMII_ID:
++      case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+-              pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
++              pad_mode = PAD_MODE_RGMII;
+               break;
+ 
+       default:
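
The nb8800 changes stop treating plain RGMII as the only RGMII mode: phy_interface_is_rgmii() matches all four variants, and the MAC pad no longer inserts its own TX clock delay, leaving delay insertion to the PHY as selected by the -id/-rxid/-txid suffix (which is also why the Vantage-1172 DTS hunk earlier in this patch moves to "rgmii-id"). The helper is equivalent to this open-coded check (a sketch; the kernel implements it as a range test on the enum):

    #include <linux/phy.h>

    static bool example_is_rgmii(struct phy_device *phydev)
    {
            switch (phydev->interface) {
            case PHY_INTERFACE_MODE_RGMII:
            case PHY_INTERFACE_MODE_RGMII_ID:
            case PHY_INTERFACE_MODE_RGMII_RXID:
            case PHY_INTERFACE_MODE_RGMII_TXID:
                    return true;
            default:
                    return false;
            }
    }
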
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index 10d282841f5b..ac0a460c006a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -777,6 +777,10 @@ static void cb_timeout_handler(struct work_struct *work)
+       mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+ }
+ 
++static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
++static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
++                            struct mlx5_cmd_msg *msg);
++
+ static void cmd_work_handler(struct work_struct *work)
+ {
+       struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
+@@ -786,16 +790,27 @@ static void cmd_work_handler(struct work_struct *work)
+       struct mlx5_cmd_layout *lay;
+       struct semaphore *sem;
+       unsigned long flags;
++      int alloc_ret;
+ 
+       sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
+       down(sem);
+       if (!ent->page_queue) {
+-              ent->idx = alloc_ent(cmd);
+-              if (ent->idx < 0) {
++              alloc_ret = alloc_ent(cmd);
++              if (alloc_ret < 0) {
+                       mlx5_core_err(dev, "failed to allocate command entry\n");
++                      if (ent->callback) {
++                              ent->callback(-EAGAIN, ent->context);
++                              mlx5_free_cmd_msg(dev, ent->out);
++                              free_msg(dev, ent->in);
++                              free_cmd(ent);
++                      } else {
++                              ent->ret = -EAGAIN;
++                              complete(&ent->done);
++                      }
+                       up(sem);
+                       return;
+               }
++              ent->idx = alloc_ret;
+       } else {
+               ent->idx = cmd->max_reg_cmds;
+               spin_lock_irqsave(&cmd->alloc_lock, flags);
+@@ -955,7 +970,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ 
+       err = wait_func(dev, ent);
+       if (err == -ETIMEDOUT)
+-              goto out_free;
++              goto out;
+ 
+       ds = ent->ts2 - ent->ts1;
+       op = MLX5_GET(mbox_in, in->first.data, opcode);
+@@ -1419,6 +1434,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
+                                       mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
+                                                     ent->idx);
+                                       free_ent(cmd, ent->idx);
++                                      free_cmd(ent);
+                               }
+                               continue;
+                       }
+@@ -1477,7 +1493,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
+                               free_msg(dev, ent->in);
+ 
+                               err = err ? err : ent->status;
+-                              free_cmd(ent);
++                              if (!forced)
++                                      free_cmd(ent);
+                               callback(err, context);
+                       } else {
+                               complete(&ent->done);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index 944fc1742464..3b39dbd97e57 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -261,6 +261,14 @@ struct mlx5e_dcbx {
+ };
+ #endif
+ 
++#define MAX_PIN_NUM   8
++struct mlx5e_pps {
++      u8                         pin_caps[MAX_PIN_NUM];
++      struct work_struct         out_work;
++      u64                        start[MAX_PIN_NUM];
++      u8                         enabled;
++};
++
+ struct mlx5e_tstamp {
+       rwlock_t                   lock;
+       struct cyclecounter        cycles;
+@@ -272,7 +280,7 @@ struct mlx5e_tstamp {
+       struct mlx5_core_dev      *mdev;
+       struct ptp_clock          *ptp;
+       struct ptp_clock_info      ptp_info;
+-      u8                        *pps_pin_caps;
++      struct mlx5e_pps           pps_info;
+ };
+ 
+ enum {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+index e706a87fc8b2..80c500f87ab6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+@@ -53,6 +53,15 @@ enum {
+       MLX5E_EVENT_MODE_ONCE_TILL_ARM  = 0x2,
+ };
+ 
++enum {
++      MLX5E_MTPPS_FS_ENABLE                   = BIT(0x0),
++      MLX5E_MTPPS_FS_PATTERN                  = BIT(0x2),
++      MLX5E_MTPPS_FS_PIN_MODE                 = BIT(0x3),
++      MLX5E_MTPPS_FS_TIME_STAMP               = BIT(0x4),
++      MLX5E_MTPPS_FS_OUT_PULSE_DURATION       = BIT(0x5),
++      MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ          = BIT(0x7),
++};
++
+ void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
+                       struct skb_shared_hwtstamps *hwts)
+ {
+@@ -73,17 +82,46 @@ static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
+       return mlx5_read_internal_timer(tstamp->mdev) & cc->mask;
+ }
+ 
++static void mlx5e_pps_out(struct work_struct *work)
++{
++      struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps,
++                                                out_work);
++      struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp,
++                                                 pps_info);
++      u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
++      unsigned long flags;
++      int i;
++
++      for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
++              u64 tstart;
++
++              write_lock_irqsave(&tstamp->lock, flags);
++              tstart = tstamp->pps_info.start[i];
++              tstamp->pps_info.start[i] = 0;
++              write_unlock_irqrestore(&tstamp->lock, flags);
++              if (!tstart)
++                      continue;
++
++              MLX5_SET(mtpps_reg, in, pin, i);
++              MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
++              MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP);
++              mlx5_set_mtpps(tstamp->mdev, in, sizeof(in));
++      }
++}
++
+ static void mlx5e_timestamp_overflow(struct work_struct *work)
+ {
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
+                                                  overflow_work);
+       struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
+       unsigned long flags;
+ 
+       write_lock_irqsave(&tstamp->lock, flags);
+       timecounter_read(&tstamp->clock);
+       write_unlock_irqrestore(&tstamp->lock, flags);
+-      schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
++      queue_delayed_work(priv->wq, &tstamp->overflow_work,
++                         msecs_to_jiffies(tstamp->overflow_period * 1000));
+ }
+ 
+ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
+@@ -214,18 +252,6 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+       int neg_adj = 0;
+       struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
+                                                 ptp_info);
+-      struct mlx5e_priv *priv =
+-              container_of(tstamp, struct mlx5e_priv, tstamp);
+-
+-      if (MLX5_CAP_GEN(priv->mdev, pps_modify)) {
+-              u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+-
+-              /* For future use need to add a loop for finding all 1PPS out pins */
+-              MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
+-              MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF);
+-
+-              mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+-      }
+ 
+       if (delta < 0) {
+               neg_adj = 1;
+@@ -254,12 +280,13 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
+       struct mlx5e_priv *priv =
+               container_of(tstamp, struct mlx5e_priv, tstamp);
+       u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
++      u32 field_select = 0;
++      u8 pin_mode = 0;
+       u8 pattern = 0;
+       int pin = -1;
+       int err = 0;
+ 
+-      if (!MLX5_CAP_GEN(priv->mdev, pps) ||
+-          !MLX5_CAP_GEN(priv->mdev, pps_modify))
++      if (!MLX5_PPS_CAP(priv->mdev))
+               return -EOPNOTSUPP;
+ 
+       if (rq->extts.index >= tstamp->ptp_info.n_pins)
+@@ -269,15 +296,21 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
+               pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
+               if (pin < 0)
+                       return -EBUSY;
++              pin_mode = MLX5E_PIN_MODE_IN;
++              pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
++              field_select = MLX5E_MTPPS_FS_PIN_MODE |
++                             MLX5E_MTPPS_FS_PATTERN |
++                             MLX5E_MTPPS_FS_ENABLE;
++      } else {
++              pin = rq->extts.index;
++              field_select = MLX5E_MTPPS_FS_ENABLE;
+       }
+ 
+-      if (rq->extts.flags & PTP_FALLING_EDGE)
+-              pattern = 1;
+-
+       MLX5_SET(mtpps_reg, in, pin, pin);
+-      MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN);
++      MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
+       MLX5_SET(mtpps_reg, in, pattern, pattern);
+       MLX5_SET(mtpps_reg, in, enable, on);
++      MLX5_SET(mtpps_reg, in, field_select, field_select);
+ 
+       err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+       if (err)
+@@ -296,14 +329,18 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
+       struct mlx5e_priv *priv =
+               container_of(tstamp, struct mlx5e_priv, tstamp);
+       u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+-      u64 nsec_now, nsec_delta, time_stamp;
++      u64 nsec_now, nsec_delta, time_stamp = 0;
+       u64 cycles_now, cycles_delta;
+       struct timespec64 ts;
+       unsigned long flags;
++      u32 field_select = 0;
++      u8 pin_mode = 0;
++      u8 pattern = 0;
+       int pin = -1;
++      int err = 0;
+       s64 ns;
+ 
+-      if (!MLX5_CAP_GEN(priv->mdev, pps_modify))
++      if (!MLX5_PPS_CAP(priv->mdev))
+               return -EOPNOTSUPP;
+ 
+       if (rq->perout.index >= tstamp->ptp_info.n_pins)
+@@ -314,32 +351,60 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
+                                  rq->perout.index);
+               if (pin < 0)
+                       return -EBUSY;
+-      }
+ 
+-      ts.tv_sec = rq->perout.period.sec;
+-      ts.tv_nsec = rq->perout.period.nsec;
+-      ns = timespec64_to_ns(&ts);
+-      if (on)
++              pin_mode = MLX5E_PIN_MODE_OUT;
++              pattern = MLX5E_OUT_PATTERN_PERIODIC;
++              ts.tv_sec = rq->perout.period.sec;
++              ts.tv_nsec = rq->perout.period.nsec;
++              ns = timespec64_to_ns(&ts);
++
+               if ((ns >> 1) != 500000000LL)
+                       return -EINVAL;
+-      ts.tv_sec = rq->perout.start.sec;
+-      ts.tv_nsec = rq->perout.start.nsec;
+-      ns = timespec64_to_ns(&ts);
+-      cycles_now = mlx5_read_internal_timer(tstamp->mdev);
+-      write_lock_irqsave(&tstamp->lock, flags);
+-      nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
+-      nsec_delta = ns - nsec_now;
+-      cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
+-                               tstamp->cycles.mult);
+-      write_unlock_irqrestore(&tstamp->lock, flags);
+-      time_stamp = cycles_now + cycles_delta;
++
++              ts.tv_sec = rq->perout.start.sec;
++              ts.tv_nsec = rq->perout.start.nsec;
++              ns = timespec64_to_ns(&ts);
++              cycles_now = mlx5_read_internal_timer(tstamp->mdev);
++              write_lock_irqsave(&tstamp->lock, flags);
++              nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
++              nsec_delta = ns - nsec_now;
++              cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
++                                       tstamp->cycles.mult);
++              write_unlock_irqrestore(&tstamp->lock, flags);
++              time_stamp = cycles_now + cycles_delta;
++              field_select = MLX5E_MTPPS_FS_PIN_MODE |
++                             MLX5E_MTPPS_FS_PATTERN |
++                             MLX5E_MTPPS_FS_ENABLE |
++                             MLX5E_MTPPS_FS_TIME_STAMP;
++      } else {
++              pin = rq->perout.index;
++              field_select = MLX5E_MTPPS_FS_ENABLE;
++      }
++
+       MLX5_SET(mtpps_reg, in, pin, pin);
+-      MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
+-      MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC);
++      MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
++      MLX5_SET(mtpps_reg, in, pattern, pattern);
+       MLX5_SET(mtpps_reg, in, enable, on);
+       MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
++      MLX5_SET(mtpps_reg, in, field_select, field_select);
++
++      err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
++      if (err)
++              return err;
+ 
+-      return mlx5_set_mtpps(priv->mdev, in, sizeof(in));
++      return mlx5_set_mtppse(priv->mdev, pin, 0,
++                             MLX5E_EVENT_MODE_REPETETIVE & on);
++}
++
++static int mlx5e_pps_configure(struct ptp_clock_info *ptp,
++                             struct ptp_clock_request *rq,
++                             int on)
++{
++      struct mlx5e_tstamp *tstamp =
++              container_of(ptp, struct mlx5e_tstamp, ptp_info);
++
++      tstamp->pps_info.enabled = !!on;
++      return 0;
+ }
+ 
+ static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
+@@ -351,6 +416,8 @@ static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
+               return mlx5e_extts_configure(ptp, rq, on);
+       case PTP_CLK_REQ_PEROUT:
+               return mlx5e_perout_configure(ptp, rq, on);
++      case PTP_CLK_REQ_PPS:
++              return mlx5e_pps_configure(ptp, rq, on);
+       default:
+               return -EOPNOTSUPP;
+       }
+@@ -396,6 +463,7 @@ static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
+               return -ENOMEM;
+       tstamp->ptp_info.enable = mlx5e_ptp_enable;
+       tstamp->ptp_info.verify = mlx5e_ptp_verify;
++      tstamp->ptp_info.pps = 1;
+ 
+       for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
+               snprintf(tstamp->ptp_info.pin_config[i].name,
+@@ -423,22 +491,56 @@ static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
+       tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
+                                             cap_max_num_of_pps_out_pins);
+ 
+-      tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
+-      tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
+-      tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
+-      tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
+-      tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
+-      tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
+-      tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
+-      tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
++      tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
++      tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
++      tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
++      tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
++      tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
++      tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
++      tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
++      tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
+ }
+ 
+ void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
+                            struct ptp_clock_event *event)
+ {
++      struct net_device *netdev = priv->netdev;
+       struct mlx5e_tstamp *tstamp = &priv->tstamp;
++      struct timespec64 ts;
++      u64 nsec_now, nsec_delta;
++      u64 cycles_now, cycles_delta;
++      int pin = event->index;
++      s64 ns;
++      unsigned long flags;
+ 
+-      ptp_clock_event(tstamp->ptp, event);
++      switch (tstamp->ptp_info.pin_config[pin].func) {
++      case PTP_PF_EXTTS:
++              if (tstamp->pps_info.enabled) {
++                      event->type = PTP_CLOCK_PPSUSR;
++                      event->pps_times.ts_real = ns_to_timespec64(event->timestamp);
++              } else {
++                      event->type = PTP_CLOCK_EXTTS;
++              }
++              ptp_clock_event(tstamp->ptp, event);
++              break;
++      case PTP_PF_PEROUT:
++              mlx5e_ptp_gettime(&tstamp->ptp_info, &ts);
++              cycles_now = mlx5_read_internal_timer(tstamp->mdev);
++              ts.tv_sec += 1;
++              ts.tv_nsec = 0;
++              ns = timespec64_to_ns(&ts);
++              write_lock_irqsave(&tstamp->lock, flags);
++              nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
++              nsec_delta = ns - nsec_now;
++              cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
++                                       tstamp->cycles.mult);
++              tstamp->pps_info.start[pin] = cycles_now + cycles_delta;
++              queue_work(priv->wq, &tstamp->pps_info.out_work);
++              write_unlock_irqrestore(&tstamp->lock, flags);
++              break;
++      default:
++              netdev_err(netdev, "%s: Unhandled event\n", __func__);
++      }
+ }
+ 
+ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
+@@ -474,9 +576,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
+       do_div(ns, NSEC_PER_SEC / 2 / HZ);
+       tstamp->overflow_period = ns;
+ 
++      INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out);
+       INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
+       if (tstamp->overflow_period)
+-              schedule_delayed_work(&tstamp->overflow_work, 0);
++              queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
+       else
+               mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
+ 
+@@ -485,16 +588,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
+       snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
+ 
+       /* Initialize 1PPS data structures */
+-#define MAX_PIN_NUM   8
+-      tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL);
+-      if (tstamp->pps_pin_caps) {
+-              if (MLX5_CAP_GEN(priv->mdev, pps))
+-                      mlx5e_get_pps_caps(priv, tstamp);
+-              if (tstamp->ptp_info.n_pins)
+-                      mlx5e_init_pin_config(tstamp);
+-      } else {
+-              mlx5_core_warn(priv->mdev, "1PPS initialization failed\n");
+-      }
++      if (MLX5_PPS_CAP(priv->mdev))
++              mlx5e_get_pps_caps(priv, tstamp);
++      if (tstamp->ptp_info.n_pins)
++              mlx5e_init_pin_config(tstamp);
+ 
+       tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
+                                        &priv->mdev->pdev->dev);
+@@ -517,8 +614,7 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
+               priv->tstamp.ptp = NULL;
+       }
+ 
+-      kfree(tstamp->pps_pin_caps);
+-      kfree(tstamp->ptp_info.pin_config);
+-
++      cancel_work_sync(&tstamp->pps_info.out_work);
+       cancel_delayed_work_sync(&tstamp->overflow_work);
++      kfree(tstamp->ptp_info.pin_config);
+ }
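
The 1PPS rework above hands periodic-output reprogramming from the event handler to mlx5e_pps_out() via per-pin start timestamps. The handoff relies on a claim-by-swap idiom: the work item reads each pending value and zeroes it under the writer lock, so a slot is consumed exactly once and zero doubles as "nothing queued". A sketch of just that step, assuming the structures defined in the hunks above:

    static u64 example_take_start(struct mlx5e_tstamp *tstamp, int pin)
    {
            unsigned long flags;
            u64 tstart;

            write_lock_irqsave(&tstamp->lock, flags);
            tstart = tstamp->pps_info.start[pin];
            tstamp->pps_info.start[pin] = 0;        /* claim the slot */
            write_unlock_irqrestore(&tstamp->lock, flags);

            return tstart;  /* 0 means nothing was queued for this pin */
    }
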
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+index 85bf4a389295..986387de13ee 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+@@ -276,7 +276,7 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
+ 
+ static bool outer_header_zero(u32 *match_criteria)
+ {
+-      int size = MLX5_ST_SZ_BYTES(fte_match_param);
++      int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
+       char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
+                                            outer_headers);
+ 
+@@ -320,7 +320,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
+ 
+       spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
+       flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+-      rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
++      rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
+       if (IS_ERR(rule)) {
+               err = PTR_ERR(rule);
+               netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
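
The outer_header_zero() fix sizes the all-zero test to the outer_headers field instead of the whole fte_match_param, so criteria set in unrelated parts of the match parameter no longer make an empty outer header look non-empty. A standalone illustration of the sizing rule (plain C stand-ins, not the mlx5 structures):

    #include <stdbool.h>
    #include <string.h>

    struct match_param {
            unsigned char outer_headers[64];
            unsigned char misc[64];
    };

    static bool example_outer_headers_zero(const struct match_param *p)
    {
            static const unsigned char zeros[sizeof(p->outer_headers)];

            /* sizeof the field, not sizeof(*p): the struct size would
             * read into ->misc and misreport a non-empty header. */
            return memcmp(p->outer_headers, zeros, sizeof(p->outer_headers)) == 0;
    }
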
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 7819fe9ede22..072aa8a13a0a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -365,7 +365,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
+               break;
+       case MLX5_DEV_EVENT_PPS:
+               eqe = (struct mlx5_eqe *)param;
+-              ptp_event.type = PTP_CLOCK_EXTTS;
+               ptp_event.index = eqe->data.pps.pin;
+               ptp_event.timestamp =
+                       timecounter_cyc2time(&priv->tstamp.clock,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+index 33eae5ad2fb0..58a9f5c96d10 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+@@ -690,7 +690,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
+       else
+               mlx5_core_dbg(dev, "port_module_event is not set\n");
+ 
+-      if (MLX5_CAP_GEN(dev, pps))
++      if (MLX5_PPS_CAP(dev))
+               async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
+ 
+       err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
+index cc1858752e70..6d90e9e3bfd1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
+@@ -160,8 +160,6 @@ static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core
+ 
+ static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+ {
+-      mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn);
+-
+       mlx5_core_destroy_qp(mdev, qp);
+ }
+ 
+@@ -176,8 +174,6 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
+               return err;
+       }
+ 
+-      mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
+-
+       err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
+       if (err) {
+               mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
+@@ -235,6 +231,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
+ 
+ static int mlx5i_init_rx(struct mlx5e_priv *priv)
+ {
++      struct mlx5i_priv *ipriv  = priv->ppriv;
+       int err;
+ 
+       err = mlx5e_create_indirect_rqt(priv);
+@@ -253,12 +250,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
+       if (err)
+               goto err_destroy_indirect_tirs;
+ 
+-      err = mlx5i_create_flow_steering(priv);
++      err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
+       if (err)
+               goto err_destroy_direct_tirs;
+ 
++      err = mlx5i_create_flow_steering(priv);
++      if (err)
++              goto err_remove_rx_underlay_qpn;
++
+       return 0;
+ 
++err_remove_rx_underlay_qpn:
++      mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
+ err_destroy_direct_tirs:
+       mlx5e_destroy_direct_tirs(priv);
+ err_destroy_indirect_tirs:
+@@ -272,6 +275,9 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
+ 
+ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
+ {
++      struct mlx5i_priv *ipriv  = priv->ppriv;
++
++      mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
+       mlx5i_destroy_flow_steering(priv);
+       mlx5e_destroy_direct_tirs(priv);
+       mlx5e_destroy_indirect_tirs(priv);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+index b5d5519542e8..0ca4623bda6b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+@@ -157,22 +157,17 @@ static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
+ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
+                                          u8 *port1, u8 *port2)
+ {
+-      if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+-              if (tracker->netdev_state[0].tx_enabled) {
+-                      *port1 = 1;
+-                      *port2 = 1;
+-              } else {
+-                      *port1 = 2;
+-                      *port2 = 2;
+-              }
+-      } else {
+-              *port1 = 1;
+-              *port2 = 2;
+-              if (!tracker->netdev_state[0].link_up)
+-                      *port1 = 2;
+-              else if (!tracker->netdev_state[1].link_up)
+-                      *port2 = 1;
++      *port1 = 1;
++      *port2 = 2;
++      if (!tracker->netdev_state[0].tx_enabled ||
++          !tracker->netdev_state[0].link_up) {
++              *port1 = 2;
++              return;
+       }
++
++      if (!tracker->netdev_state[1].tx_enabled ||
++          !tracker->netdev_state[1].link_up)
++              *port2 = 1;
+ }
+ 
+ static void mlx5_activate_lag(struct mlx5_lag *ldev,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+index fbc6e9e9e305..1874aa96c1a1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+@@ -153,6 +153,11 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
+ int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
+ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
+ 
++#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) &&              \
++                          MLX5_CAP_GEN((mdev), pps_modify) &&         \
++                          MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) &&  \
++                          MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))
++
+ void mlx5e_init(void);
+ void mlx5e_cleanup(void);
+ 
+diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
+index 6f6ed75b63c9..765de3bedb88 100644
+--- a/drivers/net/irda/mcs7780.c
++++ b/drivers/net/irda/mcs7780.c
+@@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
+ static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
+ {
+       struct usb_device *dev = mcs->usbdev;
+-      int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
+-                                MCS_RD_RTYPE, 0, reg, val, 2,
+-                                msecs_to_jiffies(MCS_CTRL_TIMEOUT));
++      void *dmabuf;
++      int ret;
++
++      dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
++      if (!dmabuf)
++              return -ENOMEM;
++
++      ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
++                            MCS_RD_RTYPE, 0, reg, dmabuf, 2,
++                            msecs_to_jiffies(MCS_CTRL_TIMEOUT));
++
++      memcpy(val, dmabuf, sizeof(__u16));
++      kfree(dmabuf);
+ 
+       return ret;
+ }
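
The mcs_get_reg() fix applies the general USB rule: buffers handed to usb_control_msg() must be DMA-safe, separately allocated memory, never stack or arbitrary caller storage, with the result copied out after the transfer. A minimal sketch of the pattern against a hypothetical vendor request (the request code 0x07 and the 1000 ms timeout are illustrative):

    #include <linux/slab.h>
    #include <linux/usb.h>

    static int example_read_reg(struct usb_device *udev, __u16 reg, __u16 *val)
    {
            __u16 *buf = kmalloc(sizeof(*buf), GFP_KERNEL); /* DMA-safe */
            int ret;

            if (!buf)
                    return -ENOMEM;

            ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                  0x07 /* hypothetical request */,
                                  USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                                  0, reg, buf, sizeof(*buf), 1000);

            memcpy(val, buf, sizeof(*val)); /* copy out before freeing */
            kfree(buf);
            return ret;
    }
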
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index eebb0e1c70ff..b30d9ceee8bc 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -749,6 +749,9 @@ void phy_stop_machine(struct phy_device *phydev)
+       if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
+               phydev->state = PHY_UP;
+       mutex_unlock(&phydev->lock);
++
++      /* Now we can run the state machine synchronously */
++      phy_state_machine(&phydev->state_queue.work);
+ }
+ 
+ /**
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 6633dd4bb649..acb754eb1ccb 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -889,21 +889,20 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
+ 
+       buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
+       buf += headroom; /* advance address leaving hole at front of pkt */
+-      ctx = (void *)(unsigned long)len;
+       get_page(alloc_frag->page);
+       alloc_frag->offset += len + headroom;
+       hole = alloc_frag->size - alloc_frag->offset;
+       if (hole < len + headroom) {
+               /* To avoid internal fragmentation, if there is very likely not
+                * enough space for another buffer, add the remaining space to
+-               * the current buffer. This extra space is not included in
+-               * the truesize stored in ctx.
++               * the current buffer.
+                */
+               len += hole;
+               alloc_frag->offset += hole;
+       }
+ 
+       sg_init_one(rq->sg, buf, len);
++      ctx = (void *)(unsigned long)len;
+       err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
+       if (err < 0)
+               put_page(virt_to_head_page(buf));
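
The add_recvbuf_mergeable() change only reorders the ctx assignment, but the order is the point: the buffer length travels to the receive path through an opaque context pointer, so it must be encoded after the hole-merge adjustment has produced the final value. A standalone illustration of capture-after-final-adjustment (plain C, made-up values):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int len = 1500, hole = 200;
            void *early = (void *)(uintptr_t)len;   /* buggy: captured too soon */

            len += hole;                            /* absorb the trailing hole */
            void *late = (void *)(uintptr_t)len;    /* fixed: final length */

            printf("early=%u late=%u\n", (unsigned int)(uintptr_t)early,
                   (unsigned int)(uintptr_t)late);
            return 0;
    }
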
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+index 5653d6dd38f6..d44f59ef4f72 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+@@ -4168,11 +4168,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
+               goto fail;
+       }
+ 
+-      /* allocate scatter-gather table. sg support
+-       * will be disabled upon allocation failure.
+-       */
+-      brcmf_sdiod_sgtable_alloc(bus->sdiodev);
+-
+       /* Query the F2 block size, set roundup accordingly */
+       bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+       bus->roundup = min(max_roundup, bus->blocksize);
+diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+index 4b97371c3b42..838946d17b59 100644
+--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+@@ -1190,11 +1190,11 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
+                               next_reclaimed;
+                       IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+                                                 next_reclaimed);
++                      iwlagn_check_ratid_empty(priv, sta_id, tid);
+               }
+ 
+               iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
+ 
+-              iwlagn_check_ratid_empty(priv, sta_id, tid);
+               freed = 0;
+ 
+               /* process frames */
+diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
+index 3c52867dfe28..d145e0d90227 100644
+--- a/drivers/scsi/Kconfig
++++ b/drivers/scsi/Kconfig
+@@ -1241,6 +1241,8 @@ config SCSI_LPFC
+       tristate "Emulex LightPulse Fibre Channel Support"
+       depends on PCI && SCSI
+       depends on SCSI_FC_ATTRS
++      depends on NVME_TARGET_FC || NVME_TARGET_FC=n
++      depends on NVME_FC || NVME_FC=n
+       select CRC_T10DIF
+       ---help---
+           This lpfc driver supports the Emulex LightPulse
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index beb5f098f32d..05804227234d 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -437,7 +437,7 @@ static int scatter_data_area(struct tcmu_dev *udev,
+                       to_offset = get_block_offset_user(udev, dbi,
+                                       block_remaining);
+                       offset = DATA_BLOCK_SIZE - block_remaining;
+-                      to = (void *)(unsigned long)to + offset;
++                      to += offset;
+ 
+                       if (*iov_cnt != 0 &&
+                           to_offset == iov_tail(udev, *iov)) {
+@@ -510,7 +510,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+                       copy_bytes = min_t(size_t, sg_remaining,
+                                       block_remaining);
+                       offset = DATA_BLOCK_SIZE - block_remaining;
+-                      from = (void *)(unsigned long)from + offset;
++                      from += offset;
+                       tcmu_flush_dcache_range(from, copy_bytes);
+                       memcpy(to + sg->length - sg_remaining, from,
+                                       copy_bytes);
+@@ -699,25 +699,24 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+               size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
+ 
+               entry = (void *) mb + CMDR_OFF + cmd_head;
+-              tcmu_flush_dcache_range(entry, sizeof(*entry));
+               tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
+               tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
+               entry->hdr.cmd_id = 0; /* not used for PAD */
+               entry->hdr.kflags = 0;
+               entry->hdr.uflags = 0;
++              tcmu_flush_dcache_range(entry, sizeof(*entry));
+ 
+               UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
++              tcmu_flush_dcache_range(mb, sizeof(*mb));
+ 
+               cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+               WARN_ON(cmd_head != 0);
+       }
+ 
+       entry = (void *) mb + CMDR_OFF + cmd_head;
+-      tcmu_flush_dcache_range(entry, sizeof(*entry));
++      memset(entry, 0, command_size);
+       tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
+       entry->hdr.cmd_id = tcmu_cmd->cmd_id;
+-      entry->hdr.kflags = 0;
+-      entry->hdr.uflags = 0;
+ 
+       /* Handle allocating space from the data area */
+       tcmu_cmd_reset_dbi_cur(tcmu_cmd);
+@@ -736,11 +735,10 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       }
+       entry->req.iov_cnt = iov_cnt;
+-      entry->req.iov_dif_cnt = 0;
+ 
+       /* Handle BIDI commands */
++      iov_cnt = 0;
+       if (se_cmd->se_cmd_flags & SCF_BIDI) {
+-              iov_cnt = 0;
+               iov++;
+               ret = scatter_data_area(udev, tcmu_cmd,
+                                       se_cmd->t_bidi_data_sg,
+@@ -753,8 +751,8 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+                       pr_err("tcmu: alloc and scatter bidi data failed\n");
+                       return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               }
+-              entry->req.iov_bidi_cnt = iov_cnt;
+       }
++      entry->req.iov_bidi_cnt = iov_cnt;
+ 
+       /*
+        * Recalculate the command's base size and size according
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 33d979e9ea2a..83eecd33ad96 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -4776,10 +4776,6 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
+               else
+                       flush = BTRFS_RESERVE_NO_FLUSH;
+               spin_lock(&space_info->lock);
+-              if (can_overcommit(root, space_info, orig, flush)) {
+-                      spin_unlock(&space_info->lock);
+-                      break;
+-              }
+               if (list_empty(&space_info->tickets) &&
+                   list_empty(&space_info->priority_tickets)) {
+                       spin_unlock(&space_info->lock);
+diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
+index 3ec0e46de95f..22a8d532cca6 100644
+--- a/fs/ext4/acl.c
++++ b/fs/ext4/acl.c
+@@ -193,13 +193,6 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
+       switch (type) {
+       case ACL_TYPE_ACCESS:
+               name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
+-              if (acl) {
+-                      error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+-                      if (error)
+-                              return error;
+-                      inode->i_ctime = current_time(inode);
+-                      ext4_mark_inode_dirty(handle, inode);
+-              }
+               break;
+ 
+       case ACL_TYPE_DEFAULT:
+@@ -221,8 +214,9 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
+                                     value, size, 0);
+ 
+       kfree(value);
+-      if (!error)
++      if (!error) {
+               set_cached_acl(inode, type, acl);
++      }
+ 
+       return error;
+ }
+@@ -232,6 +226,8 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ {
+       handle_t *handle;
+       int error, retries = 0;
++      umode_t mode = inode->i_mode;
++      int update_mode = 0;
+ 
+       error = dquot_initialize(inode);
+       if (error)
+@@ -242,7 +238,20 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+ 
++      if ((type == ACL_TYPE_ACCESS) && acl) {
++              error = posix_acl_update_mode(inode, &mode, &acl);
++              if (error)
++                      goto out_stop;
++              update_mode = 1;
++      }
++
+       error = __ext4_set_acl(handle, inode, type, acl);
++      if (!error && update_mode) {
++              inode->i_mode = mode;
++              inode->i_ctime = current_time(inode);
++              ext4_mark_inode_dirty(handle, inode);
++      }
++out_stop:
+       ext4_journal_stop(handle);
+       if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+               goto retry;
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 02ce7e7bbdf5..407fc5aa32a7 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -521,6 +521,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
+                               lastoff = page_offset(page);
+                               bh = head = page_buffers(page);
+                               do {
++                                      if (lastoff + bh->b_size <= startoff)
++                                              goto next;
+                                       if (buffer_uptodate(bh) ||
+                                           buffer_unwritten(bh)) {
+                                               if (whence == SEEK_DATA)
+@@ -535,6 +537,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
+                                               unlock_page(page);
+                                               goto out;
+                                       }
++next:
+                                       lastoff += bh->b_size;
+                                       bh = bh->b_this_page;
+                               } while (bh != head);
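
The ext4_find_unwritten_pgoff() hunk adds a guard so the buffer-head walk skips buffers that end at or before the search origin instead of matching them, which could otherwise return an offset below the position being sought. A standalone illustration of that scan rule (plain C, not the buffer-head machinery):

    #include <stddef.h>

    /* Return the first offset >= start inside a block marked interesting,
     * or -1; blocks ending at or before start must be skipped entirely. */
    static long example_find_from(const int *interesting, size_t nblocks,
                                  long bsize, long start)
    {
            long off = 0;
            size_t i;

            for (i = 0; i < nblocks; i++, off += bsize) {
                    if (off + bsize <= start)       /* wholly before start */
                            continue;
                    if (interesting[i])
                            return off > start ? off : start;
            }
            return -1;
    }
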
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index c3ed9021b781..035cd3f4785e 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1927,7 +1927,8 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
+                       n_desc_blocks = o_desc_blocks +
+                               le16_to_cpu(es->s_reserved_gdt_blocks);
+                       n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
+-                      n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb);
++                      n_blocks_count = (ext4_fsblk_t)n_group *
++                              EXT4_BLOCKS_PER_GROUP(sb);
+                       n_group--; /* set to last group number */
+               }
+ 
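
The resize fix widens n_group to ext4_fsblk_t before the multiplication. With both operands 32-bit, C evaluates the product in 32 bits and truncates it before the assignment to the wider n_blocks_count, which breaks resizes past 2^32 blocks. A standalone demonstration of the overflow class (plain C, values chosen to wrap exactly to zero):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t n_group = 1u << 20, per_group = 1u << 15;

            /* 32-bit multiply wraps: 2^35 mod 2^32 == 0 */
            uint64_t wrong = n_group * per_group;
            /* widen one operand first, then multiply in 64 bits */
            uint64_t right = (uint64_t)n_group * per_group;

            printf("wrong=%llu right=%llu\n", (unsigned long long)wrong,
                   (unsigned long long)right);
            return 0;
    }
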
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index f5a7faac39a7..074169a54162 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -7407,7 +7407,7 @@ static void nfs4_exchange_id_done(struct rpc_task *task, void *data)
+                       cdata->res.server_scope = NULL;
+               }
+               /* Save the EXCHANGE_ID verifier session trunk tests */
+-              memcpy(clp->cl_confirm.data, cdata->args.verifier->data,
++              memcpy(clp->cl_confirm.data, cdata->args.verifier.data,
+                      sizeof(clp->cl_confirm.data));
+       }
+ out:
+@@ -7444,7 +7444,6 @@ static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
+ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
+                       u32 sp4_how, struct rpc_xprt *xprt)
+ {
+-      nfs4_verifier verifier;
+       struct rpc_message msg = {
+               .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
+               .rpc_cred = cred,
+@@ -7468,8 +7467,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
+               return -ENOMEM;
+       }
+ 
+-      if (!xprt)
+-              nfs4_init_boot_verifier(clp, &verifier);
++      nfs4_init_boot_verifier(clp, &calldata->args.verifier);
+ 
+       status = nfs4_init_uniform_client_string(clp);
+       if (status)
+@@ -7510,9 +7508,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
+               task_setup_data.rpc_xprt = xprt;
+               task_setup_data.flags =
+                               RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC;
+-              calldata->args.verifier = &clp->cl_confirm;
+-      } else {
+-              calldata->args.verifier = &verifier;
++              memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
++                              sizeof(calldata->args.verifier.data));
+       }
+       calldata->args.client = clp;
+ #ifdef CONFIG_NFS_V4_1_MIGRATION
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index 3aebfdc82b30..b0cbee2b2422 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -1765,7 +1765,7 @@ static void encode_exchange_id(struct xdr_stream *xdr,
+       int len = 0;
+ 
+       encode_op_hdr(xdr, OP_EXCHANGE_ID, decode_exchange_id_maxsz, hdr);
+-      encode_nfs4_verifier(xdr, args->verifier);
++      encode_nfs4_verifier(xdr, &args->verifier);
+ 
+       encode_string(xdr, strlen(args->client->cl_owner_id),
+                       args->client->cl_owner_id);
+diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
+index dc22ba8c710f..e50a387959bf 100644
+--- a/fs/ocfs2/acl.c
++++ b/fs/ocfs2/acl.c
+@@ -240,18 +240,6 @@ int ocfs2_set_acl(handle_t *handle,
+       switch (type) {
+       case ACL_TYPE_ACCESS:
+               name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS;
+-              if (acl) {
+-                      umode_t mode;
+-
+-                      ret = posix_acl_update_mode(inode, &mode, &acl);
+-                      if (ret)
+-                              return ret;
+-
+-                      ret = ocfs2_acl_set_mode(inode, di_bh,
+-                                               handle, mode);
+-                      if (ret)
+-                              return ret;
+-              }
+               break;
+       case ACL_TYPE_DEFAULT:
+               name_index = OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT;
+@@ -289,7 +277,19 @@ int ocfs2_iop_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+       had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
+       if (had_lock < 0)
+               return had_lock;
++      if (type == ACL_TYPE_ACCESS && acl) {
++              umode_t mode;
++
++              status = posix_acl_update_mode(inode, &mode, &acl);
++              if (status)
++                      goto unlock;
++
++              status = ocfs2_acl_set_mode(inode, bh, NULL, mode);
++              if (status)
++                      goto unlock;
++      }
+       status = ocfs2_set_acl(NULL, inode, bh, type, acl, NULL, NULL);
++unlock:
+       ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
+       brelse(bh);
+       return status;
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index 1d622f276e3a..26f9591b04b1 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -851,6 +851,9 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
+       __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
+       spin_unlock(&ctx->fault_pending_wqh.lock);
+ 
++      /* Flush pending events that may still wait on event_wqh */
++      wake_up_all(&ctx->event_wqh);
++
+       wake_up_poll(&ctx->fd_wqh, POLLHUP);
+       userfaultfd_ctx_put(ctx);
+       return 0;
+@@ -1645,6 +1648,8 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
+               ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
+                                    uffdio_zeropage.range.len);
+               mmput(ctx->mm);
++      } else {
++              return -ENOSPC;
+       }
+       if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
+               return -EFAULT;
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index 0f2a80377520..30b86efea2bc 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -58,7 +58,6 @@ enum cpuhp_state {
+       CPUHP_XEN_EVTCHN_PREPARE,
+       CPUHP_ARM_SHMOBILE_SCU_PREPARE,
+       CPUHP_SH_SH3X_PREPARE,
+-      CPUHP_BLK_MQ_PREPARE,
+       CPUHP_NET_FLOW_PREPARE,
+       CPUHP_TOPOLOGY_PREPARE,
+       CPUHP_NET_IUCV_PREPARE,
+diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
+index 119a3f9604b0..898cfe2eeb42 100644
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -18,6 +18,19 @@
+ 
+ #ifdef CONFIG_CPUSETS
+ 
++/*
++ * Static branch rewrites can happen in an arbitrary order for a given
++ * key. In code paths where we need to loop with read_mems_allowed_begin() and
++ * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
++ * to ensure that begin() always gets rewritten before retry() in the
++ * disabled -> enabled transition. If not, then if local irqs are disabled
++ * around the loop, we can deadlock since retry() would always be
++ * comparing the latest value of the mems_allowed seqcount against 0 as
++ * begin() still would see cpusets_enabled() as false. The enabled -> disabled
++ * transition should happen in reverse order for the same reasons (want to stop
++ * looking at real value of mems_allowed.sequence in retry() first).
++ */
++extern struct static_key_false cpusets_pre_enable_key;
+ extern struct static_key_false cpusets_enabled_key;
+ static inline bool cpusets_enabled(void)
+ {
+@@ -32,12 +45,14 @@ static inline int nr_cpusets(void)
+ 
+ static inline void cpuset_inc(void)
+ {
++      static_branch_inc(&cpusets_pre_enable_key);
+       static_branch_inc(&cpusets_enabled_key);
+ }
+ 
+ static inline void cpuset_dec(void)
+ {
+       static_branch_dec(&cpusets_enabled_key);
++      static_branch_dec(&cpusets_pre_enable_key);
+ }
+ 
+ extern int cpuset_init(void);
+@@ -115,7 +130,7 @@ extern void cpuset_print_current_mems_allowed(void);
+  */
+ static inline unsigned int read_mems_allowed_begin(void)
+ {
+-      if (!cpusets_enabled())
++      if (!static_branch_unlikely(&cpusets_pre_enable_key))
+               return 0;
+ 
+       return read_seqcount_begin(&current->mems_allowed_seq);
+@@ -129,7 +144,7 @@ static inline unsigned int read_mems_allowed_begin(void)
+  */
+ static inline bool read_mems_allowed_retry(unsigned int seq)
+ {
+-      if (!cpusets_enabled())
++      if (!static_branch_unlikely(&cpusets_enabled_key))
+               return false;
+ 
+       return read_seqcount_retry(&current->mems_allowed_seq, seq);
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index edafedb7b509..e21a0b3d6454 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -7718,8 +7718,10 @@ struct mlx5_ifc_pcam_reg_bits {
+ };
+ 
+ struct mlx5_ifc_mcam_enhanced_features_bits {
+-      u8         reserved_at_0[0x7f];
++      u8         reserved_at_0[0x7d];
+ 
++      u8         mtpps_enh_out_per_adj[0x1];
++      u8         mtpps_fs[0x1];
+       u8         pcie_performance_group[0x1];
+ };
+ 
+@@ -8115,7 +8117,8 @@ struct mlx5_ifc_mtpps_reg_bits {
+       u8         reserved_at_78[0x4];
+       u8         cap_pin_4_mode[0x4];
+ 
+-      u8         reserved_at_80[0x80];
++      u8         field_select[0x20];
++      u8         reserved_at_a0[0x60];
+ 
+       u8         enable[0x1];
+       u8         reserved_at_101[0xb];
+@@ -8130,8 +8133,9 @@ struct mlx5_ifc_mtpps_reg_bits {
+ 
+       u8         out_pulse_duration[0x10];
+       u8         out_periodic_adjustment[0x10];
++      u8         enhanced_out_periodic_adjustment[0x20];
+ 
+-      u8         reserved_at_1a0[0x60];
++      u8         reserved_at_1c0[0x20];
+ };
+ 
+ struct mlx5_ifc_mtppse_reg_bits {
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 45cdb27791a3..ab8f7e11c160 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -494,6 +494,10 @@ struct mm_struct {
+        * PROT_NONE or PROT_NUMA mapped page.
+        */
+       bool tlb_flush_pending;
++#endif
++#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
++      /* See flush_tlb_batched_pending() */
++      bool tlb_flush_batched;
+ #endif
+       struct uprobes_state uprobes_state;
+ #ifdef CONFIG_HUGETLB_PAGE
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index b28c83475ee8..7882a07d973e 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -1222,7 +1222,7 @@ struct nfs41_state_protection {
+ 
+ struct nfs41_exchange_id_args {
+       struct nfs_client               *client;
+-      nfs4_verifier                   *verifier;
++      nfs4_verifier                   verifier;
+       u32                             flags;
+       struct nfs41_state_protection   state_protect;
+ };
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index c102ef65cb64..db6dc9dc0482 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -323,6 +323,7 @@ enum {
+ 
+       __WQ_DRAINING           = 1 << 16, /* internal: workqueue is draining */
+       __WQ_ORDERED            = 1 << 17, /* internal: workqueue is ordered */
++      __WQ_ORDERED_EXPLICIT   = 1 << 18, /* internal: alloc_ordered_workqueue() */
+       __WQ_LEGACY             = 1 << 18, /* internal: create*_workqueue() */
+ 
+       WQ_MAX_ACTIVE           = 512,    /* I like 512, better ideas? */
+@@ -422,7 +423,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
+  * Pointer to the allocated workqueue on success, %NULL on failure.
+  */
+ #define alloc_ordered_workqueue(fmt, flags, args...)                  \
+-      alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
++      alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |                \
++                      __WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
+ 
+ #define create_workqueue(name)                                                \
+       alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index 069582ee5d7f..06db0c3ec384 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -469,6 +469,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
+ 
+ #define _sctp_walk_params(pos, chunk, end, member)\
+ for (pos.v = chunk->member;\
++     (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
++      (void *)chunk + end) &&\
+      pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
+      ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
+      pos.v += SCTP_PAD4(ntohs(pos.p->length)))
+@@ -479,6 +481,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
+ #define _sctp_walk_errors(err, chunk_hdr, end)\
+ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
+           sizeof(sctp_chunkhdr_t));\
++     ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\
++      (void *)chunk_hdr + end) &&\
+      (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
+      ntohs(err->length) >= sizeof(sctp_errhdr_t); \
+      err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length))))
+diff --git a/include/net/udp.h b/include/net/udp.h
+index 3391dbd73959..1933442cf1a6 100644
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -265,6 +265,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
+ }
+ 
+ void udp_v4_early_demux(struct sk_buff *skb);
++void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
+ int udp_get_port(struct sock *sk, unsigned short snum,
+                int (*saddr_cmp)(const struct sock *,
+                                 const struct sock *));
+diff --git a/include/sound/soc.h b/include/sound/soc.h
+index 5170fd81e1fd..375893d8d4a5 100644
+--- a/include/sound/soc.h
++++ b/include/sound/soc.h
+@@ -795,10 +795,6 @@ struct snd_soc_component_driver {
+       int (*suspend)(struct snd_soc_component *);
+       int (*resume)(struct snd_soc_component *);
+ 
+-      /* pcm creation and destruction */
+-      int (*pcm_new)(struct snd_soc_pcm_runtime *);
+-      void (*pcm_free)(struct snd_pcm *);
+-
+       /* DT */
+       int (*of_xlate_dai_name)(struct snd_soc_component *component,
+                                struct of_phandle_args *args,
+@@ -872,8 +868,6 @@ struct snd_soc_component {
+       void (*remove)(struct snd_soc_component *);
+       int (*suspend)(struct snd_soc_component *);
+       int (*resume)(struct snd_soc_component *);
+-      int (*pcm_new)(struct snd_soc_pcm_runtime *);
+-      void (*pcm_free)(struct snd_pcm *);
+ 
+       /* machine specific init */
+       int (*init)(struct snd_soc_component *component);
+diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
+index 00f4d6bf048f..7a01568e5e22 100644
+--- a/kernel/cgroup/cgroup-internal.h
++++ b/kernel/cgroup/cgroup-internal.h
+@@ -33,6 +33,9 @@ struct cgroup_taskset {
+       struct list_head        src_csets;
+       struct list_head        dst_csets;
+ 
++      /* the number of tasks in the set */
++      int                     nr_tasks;
++
+       /* the subsys currently being processed */
+       int                     ssid;
+ 
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 8d4e85eae42c..2c62e4b3f198 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -1948,6 +1948,8 @@ static void cgroup_migrate_add_task(struct task_struct *task,
+       if (!cset->mg_src_cgrp)
+               return;
+ 
++      mgctx->tset.nr_tasks++;
++
+       list_move_tail(&task->cg_list, &cset->mg_tasks);
+       if (list_empty(&cset->mg_node))
+               list_add_tail(&cset->mg_node,
+@@ -2036,21 +2038,19 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
+       struct css_set *cset, *tmp_cset;
+       int ssid, failed_ssid, ret;
+ 
+-      /* methods shouldn't be called if no task is actually migrating */
+-      if (list_empty(&tset->src_csets))
+-              return 0;
+-
+       /* check that we can legitimately attach to the cgroup */
+-      do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
+-              if (ss->can_attach) {
+-                      tset->ssid = ssid;
+-                      ret = ss->can_attach(tset);
+-                      if (ret) {
+-                              failed_ssid = ssid;
+-                              goto out_cancel_attach;
++      if (tset->nr_tasks) {
++              do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
++                      if (ss->can_attach) {
++                              tset->ssid = ssid;
++                              ret = ss->can_attach(tset);
++                              if (ret) {
++                                      failed_ssid = ssid;
++                                      goto out_cancel_attach;
++                              }
+                       }
+-              }
+-      } while_each_subsys_mask();
++              } while_each_subsys_mask();
++      }
+ 
+       /*
+        * Now that we're guaranteed success, proceed to move all tasks to
+@@ -2077,25 +2077,29 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
+        */
+       tset->csets = &tset->dst_csets;
+ 
+-      do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
+-              if (ss->attach) {
+-                      tset->ssid = ssid;
+-                      ss->attach(tset);
+-              }
+-      } while_each_subsys_mask();
++      if (tset->nr_tasks) {
++              do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
++                      if (ss->attach) {
++                              tset->ssid = ssid;
++                              ss->attach(tset);
++                      }
++              } while_each_subsys_mask();
++      }
+ 
+       ret = 0;
+       goto out_release_tset;
+ 
+ out_cancel_attach:
+-      do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
+-              if (ssid == failed_ssid)
+-                      break;
+-              if (ss->cancel_attach) {
+-                      tset->ssid = ssid;
+-                      ss->cancel_attach(tset);
+-              }
+-      } while_each_subsys_mask();
++      if (tset->nr_tasks) {
++              do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
++                      if (ssid == failed_ssid)
++                              break;
++                      if (ss->cancel_attach) {
++                              tset->ssid = ssid;
++                              ss->cancel_attach(tset);
++                      }
++              } while_each_subsys_mask();
++      }
+ out_release_tset:
+       spin_lock_irq(&css_set_lock);
+       list_splice_init(&tset->dst_csets, &tset->src_csets);
+@@ -2917,11 +2921,11 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
+       cgrp->subtree_control &= ~disable;
+ 
+       ret = cgroup_apply_control(cgrp);
+-
+       cgroup_finalize_control(cgrp, ret);
++      if (ret)
++              goto out_unlock;
+ 
+       kernfs_activate(cgrp->kn);
+-      ret = 0;
+ out_unlock:
+       cgroup_kn_unlock(of->kn);
+       return ret ?: nbytes;
+@@ -4574,6 +4578,10 @@ int __init cgroup_init(void)
+ 
+               if (ss->bind)
+                       ss->bind(init_css_set.subsys[ssid]);
++
++              mutex_lock(&cgroup_mutex);
++              css_populate_dir(init_css_set.subsys[ssid]);
++              mutex_unlock(&cgroup_mutex);
+       }
+ 
+       /* init_css_set.subsys[] has been updated, re-hash */
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index ae643412948a..8f26927f16a1 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -63,6 +63,7 @@
+ #include <linux/cgroup.h>
+ #include <linux/wait.h>
+ 
++DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
+ DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
+ 
+ /* See "Frequency meter" comments, below. */
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 152a706ef8b8..d3f33020a06b 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1495,7 +1495,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
+               base->is_idle = false;
+       } else {
+               if (!is_max_delta)
+-                      expires = basem + (nextevt - basej) * TICK_NSEC;
++                      expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
+               /*
+                * If we expect to sleep more than a tick, mark the base idle:
+                */
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index c74bf39ef764..6effbcb7a3d6 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3744,8 +3744,12 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
+               return -EINVAL;
+ 
+       /* creating multiple pwqs breaks ordering guarantee */
+-      if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
+-              return -EINVAL;
++      if (!list_empty(&wq->pwqs)) {
++              if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
++                      return -EINVAL;
++
++              wq->flags &= ~__WQ_ORDERED;
++      }
+ 
+       ctx = apply_wqattrs_prepare(wq, attrs);
+       if (!ctx)
+@@ -3929,6 +3933,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
+       struct workqueue_struct *wq;
+       struct pool_workqueue *pwq;
+ 
++      /*
++       * Unbound && max_active == 1 used to imply ordered, which is no
++       * longer the case on NUMA machines due to per-node pools.  While
++       * alloc_ordered_workqueue() is the right way to create an ordered
++       * workqueue, keep the previous behavior to avoid subtle breakages
++       * on NUMA.
++       */
++      if ((flags & WQ_UNBOUND) && max_active == 1)
++              flags |= __WQ_ORDERED;
++
+       /* see the comment above the definition of WQ_POWER_EFFICIENT */
+       if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
+               flags |= WQ_UNBOUND;
+@@ -4119,13 +4133,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
+       struct pool_workqueue *pwq;
+ 
+       /* disallow meddling with max_active for ordered workqueues */
+-      if (WARN_ON(wq->flags & __WQ_ORDERED))
++      if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+               return;
+ 
+       max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
+ 
+       mutex_lock(&wq->mutex);
+ 
++      wq->flags &= ~__WQ_ORDERED;
+       wq->saved_max_active = max_active;
+ 
+       for_each_pwq(pwq, wq)
+@@ -5253,7 +5268,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
+        * attributes breaks ordering guarantee.  Disallow exposing ordered
+        * workqueues.
+        */
+-      if (WARN_ON(wq->flags & __WQ_ORDERED))
++      if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+               return -EINVAL;
+ 
+       wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 3eedb187e549..cc289933f462 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4095,6 +4095,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+       unsigned long vaddr = *position;
+       unsigned long remainder = *nr_pages;
+       struct hstate *h = hstate_vma(vma);
++      int err = -EFAULT;
+ 
+       while (vaddr < vma->vm_end && remainder) {
+               pte_t *pte;
+@@ -4170,11 +4171,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+                       }
+                       ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
+                       if (ret & VM_FAULT_ERROR) {
+-                              int err = vm_fault_to_errno(ret, flags);
+-
+-                              if (err)
+-                                      return err;
+-
++                              err = vm_fault_to_errno(ret, flags);
+                               remainder = 0;
+                               break;
+                       }
+@@ -4229,7 +4226,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+        */
+       *position = vaddr;
+ 
+-      return i ? i : -EFAULT;
++      return i ? i : err;
+ }
+ 
+ #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+diff --git a/mm/internal.h b/mm/internal.h
+index 0e4f558412fb..9c8a2bfb975c 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -498,6 +498,7 @@ extern struct workqueue_struct *mm_percpu_wq;
+ #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+ void try_to_unmap_flush(void);
+ void try_to_unmap_flush_dirty(void);
++void flush_tlb_batched_pending(struct mm_struct *mm);
+ #else
+ static inline void try_to_unmap_flush(void)
+ {
+@@ -505,7 +506,9 @@ static inline void try_to_unmap_flush(void)
+ static inline void try_to_unmap_flush_dirty(void)
+ {
+ }
+-
++static inline void flush_tlb_batched_pending(struct mm_struct *mm)
++{
++}
+ #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
+ 
+ extern const struct trace_print_flags pageflag_names[];
+diff --git a/mm/madvise.c b/mm/madvise.c
+index 25b78ee4fc2c..75d2cffbe61d 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -320,6 +320,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
+ 
+       tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
+       orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
++      flush_tlb_batched_pending(mm);
+       arch_enter_lazy_mmu_mode();
+       for (; addr != end; pte++, addr += PAGE_SIZE) {
+               ptent = *pte;
+diff --git a/mm/memory.c b/mm/memory.c
+index bb11c474857e..b0c3d1556a94 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1197,6 +1197,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
+       init_rss_vec(rss);
+       start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+       pte = start_pte;
++      flush_tlb_batched_pending(mm);
+       arch_enter_lazy_mmu_mode();
+       do {
+               pte_t ptent = *pte;
+diff --git a/mm/mprotect.c b/mm/mprotect.c
+index 8edd0d576254..f42749e6bf4e 100644
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -66,6 +66,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+           atomic_read(&vma->vm_mm->mm_users) == 1)
+               target_node = numa_node_id();
+ 
++      flush_tlb_batched_pending(vma->vm_mm);
+       arch_enter_lazy_mmu_mode();
+       do {
+               oldpte = *pte;
+diff --git a/mm/mremap.c b/mm/mremap.c
+index cd8a1b199ef9..3f23715d3c69 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -152,6 +152,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
+       new_ptl = pte_lockptr(mm, new_pmd);
+       if (new_ptl != old_ptl)
+               spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
++      flush_tlb_batched_pending(vma->vm_mm);
+       arch_enter_lazy_mmu_mode();
+ 
+       for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
+@@ -428,6 +429,7 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
+ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
+               unsigned long new_addr, unsigned long new_len, bool *locked,
+               struct vm_userfaultfd_ctx *uf,
++              struct list_head *uf_unmap_early,
+               struct list_head *uf_unmap)
+ {
+       struct mm_struct *mm = current->mm;
+@@ -446,7 +448,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
+       if (addr + old_len > new_addr && new_addr + new_len > addr)
+               goto out;
+ 
+-      ret = do_munmap(mm, new_addr, new_len, NULL);
++      ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
+       if (ret)
+               goto out;
+ 
+@@ -514,6 +516,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+       unsigned long charged = 0;
+       bool locked = false;
+       struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
++      LIST_HEAD(uf_unmap_early);
+       LIST_HEAD(uf_unmap);
+ 
+       if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
+@@ -541,7 +544,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+ 
+       if (flags & MREMAP_FIXED) {
+               ret = mremap_to(addr, old_len, new_addr, new_len,
+-                              &locked, &uf, &uf_unmap);
++                              &locked, &uf, &uf_unmap_early, &uf_unmap);
+               goto out;
+       }
+ 
+@@ -621,6 +624,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+       up_write(&current->mm->mmap_sem);
+       if (locked && new_len > old_len)
+               mm_populate(new_addr + old_len, new_len - old_len);
++      userfaultfd_unmap_complete(mm, &uf_unmap_early);
+       mremap_userfaultfd_complete(&uf, addr, new_addr, old_len);
+       userfaultfd_unmap_complete(mm, &uf_unmap);
+       return ret;
+diff --git a/mm/rmap.c b/mm/rmap.c
+index d405f0e0ee96..9835d19fe143 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -616,6 +616,13 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
+       cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
+       tlb_ubc->flush_required = true;
+ 
++      /*
++       * Ensure compiler does not re-order the setting of tlb_flush_batched
++       * before the PTE is cleared.
++       */
++      barrier();
++      mm->tlb_flush_batched = true;
++
+       /*
+        * If the PTE was dirty then it's best to assume it's writable. The
+        * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
+@@ -643,6 +650,35 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
+ 
+       return should_defer;
+ }
++
++/*
++ * Reclaim unmaps pages under the PTL but do not flush the TLB prior to
++ * releasing the PTL if TLB flushes are batched. It's possible for a parallel
++ * operation such as mprotect or munmap to race between reclaim unmapping
++ * the page and flushing the page. If this race occurs, it potentially allows
++ * access to data via a stale TLB entry. Tracking all mm's that have TLB
++ * batching in flight would be expensive during reclaim so instead track
++ * whether TLB batching occurred in the past and if so then do a flush here
++ * if required. This will cost one additional flush per reclaim cycle paid
++ * by the first operation at risk such as mprotect and mumap.
++ *
++ * This must be called under the PTL so that an access to tlb_flush_batched
++ * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
++ * via the PTL.
++ */
++void flush_tlb_batched_pending(struct mm_struct *mm)
++{
++      if (mm->tlb_flush_batched) {
++              flush_tlb_mm(mm);
++
++              /*
++               * Do not allow the compiler to re-order the clearing of
++               * tlb_flush_batched before the tlb is flushed.
++               */
++              barrier();
++              mm->tlb_flush_batched = false;
++      }
++}
+ #else
+ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
+ {
+diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
+index 27fad31784a8..18f9cb9aa87d 100644
+--- a/net/core/dev_ioctl.c
++++ b/net/core/dev_ioctl.c
+@@ -28,6 +28,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
+ 
+       if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
+               return -EFAULT;
++      ifr.ifr_name[IFNAMSIZ-1] = 0;
+ 
+       error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
+       if (error)
+@@ -423,6 +424,8 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+               if (copy_from_user(&iwr, arg, sizeof(iwr)))
+                       return -EFAULT;
+ 
++              iwr.ifr_name[sizeof(iwr.ifr_name) - 1] = 0;
++
+               return wext_handle_ioctl(net, &iwr, cmd, arg);
+       }
+ 
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 467a2f4510a7..52bfeb60c886 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1977,7 +1977,8 @@ static int do_setlink(const struct sk_buff *skb,
+               struct sockaddr *sa;
+               int len;
+ 
+-              len = sizeof(sa_family_t) + dev->addr_len;
++              len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
++                                                sizeof(*sa));
+               sa = kmalloc(len, GFP_KERNEL);
+               if (!sa) {
+                       err = -ENOMEM;
+@@ -4165,6 +4166,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
+ 
+       switch (event) {
+       case NETDEV_REBOOT:
++      case NETDEV_CHANGEADDR:
+       case NETDEV_CHANGENAME:
+       case NETDEV_FEAT_CHANGE:
+       case NETDEV_BONDING_FAILOVER:
+diff --git a/net/dccp/feat.c b/net/dccp/feat.c
+index 1704948e6a12..f227f002c73d 100644
+--- a/net/dccp/feat.c
++++ b/net/dccp/feat.c
+@@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk)
+        * singleton values (which always leads to failure).
+        * These settings can still (later) be overridden via sockopts.
+        */
+-      if (ccid_get_builtin_ccids(&tx.val, &tx.len) ||
+-          ccid_get_builtin_ccids(&rx.val, &rx.len))
++      if (ccid_get_builtin_ccids(&tx.val, &tx.len))
+               return -ENOBUFS;
++      if (ccid_get_builtin_ccids(&rx.val, &rx.len)) {
++              kfree(tx.val);
++              return -ENOBUFS;
++      }
+ 
+       if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) ||
+           !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len))
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index f75482bdee9a..97368f229876 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -631,6 +631,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+               goto drop_and_free;
+ 
+       inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
++      reqsk_put(req);
+       return 0;
+ 
+ drop_and_free:
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 992621172220..cf3e40df4765 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -380,6 +380,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+               goto drop_and_free;
+ 
+       inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
++      reqsk_put(req);
+       return 0;
+ 
+ drop_and_free:
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 83e3ed258467..3acc8261477c 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -1327,13 +1327,14 @@ static struct pernet_operations fib_net_ops = {
+ 
+ void __init ip_fib_init(void)
+ {
+-      rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
+-      rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
+-      rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
++      fib_trie_init();
+ 
+       register_pernet_subsys(&fib_net_ops);
++
+       register_netdevice_notifier(&fib_netdev_notifier);
+       register_inetaddr_notifier(&fib_inetaddr_notifier);
+ 
+-      fib_trie_init();
++      rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
++      rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
++      rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
+ }
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index ad9ad4aab5da..ce7bc2e5175a 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -1372,7 +1372,7 @@ static int call_fib_nh_notifiers(struct fib_nh *fib_nh,
+               return call_fib_notifiers(dev_net(fib_nh->nh_dev), event_type,
+                                         &info.info);
+       case FIB_EVENT_NH_DEL:
+-              if ((IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
++              if ((in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
+                    fib_nh->nh_flags & RTNH_F_LINKDOWN) ||
+                   (fib_nh->nh_flags & RTNH_F_DEAD))
+                       return call_fib_notifiers(dev_net(fib_nh->nh_dev),
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index 0257d965f111..4a97fe20f59e 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -332,6 +332,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
+       treq->rcv_isn           = ntohl(th->seq) - 1;
+       treq->snt_isn           = cookie;
+       treq->ts_off            = 0;
++      treq->txhash            = net_tx_rndhash();
+       req->mss                = mss;
+       ireq->ir_num            = ntohs(th->dest);
+       ireq->ir_rmt_port       = th->source;
+diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
+index b89bce4c721e..96c95c8d981e 100644
+--- a/net/ipv4/tcp_bbr.c
++++ b/net/ipv4/tcp_bbr.c
+@@ -113,7 +113,8 @@ struct bbr {
+               cwnd_gain:10,   /* current gain for setting cwnd */
+               full_bw_cnt:3,  /* number of rounds without large bw gains */
+               cycle_idx:3,    /* current index in pacing_gain cycle array */
+-              unused_b:6;
++              has_seen_rtt:1, /* have we seen an RTT sample yet? */
++              unused_b:5;
+       u32     prior_cwnd;     /* prior cwnd upon entering loss recovery */
+       u32     full_bw;        /* recent bw, to estimate if pipe is full */
+ };
+@@ -212,6 +213,35 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
+       return rate >> BW_SCALE;
+ }
+ 
++/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
++static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
++{
++      u64 rate = bw;
++
++      rate = bbr_rate_bytes_per_sec(sk, rate, gain);
++      rate = min_t(u64, rate, sk->sk_max_pacing_rate);
++      return rate;
++}
++
++/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
++static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
++{
++      struct tcp_sock *tp = tcp_sk(sk);
++      struct bbr *bbr = inet_csk_ca(sk);
++      u64 bw;
++      u32 rtt_us;
++
++      if (tp->srtt_us) {              /* any RTT sample yet? */
++              rtt_us = max(tp->srtt_us >> 3, 1U);
++              bbr->has_seen_rtt = 1;
++      } else {                         /* no RTT sample yet */
++              rtt_us = USEC_PER_MSEC;  /* use nominal default RTT */
++      }
++      bw = (u64)tp->snd_cwnd * BW_UNIT;
++      do_div(bw, rtt_us);
++      sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
++}
++
+ /* Pace using current bw estimate and a gain factor. In order to help drive the
+  * network toward lower queues while maintaining high utilization and low
+  * latency, the average pacing rate aims to be slightly (~1%) lower than the
+@@ -221,12 +251,13 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
+  */
+ static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
+ {
++      struct tcp_sock *tp = tcp_sk(sk);
+       struct bbr *bbr = inet_csk_ca(sk);
+-      u64 rate = bw;
++      u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain);
+ 
+-      rate = bbr_rate_bytes_per_sec(sk, rate, gain);
+-      rate = min_t(u64, rate, sk->sk_max_pacing_rate);
+-      if (bbr->mode != BBR_STARTUP || rate > sk->sk_pacing_rate)
++      if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
++              bbr_init_pacing_rate_from_rtt(sk);
++      if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
+               sk->sk_pacing_rate = rate;
+ }
+ 
+@@ -799,7 +830,6 @@ static void bbr_init(struct sock *sk)
+ {
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct bbr *bbr = inet_csk_ca(sk);
+-      u64 bw;
+ 
+       bbr->prior_cwnd = 0;
+       bbr->tso_segs_goal = 0;  /* default segs per skb until first ACK */
+@@ -815,11 +845,8 @@ static void bbr_init(struct sock *sk)
+ 
+       minmax_reset(&bbr->bw, bbr->rtt_cnt, 0);  /* init max bw to 0 */
+ 
+-      /* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
+-      bw = (u64)tp->snd_cwnd * BW_UNIT;
+-      do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC);
+-      sk->sk_pacing_rate = 0;         /* force an update of sk_pacing_rate */
+-      bbr_set_pacing_rate(sk, bw, bbr_high_gain);
++      bbr->has_seen_rtt = 0;
++      bbr_init_pacing_rate_from_rtt(sk);
+ 
+       bbr->restore_cwnd = 0;
+       bbr->round_start = 0;
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 1d6219bf2d6b..b9a84eba60b8 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1762,7 +1762,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ /* For TCP sockets, sk_rx_dst is protected by socket lock
+  * For UDP, we use xchg() to guard against concurrent changes.
+  */
+-static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
++void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+ {
+       struct dst_entry *old;
+ 
+@@ -2120,6 +2120,7 @@ void udp_destroy_sock(struct sock *sk)
+                       encap_destroy(sk);
+       }
+ }
++EXPORT_SYMBOL(udp_sk_rx_dst_set);
+ 
+ /*
+  *    Socket option code for UDP
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 1699acb2fa2c..be0306778938 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -673,8 +673,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+               *prevhdr = NEXTHDR_FRAGMENT;
+               tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
+               if (!tmp_hdr) {
+-                      IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+-                                    IPSTATS_MIB_FRAGFAILS);
+                       err = -ENOMEM;
+                       goto fail;
+               }
+@@ -793,8 +791,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+               frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
+                                hroom + troom, GFP_ATOMIC);
+               if (!frag) {
+-                      IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+-                                    IPSTATS_MIB_FRAGFAILS);
+                       err = -ENOMEM;
+                       goto fail;
+               }
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index e9065b8d3af8..abb2c307fbe8 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -78,7 +78,7 @@ EXPORT_SYMBOL(ipv6_select_ident);
+ 
+ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+ {
+-      u16 offset = sizeof(struct ipv6hdr);
++      unsigned int offset = sizeof(struct ipv6hdr);
+       unsigned int packet_len = skb_tail_pointer(skb) -
+               skb_network_header(skb);
+       int found_rhdr = 0;
+@@ -86,6 +86,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+ 
+       while (offset <= packet_len) {
+               struct ipv6_opt_hdr *exthdr;
++              unsigned int len;
+ 
+               switch (**nexthdr) {
+ 
+@@ -111,7 +112,10 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+ 
+               exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
+                                                offset);
+-              offset += ipv6_optlen(exthdr);
++              len = ipv6_optlen(exthdr);
++              if (len + offset >= IPV6_MAXPLEN)
++                      return -EINVAL;
++              offset += len;
+               *nexthdr = &exthdr->nexthdr;
+       }
+ 
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index 5abc3692b901..ca7895454cec 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -215,6 +215,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
+       treq->rcv_isn = ntohl(th->seq) - 1;
+       treq->snt_isn = cookie;
+       treq->ts_off = 0;
++      treq->txhash = net_tx_rndhash();
+ 
+       /*
+        * We need to lookup the dst_entry to get the correct window size.
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 75703fda23e7..592270c310f4 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -291,11 +291,7 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
+                                         struct udp_table *udptable)
+ {
+       const struct ipv6hdr *iph = ipv6_hdr(skb);
+-      struct sock *sk;
+ 
+-      sk = skb_steal_sock(skb);
+-      if (unlikely(sk))
+-              return sk;
+       return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
+                                &iph->daddr, dport, inet6_iif(skb),
+                                udptable, skb);
+@@ -798,6 +794,24 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+       if (udp6_csum_init(skb, uh, proto))
+               goto csum_error;
+ 
++      /* Check if the socket is already available, e.g. due to early demux */
++      sk = skb_steal_sock(skb);
++      if (sk) {
++              struct dst_entry *dst = skb_dst(skb);
++              int ret;
++
++              if (unlikely(sk->sk_rx_dst != dst))
++                      udp_sk_rx_dst_set(sk, dst);
++
++              ret = udpv6_queue_rcv_skb(sk, skb);
++              sock_put(sk);
++
++              /* a return value > 0 means to resubmit the input */
++              if (ret > 0)
++                      return ret;
++              return 0;
++      }
++
+       /*
+        *      Multicast receive code
+        */
+@@ -806,11 +820,6 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+                               saddr, daddr, udptable, proto);
+ 
+       /* Unicast */
+-
+-      /*
+-       * check socket cache ... must talk to Alan about his plans
+-       * for sock caches... i'll skip this for now.
+-       */
+       sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+       if (sk) {
+               int ret;
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index 08679ebb3068..b3bf66bbf4dc 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -1289,8 +1289,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
+ 
+       nla_for_each_nested(a, attr, rem) {
+               int type = nla_type(a);
+-              int maxlen = ovs_ct_attr_lens[type].maxlen;
+-              int minlen = ovs_ct_attr_lens[type].minlen;
++              int maxlen;
++              int minlen;
+ 
+               if (type > OVS_CT_ATTR_MAX) {
+                       OVS_NLERR(log,
+@@ -1298,6 +1298,9 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
+                                 type, OVS_CT_ATTR_MAX);
+                       return -EINVAL;
+               }
++
++              maxlen = ovs_ct_attr_lens[type].maxlen;
++              minlen = ovs_ct_attr_lens[type].minlen;
+               if (nla_len(a) < minlen || nla_len(a) > maxlen) {
+                       OVS_NLERR(log,
+                                 "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index e3eeed19cc7a..0880e0a9d151 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -4334,7 +4334,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+               register_prot_hook(sk);
+       }
+       spin_unlock(&po->bind_lock);
+-      if (closing && (po->tp_version > TPACKET_V2)) {
++      if (pg_vec && (po->tp_version > TPACKET_V2)) {
+               /* Because we don't support block-based V3 on tx-ring */
+               if (!tx_ring)
+                       prb_shutdown_retire_blk_timer(po, rb_queue);
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index 92e332e17391..961a6f81ae64 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -228,7 +228,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
+       sctp_adaptation_ind_param_t aiparam;
+       sctp_supported_ext_param_t ext_param;
+       int num_ext = 0;
+-      __u8 extensions[3];
++      __u8 extensions[4];
+       sctp_paramhdr_t *auth_chunks = NULL,
+                       *auth_hmacs = NULL;
+ 
+@@ -396,7 +396,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
+       sctp_adaptation_ind_param_t aiparam;
+       sctp_supported_ext_param_t ext_param;
+       int num_ext = 0;
+-      __u8 extensions[3];
++      __u8 extensions[4];
+       sctp_paramhdr_t *auth_chunks = NULL,
+                       *auth_hmacs = NULL,
+                       *auth_random = NULL;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index a808332d02d0..606d5333ff98 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2296,6 +2296,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
+       SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+       SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
++      SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
+       SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+       SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
+ 
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index 754e3ef8d7ae..d05acc8eed1f 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -3139,8 +3139,6 @@ static int snd_soc_component_initialize(struct snd_soc_component *component,
+       component->remove = component->driver->remove;
+       component->suspend = component->driver->suspend;
+       component->resume = component->driver->resume;
+-      component->pcm_new = component->driver->pcm_new;
+-      component->pcm_free = component->driver->pcm_free;
+ 
+       dapm = &component->dapm;
+       dapm->dev = dev;
+@@ -3328,25 +3326,6 @@ static void snd_soc_platform_drv_remove(struct snd_soc_component *component)
+       platform->driver->remove(platform);
+ }
+ 
+-static int snd_soc_platform_drv_pcm_new(struct snd_soc_pcm_runtime *rtd)
+-{
+-      struct snd_soc_platform *platform = rtd->platform;
+-
+-      if (platform->driver->pcm_new)
+-              return platform->driver->pcm_new(rtd);
+-      else
+-              return 0;
+-}
+-
+-static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm)
+-{
+-      struct snd_soc_pcm_runtime *rtd = pcm->private_data;
+-      struct snd_soc_platform *platform = rtd->platform;
+-
+-      if (platform->driver->pcm_free)
+-              platform->driver->pcm_free(pcm);
+-}
+-
+ /**
+  * snd_soc_add_platform - Add a platform to the ASoC core
+  * @dev: The parent device for the platform
+@@ -3370,10 +3349,6 @@ int snd_soc_add_platform(struct device *dev, struct snd_soc_platform *platform,
+               platform->component.probe = snd_soc_platform_drv_probe;
+       if (platform_drv->remove)
+               platform->component.remove = snd_soc_platform_drv_remove;
+-      if (platform_drv->pcm_new)
+-              platform->component.pcm_new = snd_soc_platform_drv_pcm_new;
+-      if (platform_drv->pcm_free)
+-              platform->component.pcm_free = snd_soc_platform_drv_pcm_free;
+ 
+ #ifdef CONFIG_DEBUG_FS
+       platform->component.debugfs_prefix = "platform";
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index efc5831f205d..8ff7cd3b8c1f 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -181,6 +181,10 @@ int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
+               dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n",
+                               be->dai_link->name, event, dir);
+ 
++              if ((event == SND_SOC_DAPM_STREAM_STOP) &&
++                  (be->dpcm[dir].users >= 1))
++                      continue;
++
+               snd_soc_dapm_stream_event(be, dir, event);
+       }
+ 
+@@ -2628,25 +2632,12 @@ static int dpcm_fe_dai_close(struct snd_pcm_substream *fe_substream)
+       return ret;
+ }
+ 
+-static void soc_pcm_free(struct snd_pcm *pcm)
+-{
+-      struct snd_soc_pcm_runtime *rtd = pcm->private_data;
+-      struct snd_soc_component *component;
+-
+-      list_for_each_entry(component, &rtd->card->component_dev_list,
+-                          card_list) {
+-              if (component->pcm_free)
+-                      component->pcm_free(pcm);
+-      }
+-}
+-
+ /* create a new pcm */
+ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
+ {
+       struct snd_soc_platform *platform = rtd->platform;
+       struct snd_soc_dai *codec_dai;
+       struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+-      struct snd_soc_component *component;
+       struct snd_pcm *pcm;
+       char new_name[64];
+       int ret = 0, playback = 0, capture = 0;
+@@ -2755,18 +2746,17 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
+       if (capture)
+               snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &rtd->ops);
+ 
+-      list_for_each_entry(component, &rtd->card->component_dev_list, card_list) {
+-              if (component->pcm_new) {
+-                      ret = component->pcm_new(rtd);
+-                      if (ret < 0) {
+-                              dev_err(component->dev,
+-                                      "ASoC: pcm constructor failed: %d\n",
+-                                      ret);
+-                              return ret;
+-                      }
++      if (platform->driver->pcm_new) {
++              ret = platform->driver->pcm_new(rtd);
++              if (ret < 0) {
++                      dev_err(platform->dev,
++                              "ASoC: pcm constructor failed: %d\n",
++                              ret);
++                      return ret;
+               }
+       }
+-      pcm->private_free = soc_pcm_free;
++
++      pcm->private_free = platform->driver->pcm_free;
+ out:
+       dev_info(rtd->card->dev, "%s <-> %s mapping ok\n",
+                (rtd->num_codecs > 1) ? "multicodec" : rtd->codec_dai->name,
+diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c
+index b50f68a439ce..ba9fc099cf67 100644
+--- a/sound/soc/ux500/mop500.c
++++ b/sound/soc/ux500/mop500.c
+@@ -33,6 +33,7 @@ static struct snd_soc_dai_link mop500_dai_links[] = {
+               .stream_name = "ab8500_0",
+               .cpu_dai_name = "ux500-msp-i2s.1",
+               .codec_dai_name = "ab8500-codec-dai.0",
++              .platform_name = "ux500-msp-i2s.1",
+               .codec_name = "ab8500-codec.0",
+               .init = mop500_ab8500_machine_init,
+               .ops = mop500_ab8500_ops,
+@@ -42,6 +43,7 @@ static struct snd_soc_dai_link mop500_dai_links[] = {
+               .stream_name = "ab8500_1",
+               .cpu_dai_name = "ux500-msp-i2s.3",
+               .codec_dai_name = "ab8500-codec-dai.1",
++              .platform_name = "ux500-msp-i2s.3",
+               .codec_name = "ab8500-codec.0",
+               .init = NULL,
+               .ops = mop500_ab8500_ops,
+@@ -85,6 +87,8 @@ static int mop500_of_probe(struct platform_device *pdev,
+       for (i = 0; i < 2; i++) {
+               mop500_dai_links[i].cpu_of_node = msp_np[i];
+               mop500_dai_links[i].cpu_dai_name = NULL;
++              mop500_dai_links[i].platform_of_node = msp_np[i];
++              mop500_dai_links[i].platform_name = NULL;
+               mop500_dai_links[i].codec_of_node = codec_np;
+               mop500_dai_links[i].codec_name = NULL;
+       }
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index e2e5effba2a9..db1c7b25a44c 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -1665,12 +1665,16 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *
+ 
+ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+ {
++      if (!kvm->arch.pgd)
++              return 0;
+       trace_kvm_age_hva(start, end);
+       return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+ }
+ 
+ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+ {
++      if (!kvm->arch.pgd)
++              return 0;
+       trace_kvm_test_age_hva(hva);
+       return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
+ }
