commit:     1950ee99461bdc6a46958d93468c0901c1895680
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Oct 20 12:32:45 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Nov 21 15:01:45 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1950ee99

Linux patch 4.4.162

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1161_linux-4.4.162.patch | 2111 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2115 insertions(+)

diff --git a/0000_README b/0000_README
index d7ad776..98ec483 100644
--- a/0000_README
+++ b/0000_README
@@ -687,6 +687,10 @@ Patch:  1160_linux-4.4.161.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.4.161
 
+Patch:  1161_linux-4.4.162.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.4.162
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1161_linux-4.4.162.patch b/1161_linux-4.4.162.patch
new file mode 100644
index 0000000..d8efae3
--- /dev/null
+++ b/1161_linux-4.4.162.patch
@@ -0,0 +1,2111 @@
+diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
+index b5d79761ac97..410c044166e2 100644
+--- a/Documentation/devicetree/bindings/net/macb.txt
++++ b/Documentation/devicetree/bindings/net/macb.txt
+@@ -8,6 +8,7 @@ Required properties:
+   Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
+   the Cadence GEM, or the generic form: "cdns,gem".
+   Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 
SoCs.
++  Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 
SoCs.
+   Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
+   Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 
SoCs.
+   Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC.
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 3fd53e193b7f..da515c535e62 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -961,11 +961,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+                       See Documentation/x86/intel_mpx.txt for more
+                       information about the feature.
+ 
+-      eagerfpu=       [X86]
+-                      on      enable eager fpu restore
+-                      off     disable eager fpu restore
+-                      auto    selects the default scheme, which automatically
+-                              enables eagerfpu restore for xsaveopt.
+ 
+       module.async_probe [KNL]
+                       Enable asynchronous probe on this module.
+diff --git a/Makefile b/Makefile
+index 57e4ff1a8b96..00ff2dd68ff1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 161
++SUBLEVEL = 162
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/arc/Makefile b/arch/arc/Makefile
+index b9f7306412e5..9d64eacdd2aa 100644
+--- a/arch/arc/Makefile
++++ b/arch/arc/Makefile
+@@ -18,20 +18,6 @@ cflags-y    += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
+ cflags-$(CONFIG_ISA_ARCOMPACT)        += -mA7
+ cflags-$(CONFIG_ISA_ARCV2)    += -mcpu=archs
+ 
+-is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
+-
+-ifdef CONFIG_ISA_ARCOMPACT
+-ifeq ($(is_700), 0)
+-    $(error Toolchain not configured for ARCompact builds)
+-endif
+-endif
+-
+-ifdef CONFIG_ISA_ARCV2
+-ifeq ($(is_700), 1)
+-    $(error Toolchain not configured for ARCv2 builds)
+-endif
+-endif
+-
+ ifdef CONFIG_ARC_CURR_IN_REG
+ # For a global register defintion, make sure it gets passed to every file
+ # We had a customer reported bug where some code built in kernel was NOT using
+diff --git a/arch/arm/boot/dts/sama5d3_emac.dtsi b/arch/arm/boot/dts/sama5d3_emac.dtsi
+index 7cb235ef0fb6..6e9e1c2f9def 100644
+--- a/arch/arm/boot/dts/sama5d3_emac.dtsi
++++ b/arch/arm/boot/dts/sama5d3_emac.dtsi
+@@ -41,7 +41,7 @@
+                       };
+ 
+                       macb1: ethernet@f802c000 {
+-                              compatible = "cdns,at91sam9260-macb", "cdns,macb";
++                              compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb";
+                               reg = <0xf802c000 0x100>;
+                               interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
+                               pinctrl-names = "default";
+diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
+index b7019b559ddb..2d2860711e07 100644
+--- a/arch/powerpc/kernel/tm.S
++++ b/arch/powerpc/kernel/tm.S
+@@ -199,13 +199,27 @@ dont_backup_fp:
+       std     r1, PACATMSCRATCH(r13)
+       ld      r1, PACAR1(r13)
+ 
+-      /* Store the PPR in r11 and reset to decent value */
+       std     r11, GPR11(r1)                  /* Temporary stash */
+ 
++      /*
++       * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
++       * clobbered by an exception once we turn on MSR_RI below.
++       */
++      ld      r11, PACATMSCRATCH(r13)
++      std     r11, GPR1(r1)
++
++      /*
++       * Store r13 away so we can free up the scratch SPR for the SLB fault
++       * handler (needed once we start accessing the thread_struct).
++       */
++      GET_SCRATCH0(r11)
++      std     r11, GPR13(r1)
++
+       /* Reset MSR RI so we can take SLB faults again */
+       li      r11, MSR_RI
+       mtmsrd  r11, 1
+ 
++      /* Store the PPR in r11 and reset to decent value */
+       mfspr   r11, SPRN_PPR
+       HMT_MEDIUM
+ 
+@@ -230,11 +244,11 @@ dont_backup_fp:
+       SAVE_GPR(8, r7)                         /* user r8 */
+       SAVE_GPR(9, r7)                         /* user r9 */
+       SAVE_GPR(10, r7)                        /* user r10 */
+-      ld      r3, PACATMSCRATCH(r13)          /* user r1 */
++      ld      r3, GPR1(r1)                    /* user r1 */
+       ld      r4, GPR7(r1)                    /* user r7 */
+       ld      r5, GPR11(r1)                   /* user r11 */
+       ld      r6, GPR12(r1)                   /* user r12 */
+-      GET_SCRATCH0(8)                         /* user r13 */
++      ld      r8, GPR13(r1)                   /* user r13 */
+       std     r3, GPR1(r7)
+       std     r4, GPR7(r7)
+       std     r5, GPR11(r7)
+diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
+index 715399b14ed7..c194d5717ae5 100644
+--- a/arch/x86/crypto/crc32c-intel_glue.c
++++ b/arch/x86/crypto/crc32c-intel_glue.c
+@@ -48,21 +48,13 @@
+ #ifdef CONFIG_X86_64
+ /*
+  * use carryless multiply version of crc32c when buffer
+- * size is >= 512 (when eager fpu is enabled) or
+- * >= 1024 (when eager fpu is disabled) to account
++ * size is >= 512 to account
+  * for fpu state save/restore overhead.
+  */
+-#define CRC32C_PCL_BREAKEVEN_EAGERFPU 512
+-#define CRC32C_PCL_BREAKEVEN_NOEAGERFPU       1024
++#define CRC32C_PCL_BREAKEVEN  512
+ 
+ asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
+                               unsigned int crc_init);
+-static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
+-#define set_pcl_breakeven_point()                                     \
+-do {                                                                  \
+-      if (!use_eager_fpu())                                           \
+-              crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \
+-} while (0)
+ #endif /* CONFIG_X86_64 */
+ 
+ static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
+@@ -185,7 +177,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
+        * use faster PCL version if datasize is large enough to
+        * overcome kernel fpu state save/restore overhead
+        */
+-      if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
++      if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
+               kernel_fpu_begin();
+               *crcp = crc_pcl(data, len, *crcp);
+               kernel_fpu_end();
+@@ -197,7 +189,7 @@ static int crc32c_pcl_intel_finup(struct shash_desc *desc, const u8 *data,
+ static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
+                               u8 *out)
+ {
+-      if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
++      if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
+               kernel_fpu_begin();
+               *(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
+               kernel_fpu_end();
+@@ -256,7 +248,6 @@ static int __init crc32c_intel_mod_init(void)
+               alg.update = crc32c_pcl_intel_update;
+               alg.finup = crc32c_pcl_intel_finup;
+               alg.digest = crc32c_pcl_intel_digest;
+-              set_pcl_breakeven_point();
+       }
+ #endif
+       return crypto_register_shash(&alg);
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index dd2269dcbc47..a5fa3195a230 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -104,7 +104,6 @@
+ #define X86_FEATURE_EXTD_APICID       ( 3*32+26) /* has extended APICID (8 bits) */
+ #define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
+ #define X86_FEATURE_APERFMPERF        ( 3*32+28) /* APERFMPERF */
+-/* free, was #define X86_FEATURE_EAGER_FPU    ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
+ #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+ 
+ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index ec2aedb6f92a..16825dda18dc 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -57,11 +57,6 @@ extern u64 fpu__get_supported_xfeatures_mask(void);
+ /*
+  * FPU related CPU feature flag helper routines:
+  */
+-static __always_inline __pure bool use_eager_fpu(void)
+-{
+-      return true;
+-}
+-
+ static __always_inline __pure bool use_xsaveopt(void)
+ {
+       return static_cpu_has(X86_FEATURE_XSAVEOPT);
+@@ -498,24 +493,6 @@ static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
+ }
+ 
+ 
+-/*
+- * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
+- * idiom, which is then paired with the sw-flag (fpregs_active) later on:
+- */
+-
+-static inline void __fpregs_activate_hw(void)
+-{
+-      if (!use_eager_fpu())
+-              clts();
+-}
+-
+-static inline void __fpregs_deactivate_hw(void)
+-{
+-      if (!use_eager_fpu())
+-              stts();
+-}
+-
+-/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
+ static inline void __fpregs_deactivate(struct fpu *fpu)
+ {
+       WARN_ON_FPU(!fpu->fpregs_active);
+@@ -524,7 +501,6 @@ static inline void __fpregs_deactivate(struct fpu *fpu)
+       this_cpu_write(fpu_fpregs_owner_ctx, NULL);
+ }
+ 
+-/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
+ static inline void __fpregs_activate(struct fpu *fpu)
+ {
+       WARN_ON_FPU(fpu->fpregs_active);
+@@ -549,22 +525,17 @@ static inline int fpregs_active(void)
+ }
+ 
+ /*
+- * Encapsulate the CR0.TS handling together with the
+- * software flag.
+- *
+  * These generally need preemption protection to work,
+  * do try to avoid using these on their own.
+  */
+ static inline void fpregs_activate(struct fpu *fpu)
+ {
+-      __fpregs_activate_hw();
+       __fpregs_activate(fpu);
+ }
+ 
+ static inline void fpregs_deactivate(struct fpu *fpu)
+ {
+       __fpregs_deactivate(fpu);
+-      __fpregs_deactivate_hw();
+ }
+ 
+ /*
+@@ -591,8 +562,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
+        * or if the past 5 consecutive context-switches used math.
+        */
+       fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
+-                    new_fpu->fpstate_active &&
+-                    (use_eager_fpu() || new_fpu->counter > 5);
++                    new_fpu->fpstate_active;
+ 
+       if (old_fpu->fpregs_active) {
+               if (!copy_fpregs_to_fpstate(old_fpu))
+@@ -605,17 +575,12 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
+ 
+               /* Don't change CR0.TS if we just switch! */
+               if (fpu.preload) {
+-                      new_fpu->counter++;
+                       __fpregs_activate(new_fpu);
+                       prefetch(&new_fpu->state);
+-              } else {
+-                      __fpregs_deactivate_hw();
+               }
+       } else {
+-              old_fpu->counter = 0;
+               old_fpu->last_cpu = -1;
+               if (fpu.preload) {
+-                      new_fpu->counter++;
+                       if (fpu_want_lazy_restore(new_fpu, cpu))
+                               fpu.preload = 0;
+                       else
+diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
+index 1c6f6ac52ad0..0d81c7d6fe96 100644
+--- a/arch/x86/include/asm/fpu/types.h
++++ b/arch/x86/include/asm/fpu/types.h
+@@ -302,17 +302,6 @@ struct fpu {
+        */
+       unsigned char                   fpregs_active;
+ 
+-      /*
+-       * @counter:
+-       *
+-       * This counter contains the number of consecutive context switches
+-       * during which the FPU stays used. If this is over a threshold, the
+-       * lazy FPU restore logic becomes eager, to save the trap overhead.
+-       * This is an unsigned char so that after 256 iterations the counter
+-       * wraps and the context switch behavior turns lazy again; this is to
+-       * deal with bursty apps that only use the FPU for a short time:
+-       */
+-      unsigned char                   counter;
+       /*
+        * @state:
+        *
+@@ -321,29 +310,6 @@ struct fpu {
+        * the registers in the FPU are more recent than this state
+        * copy. If the task context-switches away then they get
+        * saved here and represent the FPU state.
+-       *
+-       * After context switches there may be a (short) time period
+-       * during which the in-FPU hardware registers are unchanged
+-       * and still perfectly match this state, if the tasks
+-       * scheduled afterwards are not using the FPU.
+-       *
+-       * This is the 'lazy restore' window of optimization, which
+-       * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
+-       *
+-       * We detect whether a subsequent task uses the FPU via setting
+-       * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
+-       *
+-       * During this window, if the task gets scheduled again, we
+-       * might be able to skip having to do a restore from this
+-       * memory buffer to the hardware registers - at the cost of
+-       * incurring the overhead of #NM fault traps.
+-       *
+-       * Note that on modern CPUs that support the XSAVEOPT (or other
+-       * optimized XSAVE instructions), we don't use #NM traps anymore,
+-       * as the hardware can track whether FPU registers need saving
+-       * or not. On such CPUs we activate the non-lazy ('eagerfpu')
+-       * logic, which unconditionally saves/restores all FPU state
+-       * across context switches. (if FPU state exists.)
+        */
+       union fpregs_state              state;
+       /*
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 74fda1a453bd..3a37cdbdfbaa 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -439,7 +439,6 @@ struct kvm_vcpu_arch {
+       struct kvm_mmu_memory_cache mmu_page_header_cache;
+ 
+       struct fpu guest_fpu;
+-      bool eager_fpu;
+       u64 xcr0;
+       u64 guest_supported_xcr0;
+       u32 guest_xstate_size;
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index 6aa0b519c851..b322325424bc 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -53,27 +53,9 @@ static bool kernel_fpu_disabled(void)
+       return this_cpu_read(in_kernel_fpu);
+ }
+ 
+-/*
+- * Were we in an interrupt that interrupted kernel mode?
+- *
+- * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
+- * pair does nothing at all: the thread must not have fpu (so
+- * that we don't try to save the FPU state), and TS must
+- * be set (so that the clts/stts pair does nothing that is
+- * visible in the interrupted kernel thread).
+- *
+- * Except for the eagerfpu case when we return true; in the likely case
+- * the thread has FPU but we are not going to set/clear TS.
+- */
+ static bool interrupted_kernel_fpu_idle(void)
+ {
+-      if (kernel_fpu_disabled())
+-              return false;
+-
+-      if (use_eager_fpu())
+-              return true;
+-
+-      return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
++      return !kernel_fpu_disabled();
+ }
+ 
+ /*
+@@ -121,7 +103,6 @@ void __kernel_fpu_begin(void)
+               copy_fpregs_to_fpstate(fpu);
+       } else {
+               this_cpu_write(fpu_fpregs_owner_ctx, NULL);
+-              __fpregs_activate_hw();
+       }
+ }
+ EXPORT_SYMBOL(__kernel_fpu_begin);
+@@ -132,8 +113,6 @@ void __kernel_fpu_end(void)
+ 
+       if (fpu->fpregs_active)
+               copy_kernel_to_fpregs(&fpu->state);
+-      else
+-              __fpregs_deactivate_hw();
+ 
+       kernel_fpu_enable();
+ }
+@@ -194,10 +173,7 @@ void fpu__save(struct fpu *fpu)
+       preempt_disable();
+       if (fpu->fpregs_active) {
+               if (!copy_fpregs_to_fpstate(fpu)) {
+-                      if (use_eager_fpu())
+-                              copy_kernel_to_fpregs(&fpu->state);
+-                      else
+-                              fpregs_deactivate(fpu);
++                      copy_kernel_to_fpregs(&fpu->state);
+               }
+       }
+       preempt_enable();
+@@ -245,8 +221,7 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
+        * Don't let 'init optimized' areas of the XSAVE area
+        * leak into the child task:
+        */
+-      if (use_eager_fpu())
+-              memset(&dst_fpu->state.xsave, 0, xstate_size);
++      memset(&dst_fpu->state.xsave, 0, xstate_size);
+ 
+       /*
+        * Save current FPU registers directly into the child
+@@ -268,17 +243,13 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
+       if (!copy_fpregs_to_fpstate(dst_fpu)) {
+               memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
+ 
+-              if (use_eager_fpu())
+-                      copy_kernel_to_fpregs(&src_fpu->state);
+-              else
+-                      fpregs_deactivate(src_fpu);
++              copy_kernel_to_fpregs(&src_fpu->state);
+       }
+       preempt_enable();
+ }
+ 
+ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
+ {
+-      dst_fpu->counter = 0;
+       dst_fpu->fpregs_active = 0;
+       dst_fpu->last_cpu = -1;
+ 
+@@ -381,7 +352,6 @@ void fpu__restore(struct fpu *fpu)
+       kernel_fpu_disable();
+       fpregs_activate(fpu);
+       copy_kernel_to_fpregs(&fpu->state);
+-      fpu->counter++;
+       kernel_fpu_enable();
+ }
+ EXPORT_SYMBOL_GPL(fpu__restore);
+@@ -398,7 +368,6 @@ EXPORT_SYMBOL_GPL(fpu__restore);
+ void fpu__drop(struct fpu *fpu)
+ {
+       preempt_disable();
+-      fpu->counter = 0;
+ 
+       if (fpu->fpregs_active) {
+               /* Ignore delayed exceptions from user space */
+@@ -437,7 +406,7 @@ void fpu__clear(struct fpu *fpu)
+ {
+       WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
+ 
+-      if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
++      if (!static_cpu_has(X86_FEATURE_FPU)) {
+               /* FPU state will be reallocated lazily at the first use. */
+               fpu__drop(fpu);
+       } else {
+diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
+index 3de077116218..9be3e79eb629 100644
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -319,11 +319,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+               }
+ 
+               fpu->fpstate_active = 1;
+-              if (use_eager_fpu()) {
+-                      preempt_disable();
+-                      fpu__restore(fpu);
+-                      preempt_enable();
+-              }
++              preempt_disable();
++              fpu__restore(fpu);
++              preempt_enable();
+ 
+               return err;
+       } else {
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 83d6369c45f5..338d13d4fd2f 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -16,7 +16,6 @@
+ #include <linux/module.h>
+ #include <linux/vmalloc.h>
+ #include <linux/uaccess.h>
+-#include <asm/fpu/internal.h> /* For use_eager_fpu.  Ugh! */
+ #include <asm/user.h>
+ #include <asm/fpu/xstate.h>
+ #include "cpuid.h"
+@@ -104,9 +103,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
+       if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
+               best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
+ 
+-      vcpu->arch.eager_fpu = use_eager_fpu();
+-      if (vcpu->arch.eager_fpu)
+-              kvm_x86_ops->fpu_activate(vcpu);
++      kvm_x86_ops->fpu_activate(vcpu);
+ 
+       /*
+        * The existing code assumes virtual address is 48-bit in the canonical
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 53d43d22a84b..e6ab034f0bc7 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7319,16 +7319,6 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+       copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
+       __kernel_fpu_end();
+       ++vcpu->stat.fpu_reload;
+-      /*
+-       * If using eager FPU mode, or if the guest is a frequent user
+-       * of the FPU, just leave the FPU active for next time.
+-       * Every 255 times fpu_counter rolls over to 0; a guest that uses
+-       * the FPU in bursts will revert to loading it on demand.
+-       */
+-      if (!vcpu->arch.eager_fpu) {
+-              if (++vcpu->fpu_counter < 5)
+-                      kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
+-      }
+       trace_kvm_fpu(0);
+ }
+ 
+diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
+index 8518d9dfba5c..73c990867c01 100644
+--- a/drivers/clocksource/timer-ti-32k.c
++++ b/drivers/clocksource/timer-ti-32k.c
+@@ -98,6 +98,9 @@ static void __init ti_32k_timer_init(struct device_node *np)
+               return;
+       }
+ 
++      if (!of_machine_is_compatible("ti,am43"))
++              ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
++
+       ti_32k_timer.counter = ti_32k_timer.base;
+ 
+       /*
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+index b233cf8436b0..2e1e84c98034 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+@@ -504,7 +504,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+ 
+       while (true) {
+               temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+-              if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
++              if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+                       break;
+               if (timeout == 0)
+                       return -ETIME;
+diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
+index 12dcbd8226f2..2cce48d9e903 100644
+--- a/drivers/hv/hv_fcopy.c
++++ b/drivers/hv/hv_fcopy.c
+@@ -256,7 +256,6 @@ void hv_fcopy_onchannelcallback(void *context)
+                */
+ 
+               fcopy_transaction.recv_len = recvlen;
+-              fcopy_transaction.recv_channel = channel;
+               fcopy_transaction.recv_req_id = requestid;
+               fcopy_transaction.fcopy_msg = fcopy_msg;
+ 
+@@ -323,6 +322,7 @@ static void fcopy_on_reset(void)
+ int hv_fcopy_init(struct hv_util_service *srv)
+ {
+       recv_buffer = srv->recv_buffer;
++      fcopy_transaction.recv_channel = srv->channel;
+ 
+       init_completion(&release_event);
+       /*
+diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
+index ce4d3a935491..1771a968c3f2 100644
+--- a/drivers/hv/hv_kvp.c
++++ b/drivers/hv/hv_kvp.c
+@@ -78,9 +78,11 @@ static void kvp_send_key(struct work_struct *dummy);
+ 
+ static void kvp_respond_to_host(struct hv_kvp_msg *msg, int error);
+ static void kvp_timeout_func(struct work_struct *dummy);
++static void kvp_host_handshake_func(struct work_struct *dummy);
+ static void kvp_register(int);
+ 
+ static DECLARE_DELAYED_WORK(kvp_timeout_work, kvp_timeout_func);
++static DECLARE_DELAYED_WORK(kvp_host_handshake_work, kvp_host_handshake_func);
+ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
+ 
+ static const char kvp_devname[] = "vmbus/hv_kvp";
+@@ -131,6 +133,11 @@ static void kvp_timeout_func(struct work_struct *dummy)
+       hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
+ }
+ 
++static void kvp_host_handshake_func(struct work_struct *dummy)
++{
++      hv_poll_channel(kvp_transaction.recv_channel, hv_kvp_onchannelcallback);
++}
++
+ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
+ {
+       switch (msg->kvp_hdr.operation) {
+@@ -155,7 +162,13 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
+       pr_debug("KVP: userspace daemon ver. %d registered\n",
+                KVP_OP_REGISTER);
+       kvp_register(dm_reg_value);
+-      kvp_transaction.state = HVUTIL_READY;
++
++      /*
++       * If we're still negotiating with the host cancel the timeout
++       * work to not poll the channel twice.
++       */
++      cancel_delayed_work_sync(&kvp_host_handshake_work);
++      hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
+ 
+       return 0;
+ }
+@@ -595,10 +608,26 @@ void hv_kvp_onchannelcallback(void *context)
+       struct icmsg_negotiate *negop = NULL;
+       int util_fw_version;
+       int kvp_srv_version;
++      static enum {NEGO_NOT_STARTED,
++                   NEGO_IN_PROGRESS,
++                   NEGO_FINISHED} host_negotiatied = NEGO_NOT_STARTED;
+ 
++      if (kvp_transaction.state < HVUTIL_READY) {
++              /*
++               * If userspace daemon is not connected and host is asking
++               * us to negotiate we need to delay to not lose messages.
++               * This is important for Failover IP setting.
++               */
++              if (host_negotiatied == NEGO_NOT_STARTED) {
++                      host_negotiatied = NEGO_IN_PROGRESS;
++                      schedule_delayed_work(&kvp_host_handshake_work,
++                                    HV_UTIL_NEGO_TIMEOUT * HZ);
++              }
++              return;
++      }
+       if (kvp_transaction.state > HVUTIL_READY)
+               return;
+-
++recheck:
+       vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
+                        &requestid);
+ 
+@@ -640,7 +669,6 @@ void hv_kvp_onchannelcallback(void *context)
+                        */
+ 
+                       kvp_transaction.recv_len = recvlen;
+-                      kvp_transaction.recv_channel = channel;
+                       kvp_transaction.recv_req_id = requestid;
+                       kvp_transaction.kvp_msg = kvp_msg;
+ 
+@@ -674,6 +702,10 @@ void hv_kvp_onchannelcallback(void *context)
+               vmbus_sendpacket(channel, recv_buffer,
+                                      recvlen, requestid,
+                                      VM_PKT_DATA_INBAND, 0);
++
++              host_negotiatied = NEGO_FINISHED;
++
++              goto recheck;
+       }
+ 
+ }
+@@ -690,6 +722,7 @@ int
+ hv_kvp_init(struct hv_util_service *srv)
+ {
+       recv_buffer = srv->recv_buffer;
++      kvp_transaction.recv_channel = srv->channel;
+ 
+       init_completion(&release_event);
+       /*
+@@ -711,6 +744,7 @@ hv_kvp_init(struct hv_util_service *srv)
+ void hv_kvp_deinit(void)
+ {
+       kvp_transaction.state = HVUTIL_DEVICE_DYING;
++      cancel_delayed_work_sync(&kvp_host_handshake_work);
+       cancel_delayed_work_sync(&kvp_timeout_work);
+       cancel_work_sync(&kvp_sendkey_work);
+       hvutil_transport_destroy(hvt);
+diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
+index faad79ae318a..b0feddb17170 100644
+--- a/drivers/hv/hv_snapshot.c
++++ b/drivers/hv/hv_snapshot.c
+@@ -114,7 +114,7 @@ static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
+       default:
+               return -EINVAL;
+       }
+-      vss_transaction.state = HVUTIL_READY;
++      hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
+       pr_debug("VSS: userspace daemon ver. %d registered\n", dm_reg_value);
+       return 0;
+ }
+@@ -264,7 +264,6 @@ void hv_vss_onchannelcallback(void *context)
+                        */
+ 
+                       vss_transaction.recv_len = recvlen;
+-                      vss_transaction.recv_channel = channel;
+                       vss_transaction.recv_req_id = requestid;
+                       vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
+ 
+@@ -340,6 +339,7 @@ hv_vss_init(struct hv_util_service *srv)
+               return -ENOTSUPP;
+       }
+       recv_buffer = srv->recv_buffer;
++      vss_transaction.recv_channel = srv->channel;
+ 
+       /*
+        * When this driver loads, the user level daemon that
+diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
+index 41f5896224bd..9dc63725363d 100644
+--- a/drivers/hv/hv_util.c
++++ b/drivers/hv/hv_util.c
+@@ -326,6 +326,7 @@ static int util_probe(struct hv_device *dev,
+       srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
+       if (!srv->recv_buffer)
+               return -ENOMEM;
++      srv->channel = dev->channel;
+       if (srv->util_init) {
+               ret = srv->util_init(srv);
+               if (ret) {
+diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
+index 75e383e6d03d..15e06493c53a 100644
+--- a/drivers/hv/hyperv_vmbus.h
++++ b/drivers/hv/hyperv_vmbus.h
+@@ -35,6 +35,11 @@
+  */
+ #define HV_UTIL_TIMEOUT 30
+ 
++/*
++ * Timeout for guest-host handshake for services.
++ */
++#define HV_UTIL_NEGO_TIMEOUT 60
++
+ /*
+  * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
+  * is set by CPUID(HVCPUID_VERSION_FEATURES).
+diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
+index 7aa7b9cb6203..efefcfa24a4c 100644
+--- a/drivers/i2c/busses/i2c-scmi.c
++++ b/drivers/i2c/busses/i2c-scmi.c
+@@ -152,6 +152,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
+                       mt_params[3].type = ACPI_TYPE_INTEGER;
+                       mt_params[3].integer.value = len;
+                       mt_params[4].type = ACPI_TYPE_BUFFER;
++                      mt_params[4].buffer.length = len;
+                       mt_params[4].buffer.pointer = data->block + 1;
+               }
+               break;
+diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c
+index f1235831283d..fdeda0b0fbd6 100644
+--- a/drivers/input/keyboard/atakbd.c
++++ b/drivers/input/keyboard/atakbd.c
+@@ -79,8 +79,7 @@ MODULE_LICENSE("GPL");
+  */
+ 
+ 
+-static unsigned char atakbd_keycode[0x72] = { /* American layout */
+-      [0]      = KEY_GRAVE,
++static unsigned char atakbd_keycode[0x73] = { /* American layout */
+       [1]      = KEY_ESC,
+       [2]      = KEY_1,
+       [3]      = KEY_2,
+@@ -121,9 +120,9 @@ static unsigned char atakbd_keycode[0x72] = {      /* American layout */
+       [38]     = KEY_L,
+       [39]     = KEY_SEMICOLON,
+       [40]     = KEY_APOSTROPHE,
+-      [41]     = KEY_BACKSLASH,       /* FIXME, '#' */
++      [41]     = KEY_GRAVE,
+       [42]     = KEY_LEFTSHIFT,
+-      [43]     = KEY_GRAVE,           /* FIXME: '~' */
++      [43]     = KEY_BACKSLASH,
+       [44]     = KEY_Z,
+       [45]     = KEY_X,
+       [46]     = KEY_C,
+@@ -149,45 +148,34 @@ static unsigned char atakbd_keycode[0x72] = {    /* American layout */
+       [66]     = KEY_F8,
+       [67]     = KEY_F9,
+       [68]     = KEY_F10,
+-      [69]     = KEY_ESC,
+-      [70]     = KEY_DELETE,
+-      [71]     = KEY_KP7,
+-      [72]     = KEY_KP8,
+-      [73]     = KEY_KP9,
++      [71]     = KEY_HOME,
++      [72]     = KEY_UP,
+       [74]     = KEY_KPMINUS,
+-      [75]     = KEY_KP4,
+-      [76]     = KEY_KP5,
+-      [77]     = KEY_KP6,
++      [75]     = KEY_LEFT,
++      [77]     = KEY_RIGHT,
+       [78]     = KEY_KPPLUS,
+-      [79]     = KEY_KP1,
+-      [80]     = KEY_KP2,
+-      [81]     = KEY_KP3,
+-      [82]     = KEY_KP0,
+-      [83]     = KEY_KPDOT,
+-      [90]     = KEY_KPLEFTPAREN,
+-      [91]     = KEY_KPRIGHTPAREN,
+-      [92]     = KEY_KPASTERISK,      /* FIXME */
+-      [93]     = KEY_KPASTERISK,
+-      [94]     = KEY_KPPLUS,
+-      [95]     = KEY_HELP,
++      [80]     = KEY_DOWN,
++      [82]     = KEY_INSERT,
++      [83]     = KEY_DELETE,
+       [96]     = KEY_102ND,
+-      [97]     = KEY_KPASTERISK,      /* FIXME */
+-      [98]     = KEY_KPSLASH,
++      [97]     = KEY_UNDO,
++      [98]     = KEY_HELP,
+       [99]     = KEY_KPLEFTPAREN,
+       [100]    = KEY_KPRIGHTPAREN,
+       [101]    = KEY_KPSLASH,
+       [102]    = KEY_KPASTERISK,
+-      [103]    = KEY_UP,
+-      [104]    = KEY_KPASTERISK,      /* FIXME */
+-      [105]    = KEY_LEFT,
+-      [106]    = KEY_RIGHT,
+-      [107]    = KEY_KPASTERISK,      /* FIXME */
+-      [108]    = KEY_DOWN,
+-      [109]    = KEY_KPASTERISK,      /* FIXME */
+-      [110]    = KEY_KPASTERISK,      /* FIXME */
+-      [111]    = KEY_KPASTERISK,      /* FIXME */
+-      [112]    = KEY_KPASTERISK,      /* FIXME */
+-      [113]    = KEY_KPASTERISK       /* FIXME */
++      [103]    = KEY_KP7,
++      [104]    = KEY_KP8,
++      [105]    = KEY_KP9,
++      [106]    = KEY_KP4,
++      [107]    = KEY_KP5,
++      [108]    = KEY_KP6,
++      [109]    = KEY_KP1,
++      [110]    = KEY_KP2,
++      [111]    = KEY_KP3,
++      [112]    = KEY_KP0,
++      [113]    = KEY_KPDOT,
++      [114]    = KEY_KPENTER,
+ };
+ 
+ static struct input_dev *atakbd_dev;
+@@ -195,21 +183,15 @@ static struct input_dev *atakbd_dev;
+ static void atakbd_interrupt(unsigned char scancode, char down)
+ {
+ 
+-      if (scancode < 0x72) {          /* scancodes < 0xf2 are keys */
++      if (scancode < 0x73) {          /* scancodes < 0xf3 are keys */
+ 
+               // report raw events here?
+ 
+               scancode = atakbd_keycode[scancode];
+ 
+-              if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */
+-                      input_report_key(atakbd_dev, scancode, 1);
+-                      input_report_key(atakbd_dev, scancode, 0);
+-                      input_sync(atakbd_dev);
+-              } else {
+-                      input_report_key(atakbd_dev, scancode, down);
+-                      input_sync(atakbd_dev);
+-              }
+-      } else                          /* scancodes >= 0xf2 are mouse data, most likely */
++              input_report_key(atakbd_dev, scancode, down);
++              input_sync(atakbd_dev);
++      } else                          /* scancodes >= 0xf3 are mouse data, most likely */
+               printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
+ 
+       return;
+diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
+index 6e02a15d39ce..abddb621d9e6 100644
+--- a/drivers/media/usb/dvb-usb-v2/af9035.c
++++ b/drivers/media/usb/dvb-usb-v2/af9035.c
+@@ -389,8 +389,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+                           msg[0].addr == (state->af9033_i2c_addr[1] >> 1))
+                               reg |= 0x100000;
+ 
+-                      ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
+-                                      msg[0].len - 3);
++                      ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
++                                                               &msg[0].buf[3],
++                                                               msg[0].len - 3)
++                                              : -EOPNOTSUPP;
+               } else {
+                       /* I2C write */
+                       u8 buf[MAX_XFER_SIZE];
+diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
+index 1d924d1533c0..b9dc2fcd8f26 100644
+--- a/drivers/mfd/omap-usb-host.c
++++ b/drivers/mfd/omap-usb-host.c
+@@ -548,8 +548,8 @@ static int usbhs_omap_get_dt_pdata(struct device *dev,
+ }
+ 
+ static const struct of_device_id usbhs_child_match_table[] = {
+-      { .compatible = "ti,omap-ehci", },
+-      { .compatible = "ti,omap-ohci", },
++      { .compatible = "ti,ehci-omap", },
++      { .compatible = "ti,ohci-omap3", },
+       { }
+ };
+ 
+@@ -875,6 +875,7 @@ static struct platform_driver usbhs_omap_driver = {
+               .pm             = &usbhsomap_dev_pm_ops,
+               .of_match_table = usbhs_omap_dt_ids,
+       },
++      .probe          = usbhs_omap_probe,
+       .remove         = usbhs_omap_remove,
+ };
+ 
+@@ -884,9 +885,9 @@ MODULE_ALIAS("platform:" USBHS_DRIVER_NAME);
+ MODULE_LICENSE("GPL v2");
+ MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI");
+ 
+-static int __init omap_usbhs_drvinit(void)
++static int omap_usbhs_drvinit(void)
+ {
+-      return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe);
++      return platform_driver_register(&usbhs_omap_driver);
+ }
+ 
+ /*
+@@ -898,7 +899,7 @@ static int __init omap_usbhs_drvinit(void)
+  */
+ fs_initcall_sync(omap_usbhs_drvinit);
+ 
+-static void __exit omap_usbhs_drvexit(void)
++static void omap_usbhs_drvexit(void)
+ {
+       platform_driver_unregister(&usbhs_omap_driver);
+ }
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 339118f3c718..78da1b7b4d86 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -216,6 +216,7 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
+ static void bond_slave_arr_handler(struct work_struct *work);
+ static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
+                                 int mod);
++static void bond_netdev_notify_work(struct work_struct *work);
+ 
+ /*---------------------------- General routines -----------------------------*/
+ 
+@@ -1237,6 +1238,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
+                       return NULL;
+               }
+       }
++      INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
++
+       return slave;
+ }
+ 
+@@ -1244,6 +1247,7 @@ static void bond_free_slave(struct slave *slave)
+ {
+       struct bonding *bond = bond_get_bond_by_slave(slave);
+ 
++      cancel_delayed_work_sync(&slave->notify_work);
+       if (BOND_MODE(bond) == BOND_MODE_8023AD)
+               kfree(SLAVE_AD_INFO(slave));
+ 
+@@ -1265,39 +1269,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
+       info->link_failure_count = slave->link_failure_count;
+ }
+ 
+-static void bond_netdev_notify(struct net_device *dev,
+-                             struct netdev_bonding_info *info)
+-{
+-      rtnl_lock();
+-      netdev_bonding_info_change(dev, info);
+-      rtnl_unlock();
+-}
+-
+ static void bond_netdev_notify_work(struct work_struct *_work)
+ {
+-      struct netdev_notify_work *w =
+-              container_of(_work, struct netdev_notify_work, work.work);
++      struct slave *slave = container_of(_work, struct slave,
++                                         notify_work.work);
++
++      if (rtnl_trylock()) {
++              struct netdev_bonding_info binfo;
+ 
+-      bond_netdev_notify(w->dev, &w->bonding_info);
+-      dev_put(w->dev);
+-      kfree(w);
++              bond_fill_ifslave(slave, &binfo.slave);
++              bond_fill_ifbond(slave->bond, &binfo.master);
++              netdev_bonding_info_change(slave->dev, &binfo);
++              rtnl_unlock();
++      } else {
++              queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
++      }
+ }
+ 
+ void bond_queue_slave_event(struct slave *slave)
+ {
+-      struct bonding *bond = slave->bond;
+-      struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
+-
+-      if (!nnw)
+-              return;
+-
+-      dev_hold(slave->dev);
+-      nnw->dev = slave->dev;
+-      bond_fill_ifslave(slave, &nnw->bonding_info.slave);
+-      bond_fill_ifbond(bond, &nnw->bonding_info.master);
+-      INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
+-
+-      queue_delayed_work(slave->bond->wq, &nnw->work, 0);
++      queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
+ }
+ 
+ /* enslave device <slave> to bond device <master> */
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index af9ec57bbebf..7a6dd5e5e498 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -850,14 +850,22 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
+ {
+       u32 reg;
+ 
+-      /* Stop monitoring MPD interrupt */
+-      intrl2_0_mask_set(priv, INTRL2_0_MPD);
+-
+       /* Clear the MagicPacket detection logic */
+       reg = umac_readl(priv, UMAC_MPD_CTRL);
+       reg &= ~MPD_EN;
+       umac_writel(priv, reg, UMAC_MPD_CTRL);
+ 
++      reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
++      if (reg & INTRL2_0_MPD)
++              netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
++
++      if (reg & INTRL2_0_BRCM_MATCH_TAG) {
++              reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
++                                RXCHK_BRCM_TAG_MATCH_MASK;
++              netdev_info(priv->netdev,
++                          "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
++      }
++
+       netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
+ }
+ 
+@@ -890,11 +898,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
+       if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
+               bcm_sysport_tx_reclaim_all(priv);
+ 
+-      if (priv->irq0_stat & INTRL2_0_MPD) {
+-              netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
+-              bcm_sysport_resume_from_wol(priv);
+-      }
+-
+       return IRQ_HANDLED;
+ }
+ 
+@@ -1915,9 +1918,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
+       /* UniMAC receive needs to be turned on */
+       umac_enable_set(priv, CMD_RX_EN, 1);
+ 
+-      /* Enable the interrupt wake-up source */
+-      intrl2_0_mask_clear(priv, INTRL2_0_MPD);
+-
+       netif_dbg(priv, wol, ndev, "entered WOL mode\n");
+ 
+       return 0;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 4ffacafddacb..fea8116da06a 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1343,8 +1343,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+               if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
+                       tx_pkts++;
+                       /* return full budget so NAPI will complete. */
+-                      if (unlikely(tx_pkts > bp->tx_wake_thresh))
++                      if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
+                               rx_pkts = budget;
++                              raw_cons = NEXT_RAW_CMP(raw_cons);
++                              break;
++                      }
+               } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
+                       rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+                       if (likely(rc >= 0))
+@@ -1362,7 +1365,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+               }
+               raw_cons = NEXT_RAW_CMP(raw_cons);
+ 
+-              if (rx_pkts == budget)
++              if (rx_pkts && rx_pkts == budget)
+                       break;
+       }
+ 
+@@ -1404,8 +1407,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
+       while (1) {
+               work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
+ 
+-              if (work_done >= budget)
++              if (work_done >= budget) {
++                      if (!budget)
++                              BNXT_CP_DB_REARM(cpr->cp_doorbell,
++                                               cpr->cp_raw_cons);
+                       break;
++              }
+ 
+               if (!bnxt_has_work(bp, cpr)) {
+                       napi_complete(napi);
+diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
+index 8c698d464716..085f77f273ba 100644
+--- a/drivers/net/ethernet/cadence/macb.c
++++ b/drivers/net/ethernet/cadence/macb.c
+@@ -2743,6 +2743,13 @@ static const struct macb_config at91sam9260_config = {
+       .init = macb_init,
+ };
+ 
++static const struct macb_config sama5d3macb_config = {
++      .caps = MACB_CAPS_SG_DISABLED
++            | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
++      .clk_init = macb_clk_init,
++      .init = macb_init,
++};
++
+ static const struct macb_config pc302gem_config = {
+       .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
+       .dma_burst_length = 16,
+@@ -2801,6 +2808,7 @@ static const struct of_device_id macb_dt_ids[] = {
+       { .compatible = "cdns,gem", .data = &pc302gem_config },
+       { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
+       { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
++      { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
+       { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
+       { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
+       { .compatible = "cdns,emac", .data = &emac_config },
+diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
+index ac92685dd4e5..42305f3234ff 100644
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -29,6 +29,7 @@
+ #include <linux/clk.h>
+ #include <linux/hrtimer.h>
+ #include <linux/ktime.h>
++#include <linux/if_vlan.h>
+ #include <uapi/linux/ppp_defs.h>
+ #include <net/ip.h>
+ #include <net/ipv6.h>
+@@ -4268,7 +4269,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
+ }
+ 
+ /* Set Tx descriptors fields relevant for CSUM calculation */
+-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
++static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
+                              int ip_hdr_len, int l4_proto)
+ {
+       u32 command;
+@@ -5032,14 +5033,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               int ip_hdr_len = 0;
+               u8 l4_proto;
++              __be16 l3_proto = vlan_get_protocol(skb);
+ 
+-              if (skb->protocol == htons(ETH_P_IP)) {
++              if (l3_proto == htons(ETH_P_IP)) {
+                       struct iphdr *ip4h = ip_hdr(skb);
+ 
+                       /* Calculate IPv4 checksum and L4 checksum */
+                       ip_hdr_len = ip4h->ihl;
+                       l4_proto = ip4h->protocol;
+-              } else if (skb->protocol == htons(ETH_P_IPV6)) {
++              } else if (l3_proto == htons(ETH_P_IPV6)) {
+                       struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ 
+                       /* Read l4_protocol from one of IPv6 extra headers */
+@@ -5051,7 +5053,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
+               }
+ 
+               return mvpp2_txq_desc_csum(skb_network_offset(skb),
+-                              skb->protocol, ip_hdr_len, l4_proto);
++                                         l3_proto, ip_hdr_len, l4_proto);
+       }
+ 
+       return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
+index ff77b8b608bd..7417605c3cf6 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
+@@ -228,7 +228,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
+       struct mlx4_dev *dev = &priv->dev;
+       struct mlx4_eq *eq = &priv->eq_table.eq[vec];
+ 
+-      if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
++      if (!cpumask_available(eq->affinity_mask) ||
++          cpumask_empty(eq->affinity_mask))
+               return;
+ 
+       hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+index 55007f1e6bbc..12cd8aef1881 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+@@ -1802,7 +1802,8 @@ struct qlcnic_hardware_ops {
+       int (*config_loopback) (struct qlcnic_adapter *, u8);
+       int (*clear_loopback) (struct qlcnic_adapter *, u8);
+       int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
+-      void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
++      void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
++                               u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
++                               u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
+       void (*set_mac_filter_count) (struct qlcnic_adapter *);
+       void (*free_mac_list) (struct qlcnic_adapter *);
+@@ -2044,9 +2045,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
+ }
+ 
+ static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
+-                                      u64 *addr, u16 id)
++                                      u64 *addr, u16 vlan,
++                                      struct qlcnic_host_tx_ring *tx_ring)
+ {
+-      adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
++      adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
+ }
+ 
+ static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+index b4f3cb55605e..7f7aea9758e7 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+@@ -2132,7 +2132,8 @@ out:
+ }
+ 
+ void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+-                                u16 vlan_id)
++                                u16 vlan_id,
++                                struct qlcnic_host_tx_ring *tx_ring)
+ {
+       u8 mac[ETH_ALEN];
+       memcpy(&mac, addr, ETH_ALEN);
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+index 331ae2c20f40..c8e012b3f7e7 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+@@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
+ int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
+ int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
+ int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
+-void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
++void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
++                                u16 vlan, struct qlcnic_host_tx_ring *ring);
+ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
+ int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+ void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+index 4bb33af8e2b3..56a3bd9e37dc 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+@@ -173,7 +173,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
+                        struct net_device *netdev);
+ void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
+ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
+-                             u64 *uaddr, u16 vlan_id);
++                             u64 *uaddr, u16 vlan_id,
++                             struct qlcnic_host_tx_ring *tx_ring);
++                             struct qlcnic_host_tx_ring *tx_ring);
+                                    struct ethtool_coalesce *);
+ int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+index d4b5085a21fa..98042a3701b5 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+@@ -269,13 +269,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
+ }
+ 
+ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
+-                             u16 vlan_id)
++                             u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
+ {
+       struct cmd_desc_type0 *hwdesc;
+       struct qlcnic_nic_req *req;
+       struct qlcnic_mac_req *mac_req;
+       struct qlcnic_vlan_req *vlan_req;
+-      struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+       u32 producer;
+       u64 word;
+ 
+@@ -302,7 +301,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
+ 
+ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
+                              struct cmd_desc_type0 *first_desc,
+-                             struct sk_buff *skb)
++                             struct sk_buff *skb,
++                             struct qlcnic_host_tx_ring *tx_ring)
+ {
+       struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
+       struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+@@ -336,7 +336,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
+                   tmp_fil->vlan_id == vlan_id) {
+                       if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+                               qlcnic_change_filter(adapter, &src_addr,
+-                                                   vlan_id);
++                                                   vlan_id, tx_ring);
+                       tmp_fil->ftime = jiffies;
+                       return;
+               }
+@@ -351,7 +351,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
+       if (!fil)
+               return;
+ 
+-      qlcnic_change_filter(adapter, &src_addr, vlan_id);
++      qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
+       fil->ftime = jiffies;
+       fil->vlan_id = vlan_id;
+       memcpy(fil->faddr, &src_addr, ETH_ALEN);
+@@ -767,7 +767,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+       }
+ 
+       if (adapter->drv_mac_learn)
+-              qlcnic_send_filter(adapter, first_desc, skb);
++              qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
+ 
+       tx_ring->tx_stats.tx_bytes += skb->len;
+       tx_ring->tx_stats.xmit_called++;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index d02691ba3d7f..20aa34f45f07 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -71,7 +71,7 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins)
+  * Description:
+  * This function validates the number of Unicast address entries supported
+  * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
+- * supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter
++ * supports 1..32, 64, or 128 Unicast filter entries for it's Unicast filter
+  * logic. This function validates a valid, supported configuration is
+  * selected, and defaults to 1 Unicast address if an unsupported
+  * configuration is selected.
+@@ -81,8 +81,7 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
+       int x = ucast_entries;
+ 
+       switch (x) {
+-      case 1:
+-      case 32:
++      case 1 ... 32:
+       case 64:
+       case 128:
+               break;
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 49174837c2ba..33ffb573fd67 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1142,6 +1142,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
+               return -EBUSY;
+       }
+ 
++      if (dev == port_dev) {
++              netdev_err(dev, "Cannot enslave team device to itself\n");
++              return -EINVAL;
++      }
++
+       if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
+           vlan_uses_dev(dev)) {
+               netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index 478937418a33..8dbe086e0a96 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -1506,6 +1506,7 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
+ {
+       struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+       if (pdata) {
++              cancel_work_sync(&pdata->set_multicast);
+               netif_dbg(dev, ifdown, dev->net, "free pdata\n");
+               kfree(pdata);
+               pdata = NULL;
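
The smsc75xx fix makes unbind wait for any queued set_multicast work before freeing the private data that work operates on. A rough userspace analogue of the same ordering rule, using pthreads and hypothetical names (the kernel primitive is cancel_work_sync(), not a thread join):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct priv { int multicast_mode; };

static void *set_multicast(void *arg)
{
        struct priv *pdata = arg;
        pdata->multicast_mode = 1;      /* touches pdata: must not be freed yet */
        return NULL;
}

int main(void)
{
        struct priv *pdata = calloc(1, sizeof(*pdata));
        pthread_t worker;

        pthread_create(&worker, NULL, set_multicast, pdata);

        /* Tear-down: first make sure the worker has finished ... */
        pthread_join(worker, NULL);
        /* ... and only then release the memory it was using. */
        printf("mode %d\n", pdata->multicast_mode);
        free(pdata);
        return 0;
}
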
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index f7771d86ad6c..4ea44f7122ee 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -518,7 +518,7 @@ static void gs_rx_push(unsigned long _port)
+               }
+ 
+               /* push data to (open) tty */
+-              if (req->actual) {
++              if (req->actual && tty) {
+                       char            *packet = req->buf;
+                       unsigned        size = req->actual;
+                       unsigned        n;
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 9df00101bfe1..f788f80fe1aa 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -1048,17 +1048,17 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+                               temp = readl(port_array[wIndex]);
+                               break;
+                       }
+-
+-                      /* Software should not attempt to set
+-                       * port link state above '3' (U3) and the port
+-                       * must be enabled.
+-                       */
+-                      if ((temp & PORT_PE) == 0 ||
+-                              (link_state > USB_SS_PORT_LS_U3)) {
+-                              xhci_warn(xhci, "Cannot set link state.\n");
++                      /* Port must be enabled */
++                      if (!(temp & PORT_PE)) {
++                              retval = -ENODEV;
++                              break;
++                      }
++                      /* Can't set port link state above '3' (U3) */
++                      if (link_state > USB_SS_PORT_LS_U3) {
++                              xhci_warn(xhci, "Cannot set port %d link state %d\n",
++                                       wIndex, link_state);
+                               goto error;
+                       }
+-
+                       if (link_state == USB_SS_PORT_LS_U3) {
+                               slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+                                               wIndex + 1);
+diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h
+index 63c4842eb224..46e0e8b39b76 100644
+--- a/drivers/video/fbdev/aty/atyfb.h
++++ b/drivers/video/fbdev/aty/atyfb.h
+@@ -332,6 +332,8 @@ extern const struct aty_pll_ops aty_pll_ct; /* Integrated */
+ extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll);
+ extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
+ 
++extern const u8 aty_postdividers[8];
++
+ 
+     /*
+      *  Hardware cursor support
+@@ -358,7 +360,6 @@ static inline void wait_for_idle(struct atyfb_par *par)
+ 
+ extern void aty_reset_engine(const struct atyfb_par *par);
+ extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
+-extern u8   aty_ld_pll_ct(int offset, const struct atyfb_par *par);
+ 
+ void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
+ void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
+index 7f658fa4d22a..9755a0ec6591 100644
+--- a/drivers/video/fbdev/aty/atyfb_base.c
++++ b/drivers/video/fbdev/aty/atyfb_base.c
+@@ -3093,17 +3093,18 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
+               /*
+                * PLL Reference Divider M:
+                */
+-              M = pll_regs[2];
++              M = pll_regs[PLL_REF_DIV];
+ 
+               /*
+                * PLL Feedback Divider N (Dependent on CLOCK_CNTL):
+                */
+-              N = pll_regs[7 + (clock_cntl & 3)];
++              N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)];
+ 
+               /*
+                * PLL Post Divider P (Dependent on CLOCK_CNTL):
+                */
+-              P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
++              P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) |
++                                   ((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)];
+ 
+               /*
+                * PLL Divider Q:
+diff --git a/drivers/video/fbdev/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c
+index 51f29d627ceb..af54256a20a1 100644
+--- a/drivers/video/fbdev/aty/mach64_ct.c
++++ b/drivers/video/fbdev/aty/mach64_ct.c
+@@ -114,7 +114,7 @@ static void aty_st_pll_ct(int offset, u8 val, const struct atyfb_par *par)
+  */
+ 
+ #define Maximum_DSP_PRECISION 7
+-static u8 postdividers[] = {1,2,4,8,3};
++const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12};
+ 
+ static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
+ {
+@@ -221,7 +221,7 @@ static int aty_valid_pll_ct(const struct fb_info *info, u32 vclk_per, struct pll
+               pll->vclk_post_div += (q <  64*8);
+               pll->vclk_post_div += (q <  32*8);
+       }
+-      pll->vclk_post_div_real = postdividers[pll->vclk_post_div];
++      pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div];
+       //    pll->vclk_post_div <<= 6;
+       pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
+       pllvclk = (1000000 * 2 * pll->vclk_fb_div) /
+@@ -512,7 +512,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
+               u8 mclk_fb_div, pll_ext_cntl;
+               pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
+               pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par);
+-              pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07];
++              pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07];
+               mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par);
+               if (pll_ext_cntl & PLL_MFB_TIMES_4_2B)
+                       mclk_fb_div <<= 1;
+@@ -534,7 +534,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
+               xpost_div += (q <  64*8);
+               xpost_div += (q <  32*8);
+       }
+-      pll->ct.xclk_post_div_real = postdividers[xpost_div];
++      pll->ct.xclk_post_div_real = aty_postdividers[xpost_div];
+       pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
+ 
+ #ifdef CONFIG_PPC
+@@ -583,7 +583,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
+                       mpost_div += (q <  64*8);
+                       mpost_div += (q <  32*8);
+               }
+-              sclk_post_div_real = postdividers[mpost_div];
++              sclk_post_div_real = aty_postdividers[mpost_div];
+               pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
+               pll->ct.spll_cntl2 = mpost_div << 4;
+ #ifdef DEBUG
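
Across the three atyfb files above, the private five-entry postdividers[] table becomes the exported eight-entry aty_postdividers[], and atyfb_base.c now assembles a 3-bit index from two PLL registers instead of shifting a register field straight into a power of two. A standalone sketch of that lookup; the register values below are invented for illustration only:

#include <stdio.h>

/* Same contents the patch exports as aty_postdividers[8]. */
static const unsigned char postdividers[8] = { 1, 2, 4, 8, 3, 5, 6, 12 };

int main(void)
{
        unsigned char vclk_post_div = 0x2d;     /* hypothetical PLL register */
        unsigned char pll_ext_cntl  = 0x14;     /* hypothetical PLL register */
        unsigned int clk = 1;                   /* clock_cntl & 3 */

        /* Low two bits come from VCLK_POST_DIV, bit 2 from PLL_EXT_CNTL,
         * mirroring the expression added to atyfb_setup_sparc(). The result
         * is always in 0..7, so it can never index past the table. */
        unsigned int idx = ((vclk_post_div >> (clk << 1)) & 3) |
                           ((pll_ext_cntl >> (2 + clk)) & 4);

        printf("post divider = %u\n", postdividers[idx]);
        return 0;
}
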
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index d0aaf338fa9f..d6bae37489af 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -638,14 +638,20 @@ static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
+ }
+ 
+ static int
+-ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
++ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s,
++                   struct inode *inode)
+ {
+-      struct ext4_xattr_entry *last;
++      struct ext4_xattr_entry *last, *next;
+       size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
+ 
+       /* Compute min_offs and last. */
+       last = s->first;
+-      for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
++      for (; !IS_LAST_ENTRY(last); last = next) {
++              next = EXT4_XATTR_NEXT(last);
++              if ((void *)next >= s->end) {
++                      EXT4_ERROR_INODE(inode, "corrupted xattr entries");
++                      return -EFSCORRUPTED;
++              }
+               if (!last->e_value_block && last->e_value_size) {
+                       size_t offs = le16_to_cpu(last->e_value_offs);
+                       if (offs < min_offs)
+@@ -825,7 +831,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+                               ce = NULL;
+                       }
+                       ea_bdebug(bs->bh, "modifying in-place");
+-                      error = ext4_xattr_set_entry(i, s);
++                      error = ext4_xattr_set_entry(i, s, inode);
+                       if (!error) {
+                               if (!IS_LAST_ENTRY(s->first))
+                                       ext4_xattr_rehash(header(s->base),
+@@ -875,7 +881,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+               s->end = s->base + sb->s_blocksize;
+       }
+ 
+-      error = ext4_xattr_set_entry(i, s);
++      error = ext4_xattr_set_entry(i, s, inode);
+       if (error == -EFSCORRUPTED)
+               goto bad_block;
+       if (error)
+@@ -1037,7 +1043,7 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
+ 
+       if (EXT4_I(inode)->i_extra_isize == 0)
+               return -ENOSPC;
+-      error = ext4_xattr_set_entry(i, s);
++      error = ext4_xattr_set_entry(i, s, inode);
+       if (error) {
+               if (error == -ENOSPC &&
+                   ext4_has_inline_data(inode)) {
+@@ -1049,7 +1055,7 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
+                       error = ext4_xattr_ibody_find(inode, i, is);
+                       if (error)
+                               return error;
+-                      error = ext4_xattr_set_entry(i, s);
++                      error = ext4_xattr_set_entry(i, s, inode);
+               }
+               if (error)
+                       return error;
+@@ -1075,7 +1081,7 @@ static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
+ 
+       if (EXT4_I(inode)->i_extra_isize == 0)
+               return -ENOSPC;
+-      error = ext4_xattr_set_entry(i, s);
++      error = ext4_xattr_set_entry(i, s, inode);
+       if (error)
+               return error;
+       header = IHDR(inode, ext4_raw_inode(&is->iloc));
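
The ext4 change bounds the xattr entry walk: the next-entry pointer is computed first and rejected with -EFSCORRUPTED if it already lies at or beyond the end of the buffer, so a corrupted length can no longer walk off the block. A generic userspace sketch of the same bounded-walk pattern over hypothetical length-prefixed records (not ext4's on-disk format):

#include <stdint.h>
#include <stdio.h>

/* Records are a 1-byte length followed by that many payload bytes;
 * a zero length terminates the list. */
static int walk(const uint8_t *buf, const uint8_t *end)
{
        const uint8_t *p = buf;

        while (p < end && *p != 0) {
                const uint8_t *next = p + 1 + p[0];

                if (next > end)         /* corrupted length field */
                        return -1;
                printf("record of %u bytes\n", (unsigned int)p[0]);
                p = next;               /* advance only once 'next' is known good */
        }
        return 0;
}

int main(void)
{
        uint8_t good[] = { 3, 'a', 'b', 'c', 0 };
        uint8_t bad[]  = { 200, 'x' };  /* claims far more bytes than exist */

        printf("good: %d\n", walk(good, good + sizeof(good)));
        printf("bad:  %d\n", walk(bad, bad + sizeof(bad)));
        return 0;
}
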
+diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
+index 4c2c03663533..8e1427762eeb 100644
+--- a/fs/jffs2/xattr.c
++++ b/fs/jffs2/xattr.c
+@@ -1004,12 +1004,14 @@ ssize_t jffs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
+                       rc = xhandle->list(xhandle, dentry, buffer + len,
+                                          size - len, xd->xname,
+                                          xd->name_len);
++                      if (rc > size - len) {
++                              rc = -ERANGE;
++                              goto out;
++                      }
+               } else {
+                       rc = xhandle->list(xhandle, dentry, NULL, 0,
+                                          xd->xname, xd->name_len);
+               }
+-              if (rc < 0)
+-                      goto out;
+               len += rc;
+       }
+       rc = len;
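
The jffs2 hunk makes listxattr fail with -ERANGE as soon as one handler reports more bytes than remain in the caller's buffer, instead of letting len advance past the end. A small sketch of that accumulate-with-bound pattern, with hypothetical names:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int list_names(const char *const *names, int n, char *buf, size_t size)
{
        size_t len = 0;

        for (int i = 0; i < n; i++) {
                size_t rc = strlen(names[i]) + 1;       /* name plus NUL */

                if (rc > size - len)                    /* would overrun: bail out */
                        return -ERANGE;
                memcpy(buf + len, names[i], rc);
                len += rc;
        }
        return (int)len;
}

int main(void)
{
        const char *names[] = { "user.a", "security.selinux" };
        char small[8], big[64];

        printf("%d\n", list_names(names, 2, small, sizeof(small)));    /* -ERANGE */
        printf("%d\n", list_names(names, 2, big, sizeof(big)));        /* 24 */
        return 0;
}
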
+diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
+index ae6a711dcd1d..281bb007f725 100644
+--- a/include/linux/hyperv.h
++++ b/include/linux/hyperv.h
+@@ -1179,6 +1179,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
+ 
+ struct hv_util_service {
+       u8 *recv_buffer;
++      void *channel;
+       void (*util_cb)(void *);
+       int (*util_init)(struct hv_util_service *);
+       void (*util_deinit)(void);
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index fc54049e8286..0b211d482c96 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2168,6 +2168,13 @@ struct netdev_notifier_info {
+       struct net_device *dev;
+ };
+ 
++struct netdev_notifier_info_ext {
++      struct netdev_notifier_info info; /* must be first */
++      union {
++              u32 mtu;
++      } ext;
++};
++
+ struct netdev_notifier_change_info {
+       struct netdev_notifier_info info; /* must be first */
+       unsigned int flags_changed;
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 93abe5f6188d..d5abd3a80896 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -146,12 +146,6 @@ struct bond_parm_tbl {
+       int mode;
+ };
+ 
+-struct netdev_notify_work {
+-      struct delayed_work     work;
+-      struct net_device       *dev;
+-      struct netdev_bonding_info bonding_info;
+-};
+-
+ struct slave {
+       struct net_device *dev; /* first - useful for panic debug */
+       struct bonding *bond; /* our master */
+@@ -177,6 +171,7 @@ struct slave {
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+       struct netpoll *np;
+ #endif
++      struct delayed_work notify_work;
+       struct kobject kobj;
+       struct rtnl_link_stats64 slave_stats;
+ };
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index 3afb7c4c7098..2a25b53cd427 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -322,6 +322,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev);
+ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
+ int fib_sync_down_addr(struct net *net, __be32 local);
+ int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
++void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
+ 
+ extern u32 fib_multipath_secret __read_mostly;
+ 
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 8895eff2d735..a2d70ef74db7 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -869,7 +869,6 @@ const char * const vmstat_text[] = {
+ #ifdef CONFIG_DEBUG_VM_VMACACHE
+       "vmacache_find_calls",
+       "vmacache_find_hits",
+-      "vmacache_full_flushes",
+ #endif
+ #endif /* CONFIG_VM_EVENTS_COUNTERS */
+ };
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 191cf880d805..7366feb8b5b3 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1660,6 +1660,28 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
+ }
+ EXPORT_SYMBOL(call_netdevice_notifiers);
+ 
++/**
++ *    call_netdevice_notifiers_mtu - call all network notifier blocks
++ *    @val: value passed unmodified to notifier function
++ *    @dev: net_device pointer passed unmodified to notifier function
++ *    @arg: additional u32 argument passed to the notifier function
++ *
++ *    Call all network notifier blocks.  Parameters and return value
++ *    are as for raw_notifier_call_chain().
++ */
++static int call_netdevice_notifiers_mtu(unsigned long val,
++                                      struct net_device *dev, u32 arg)
++{
++      struct netdev_notifier_info_ext info = {
++              .info.dev = dev,
++              .ext.mtu = arg,
++      };
++
++      BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
++
++      return call_netdevice_notifiers_info(val, dev, &info.info);
++}
++
+ #ifdef CONFIG_NET_INGRESS
+ static struct static_key ingress_needed __read_mostly;
+ 
+@@ -6134,14 +6156,16 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
+       err = __dev_set_mtu(dev, new_mtu);
+ 
+       if (!err) {
+-              err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
++              err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
++                                                 orig_mtu);
+               err = notifier_to_errno(err);
+               if (err) {
+                       /* setting mtu back and notifying everyone again,
+                        * so that they have a chance to revert changes.
+                        */
+                       __dev_set_mtu(dev, orig_mtu);
+-                      call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
++                      call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
++                                                   new_mtu);
+               }
+       }
+       return err;
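
The dev.c helper above hands the old MTU to notifier consumers through an extended info structure whose first member is the ordinary netdev_notifier_info, so consumers that only know the base structure keep working, and the BUILD_BUG_ON pins that layout at compile time. A userspace sketch of the embed-the-base-first pattern, with hypothetical types (build with C11 for static_assert):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct notifier_info {                  /* base handed to every consumer */
        const char *dev_name;
};

struct notifier_info_ext {              /* extension: the base must stay first */
        struct notifier_info info;
        unsigned int mtu;
};

/* Legacy consumer: only knows about the base struct. */
static void legacy_consumer(const struct notifier_info *info)
{
        printf("dev %s changed\n", info->dev_name);
}

/* New consumer: recovers the extended struct; the cast is valid because the
 * base is the first member. */
static void mtu_consumer(const struct notifier_info *info)
{
        const struct notifier_info_ext *ext =
                (const struct notifier_info_ext *)info;

        printf("dev %s old mtu %u\n", info->dev_name, ext->mtu);
}

int main(void)
{
        struct notifier_info_ext ext = { .info.dev_name = "eth0", .mtu = 1500 };

        /* Same compile-time guarantee as the kernel's BUILD_BUG_ON(). */
        static_assert(offsetof(struct notifier_info_ext, info) == 0,
                      "base struct must be first");

        legacy_consumer(&ext.info);
        mtu_consumer(&ext.info);
        return 0;
}
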
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 96c9c0f0905a..f1df04c7d395 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2116,6 +2116,12 @@ struct net_device *rtnl_create_link(struct net *net,
+       else if (ops->get_num_rx_queues)
+               num_rx_queues = ops->get_num_rx_queues();
+ 
++      if (num_tx_queues < 1 || num_tx_queues > 4096)
++              return ERR_PTR(-EINVAL);
++
++      if (num_rx_queues < 1 || num_rx_queues > 4096)
++              return ERR_PTR(-EINVAL);
++
+       err = -ENOMEM;
+       dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
+                              ops->setup, num_tx_queues, num_rx_queues);
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 015c33712803..ce646572b912 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -1170,7 +1170,8 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
+ static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
+ {
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+-      struct netdev_notifier_changeupper_info *info;
++      struct netdev_notifier_changeupper_info *upper_info = ptr;
++      struct netdev_notifier_info_ext *info_ext = ptr;
+       struct in_device *in_dev;
+       struct net *net = dev_net(dev);
+       unsigned int flags;
+@@ -1205,16 +1206,19 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
+                       fib_sync_up(dev, RTNH_F_LINKDOWN);
+               else
+                       fib_sync_down_dev(dev, event, false);
+-              /* fall through */
++              rt_cache_flush(net);
++              break;
+       case NETDEV_CHANGEMTU:
++              fib_sync_mtu(dev, info_ext->ext.mtu);
+               rt_cache_flush(net);
+               break;
+       case NETDEV_CHANGEUPPER:
+-              info = ptr;
++              upper_info = ptr;
+               /* flush all routes if dev is linked to or unlinked from
+                * an L3 master device (e.g., VRF)
+                */
+-              if (info->upper_dev && netif_is_l3_master(info->upper_dev))
++              if (upper_info->upper_dev &&
++                  netif_is_l3_master(upper_info->upper_dev))
+                       fib_disable_ip(dev, NETDEV_DOWN, true);
+               break;
+       }
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -1373,6 +1373,56 @@ int fib_sync_down_addr(struct net *net, __be32 local)
+       return ret;
+ }
+ 
++/* Update the PMTU of exceptions when:
++ * - the new MTU of the first hop becomes smaller than the PMTU
++ * - the old MTU was the same as the PMTU, and it limited discovery of
++ *   larger MTUs on the path. With that limit raised, we can now
++ *   discover larger MTUs
++ * A special case is locked exceptions, for which the PMTU is smaller
++ * than the minimal accepted PMTU:
++ * - if the new MTU is greater than the PMTU, don't make any change
++ * - otherwise, unlock and set PMTU
++ */
++static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig)
++{
++      struct fnhe_hash_bucket *bucket;
++      int i;
++
++      bucket = rcu_dereference_protected(nh->nh_exceptions, 1);
++      if (!bucket)
++              return;
++
++      for (i = 0; i < FNHE_HASH_SIZE; i++) {
++              struct fib_nh_exception *fnhe;
++
++              for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
++                   fnhe;
++                   fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) {
++                      if (fnhe->fnhe_mtu_locked) {
++                              if (new <= fnhe->fnhe_pmtu) {
++                                      fnhe->fnhe_pmtu = new;
++                                      fnhe->fnhe_mtu_locked = false;
++                              }
++                      } else if (new < fnhe->fnhe_pmtu ||
++                                 orig == fnhe->fnhe_pmtu) {
++                              fnhe->fnhe_pmtu = new;
++                      }
++              }
++      }
++}
++
++void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
++{
++      unsigned int hash = fib_devindex_hashfn(dev->ifindex);
++      struct hlist_head *head = &fib_info_devhash[hash];
++      struct fib_nh *nh;
++
++      hlist_for_each_entry(nh, head, nh_hash) {
++              if (nh->nh_dev == dev)
++                      nh_update_mtu(nh, dev->mtu, orig_mtu);
++      }
++}
++
+ /* Event              force Flags           Description
+  * NETDEV_CHANGE      0     LINKDOWN        Carrier OFF, not for scope host
+  * NETDEV_DOWN        0     LINKDOWN|DEAD   Link down, not for scope host
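
The comment above nh_update_mtu() spells out the rules the new code applies: a locked exception is only ever lowered (and unlocked), never raised; an unlocked exception is rewritten when the new MTU is smaller, or when the old device MTU was exactly the value that capped it, so a larger path MTU can be rediscovered. The same decision table, reduced to a standalone sketch with a hypothetical exception struct (not the kernel's fnhe):

#include <stdbool.h>
#include <stdio.h>

struct exception {                      /* stand-in for fib_nh_exception */
        unsigned int pmtu;
        bool locked;
};

/* Mirrors the nh_update_mtu() rules from the patch. */
static void update_exception(struct exception *e, unsigned int new_mtu,
                             unsigned int old_mtu)
{
        if (e->locked) {
                if (new_mtu <= e->pmtu) {       /* never raise a locked PMTU */
                        e->pmtu = new_mtu;
                        e->locked = false;
                }
        } else if (new_mtu < e->pmtu || old_mtu == e->pmtu) {
                e->pmtu = new_mtu;              /* shrink, or allow rediscovery */
        }
}

int main(void)
{
        struct exception a = { .pmtu = 1500, .locked = false };
        struct exception b = { .pmtu = 552,  .locked = true  };

        update_exception(&a, 1400, 1500);       /* old MTU was the cap -> 1400 */
        update_exception(&b, 9000, 1500);       /* locked, new > pmtu -> unchanged */
        printf("a.pmtu=%u b.pmtu=%u b.locked=%d\n", a.pmtu, b.pmtu, b.locked);
        return 0;
}
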
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 88426a6a7a85..3f8caf7d19b8 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -134,7 +134,6 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
+ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
+ {
+       struct sockaddr_in sin;
+-      const struct iphdr *iph = ip_hdr(skb);
+       __be16 *ports;
+       int end;
+ 
+@@ -149,7 +148,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
+       ports = (__be16 *)skb_transport_header(skb);
+ 
+       sin.sin_family = AF_INET;
+-      sin.sin_addr.s_addr = iph->daddr;
++      sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
+       sin.sin_port = ports[1];
+       memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
+ 
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 3d62feb65932..9d3176b080a4 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -597,6 +597,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+                   const struct iphdr *tnl_params, u8 protocol)
+ {
+       struct ip_tunnel *tunnel = netdev_priv(dev);
++      unsigned int inner_nhdr_len = 0;
+       const struct iphdr *inner_iph;
+       struct flowi4 fl4;
+       u8     tos, ttl;
+@@ -607,6 +608,14 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+       int err;
+       bool connected;
+ 
++      /* ensure we can access the inner net header, for several users below */
++      if (skb->protocol == htons(ETH_P_IP))
++              inner_nhdr_len = sizeof(struct iphdr);
++      else if (skb->protocol == htons(ETH_P_IPV6))
++              inner_nhdr_len = sizeof(struct ipv6hdr);
++      if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
++              goto tx_error;
++
+       inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+       connected = (tunnel->parms.iph.daddr != 0);
+ 
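The ip_tunnel_xmit() hunk refuses to dereference the inner IPv4 or IPv6 header until pskb_may_pull() has confirmed that many bytes are present in the skb's linear area; the ip6_tunnel hunks further down apply the same rule. A generic sketch of check-the-length-before-parsing, using a plain buffer rather than an skb and a hypothetical header struct:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ipv4_hdr {                       /* 20-byte fixed IPv4 header layout */
        uint8_t  ver_ihl, tos;
        uint16_t tot_len, id, frag_off;
        uint8_t  ttl, protocol;
        uint16_t check;
        uint32_t saddr, daddr;
};

/* Refuse to parse when fewer bytes than a full header are available, the way
 * ip_tunnel_xmit() now calls pskb_may_pull() before reading the inner header. */
static int parse_inner(const uint8_t *pkt, size_t len)
{
        struct ipv4_hdr hdr;

        if (len < sizeof(hdr))
                return -1;              /* truncated: drop the packet */
        memcpy(&hdr, pkt, sizeof(hdr));
        return hdr.ver_ihl >> 4;        /* IP version */
}

int main(void)
{
        uint8_t full[20] = { 0x45 };    /* minimal IPv4 header bytes */
        uint8_t runt[2]  = { 0x45 };

        printf("full: version %d\n", parse_inner(full, sizeof(full)));
        printf("runt: %d\n", parse_inner(runt, sizeof(runt)));
        return 0;
}
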
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 0613be57513e..582e757e5727 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3786,7 +3786,6 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
+                               p++;
+                               continue;
+                       }
+-                      state->offset++;
+                       return ifa;
+               }
+ 
+@@ -3810,13 +3809,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
+               return ifa;
+       }
+ 
++      state->offset = 0;
+       while (++state->bucket < IN6_ADDR_HSIZE) {
+-              state->offset = 0;
+               hlist_for_each_entry_rcu_bh(ifa,
+                                    &inet6_addr_lst[state->bucket], addr_lst) {
+                       if (!net_eq(dev_net(ifa->idev->dev), net))
+                               continue;
+-                      state->offset++;
+                       return ifa;
+               }
+       }
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index e8f21dd520b2..7c7a74ea2b0d 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1096,7 +1096,7 @@ static inline int
+ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct ip6_tnl *t = netdev_priv(dev);
+-      const struct iphdr  *iph = ip_hdr(skb);
++      const struct iphdr  *iph;
+       int encap_limit = -1;
+       struct flowi6 fl6;
+       __u8 dsfield;
+@@ -1104,6 +1104,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+       u8 tproto;
+       int err;
+ 
++      /* ensure we can access the full inner ip header */
++      if (!pskb_may_pull(skb, sizeof(struct iphdr)))
++              return -1;
++
++      iph = ip_hdr(skb);
+       memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ 
+       tproto = ACCESS_ONCE(t->parms.proto);
+@@ -1140,7 +1145,7 @@ static inline int
+ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct ip6_tnl *t = netdev_priv(dev);
+-      struct ipv6hdr *ipv6h = ipv6_hdr(skb);
++      struct ipv6hdr *ipv6h;
+       int encap_limit = -1;
+       __u16 offset;
+       struct flowi6 fl6;
+@@ -1149,6 +1154,10 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+       u8 tproto;
+       int err;
+ 
++      if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
++              return -1;
++
++      ipv6h = ipv6_hdr(skb);
+       tproto = ACCESS_ONCE(t->parms.proto);
+       if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
+           ip6_tnl_addr_conflict(t, ipv6h))
+diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
+index 3f33ec44bd28..9f4ec16abfcf 100644
+--- a/net/netlabel/netlabel_unlabeled.c
++++ b/net/netlabel/netlabel_unlabeled.c
+@@ -787,7 +787,8 @@ static int netlbl_unlabel_addrinfo_get(struct genl_info *info,
+ {
+       u32 addr_len;
+ 
+-      if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) {
++      if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] &&
++          info->attrs[NLBL_UNLABEL_A_IPV4MASK]) {
+               addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]);
+               if (addr_len != sizeof(struct in_addr) &&
+                   addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK]))
+diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
+index b5a17cb510a0..4727f5b80e76 100644
+--- a/sound/hda/hdac_controller.c
++++ b/sound/hda/hdac_controller.c
+@@ -40,6 +40,8 @@ static void azx_clear_corbrp(struct hdac_bus *bus)
+  */
+ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus)
+ {
++      WARN_ON_ONCE(!bus->rb.area);
++
+       spin_lock_irq(&bus->reg_lock);
+       /* CORB set up */
+       bus->corb.addr = bus->rb.addr;
+@@ -377,13 +379,15 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
+       /* reset controller */
+       azx_reset(bus, full_reset);
+ 
+-      /* initialize interrupts */
++      /* clear interrupts */
+       azx_int_clear(bus);
+-      azx_int_enable(bus);
+ 
+       /* initialize the codec command I/O */
+       snd_hdac_bus_init_cmd_io(bus);
+ 
++      /* enable interrupts after CORB/RIRB buffers are initialized above */
++      azx_int_enable(bus);
++
+       /* program the position buffer */
+       if (bus->use_posbuf && bus->posbuf.addr) {
+               snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr);
+diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
+index d53680ac78e4..6df158669420 100644
+--- a/sound/soc/codecs/sigmadsp.c
++++ b/sound/soc/codecs/sigmadsp.c
+@@ -117,8 +117,7 @@ static int sigmadsp_ctrl_write(struct sigmadsp *sigmadsp,
+       struct sigmadsp_control *ctrl, void *data)
+ {
+       /* safeload loads up to 20 bytes in a atomic operation */
+-      if (ctrl->num_bytes > 4 && ctrl->num_bytes <= 20 && sigmadsp->ops &&
+-          sigmadsp->ops->safeload)
++      if (ctrl->num_bytes <= 20 && sigmadsp->ops && sigmadsp->ops->safeload)
+               return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data,
+                       ctrl->num_bytes);
+       else
+diff --git a/sound/soc/codecs/wm8804-i2c.c b/sound/soc/codecs/wm8804-i2c.c
+index f27464c2c5ba..79541960f45d 100644
+--- a/sound/soc/codecs/wm8804-i2c.c
++++ b/sound/soc/codecs/wm8804-i2c.c
+@@ -13,6 +13,7 @@
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/i2c.h>
++#include <linux/acpi.h>
+ 
+ #include "wm8804.h"
+ 
+@@ -40,17 +41,29 @@ static const struct i2c_device_id wm8804_i2c_id[] = {
+ };
+ MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id);
+ 
++#if defined(CONFIG_OF)
+ static const struct of_device_id wm8804_of_match[] = {
+       { .compatible = "wlf,wm8804", },
+       { }
+ };
+ MODULE_DEVICE_TABLE(of, wm8804_of_match);
++#endif
++
++#ifdef CONFIG_ACPI
++static const struct acpi_device_id wm8804_acpi_match[] = {
++      { "1AEC8804", 0 }, /* Wolfson PCI ID + part ID */
++      { "10138804", 0 }, /* Cirrus Logic PCI ID + part ID */
++      { },
++};
++MODULE_DEVICE_TABLE(acpi, wm8804_acpi_match);
++#endif
+ 
+ static struct i2c_driver wm8804_i2c_driver = {
+       .driver = {
+               .name = "wm8804",
+               .pm = &wm8804_pm,
+-              .of_match_table = wm8804_of_match,
++              .of_match_table = of_match_ptr(wm8804_of_match),
++              .acpi_match_table = ACPI_PTR(wm8804_acpi_match),
+       },
+       .probe = wm8804_i2c_probe,
+       .remove = wm8804_i2c_remove,
+diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
+index 1b02cdc0cab6..84cb5913b05a 100644
+--- a/tools/perf/scripts/python/export-to-postgresql.py
++++ b/tools/perf/scripts/python/export-to-postgresql.py
+@@ -205,14 +205,23 @@ from ctypes import *
+ libpq = CDLL("libpq.so.5")
+ PQconnectdb = libpq.PQconnectdb
+ PQconnectdb.restype = c_void_p
++PQconnectdb.argtypes = [ c_char_p ]
+ PQfinish = libpq.PQfinish
++PQfinish.argtypes = [ c_void_p ]
+ PQstatus = libpq.PQstatus
++PQstatus.restype = c_int
++PQstatus.argtypes = [ c_void_p ]
+ PQexec = libpq.PQexec
+ PQexec.restype = c_void_p
++PQexec.argtypes = [ c_void_p, c_char_p ]
+ PQresultStatus = libpq.PQresultStatus
++PQresultStatus.restype = c_int
++PQresultStatus.argtypes = [ c_void_p ]
+ PQputCopyData = libpq.PQputCopyData
++PQputCopyData.restype = c_int
+ PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
+ PQputCopyEnd = libpq.PQputCopyEnd
++PQputCopyEnd.restype = c_int
+ PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
+ 
+ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+diff --git a/tools/testing/selftests/efivarfs/config b/tools/testing/selftests/efivarfs/config
+new file mode 100644
+index 000000000000..4e151f1005b2
+--- /dev/null
++++ b/tools/testing/selftests/efivarfs/config
+@@ -0,0 +1 @@
++CONFIG_EFIVAR_FS=y
