commit:     1b545d7bb0fcd1e7a4fa464cab3c4ea6d7ab25a9
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jun 16 15:42:08 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jun 16 15:42:08 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1b545d7b

Linux patch 4.9.109

 0000_README              |    4 +
 1108_linux-4.9.109.patch | 1339 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1343 insertions(+)

diff --git a/0000_README b/0000_README
index b2d28d0..20d5731 100644
--- a/0000_README
+++ b/0000_README
@@ -475,6 +475,10 @@ Patch:  1107_linux-4.9.108.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.108
 
+Patch:  1108_linux-4.9.109.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.109
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1108_linux-4.9.109.patch b/1108_linux-4.9.109.patch
new file mode 100644
index 0000000..4ba7bc6
--- /dev/null
+++ b/1108_linux-4.9.109.patch
@@ -0,0 +1,1339 @@
+diff --git a/Makefile b/Makefile
+index 1fa9daf219c4..1570cc85313d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 108
++SUBLEVEL = 109
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
+index 60a391b8c4a2..dd1958436591 100644
+--- a/arch/x86/crypto/crc32c-intel_glue.c
++++ b/arch/x86/crypto/crc32c-intel_glue.c
+@@ -58,16 +58,11 @@
+ asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
+                               unsigned int crc_init);
+ static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
+-#if defined(X86_FEATURE_EAGER_FPU)
+ #define set_pcl_breakeven_point()                                     \
+ do {                                                                  \
+       if (!use_eager_fpu())                                           \
+               crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \
+ } while (0)
+-#else
+-#define set_pcl_breakeven_point()                                     \
+-      (crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU)
+-#endif
+ #endif /* CONFIG_X86_64 */
+ 
+ static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index c278f276c9b3..aea30afeddb8 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -104,7 +104,7 @@
+ #define X86_FEATURE_EXTD_APICID       ( 3*32+26) /* has extended APICID (8 bits) */
+ #define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
+ #define X86_FEATURE_APERFMPERF        ( 3*32+28) /* APERFMPERF */
+-#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
++/* free, was #define X86_FEATURE_EAGER_FPU    ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
+ #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+ 
+ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index 2737366ea583..8852e3afa1ad 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -62,7 +62,7 @@ extern u64 fpu__get_supported_xfeatures_mask(void);
+  */
+ static __always_inline __pure bool use_eager_fpu(void)
+ {
+-      return static_cpu_has(X86_FEATURE_EAGER_FPU);
++      return true;
+ }
+ 
+ static __always_inline __pure bool use_xsaveopt(void)
+diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
+index fc3c7e49c8e4..ae357d0afc91 100644
+--- a/arch/x86/include/asm/kvm_emulate.h
++++ b/arch/x86/include/asm/kvm_emulate.h
+@@ -105,11 +105,12 @@ struct x86_emulate_ops {
+        *  @addr:  [IN ] Linear address from which to read.
+        *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
+        *  @bytes: [IN ] Number of bytes to read from memory.
++       *  @system:[IN ] Whether the access is forced to be at CPL0.
+        */
+       int (*read_std)(struct x86_emulate_ctxt *ctxt,
+                       unsigned long addr, void *val,
+                       unsigned int bytes,
+-                      struct x86_exception *fault);
++                      struct x86_exception *fault, bool system);
+ 
+       /*
+        * read_phys: Read bytes of standard (non-emulated/special) memory.
+@@ -127,10 +128,11 @@ struct x86_emulate_ops {
+        *  @addr:  [IN ] Linear address to which to write.
+        *  @val:   [OUT] Value write to memory, zero-extended to 'u_long'.
+        *  @bytes: [IN ] Number of bytes to write to memory.
++       *  @system:[IN ] Whether the access is forced to be at CPL0.
+        */
+       int (*write_std)(struct x86_emulate_ctxt *ctxt,
+                        unsigned long addr, void *val, unsigned int bytes,
+-                       struct x86_exception *fault);
++                       struct x86_exception *fault, bool system);
+       /*
+        * fetch: Read bytes of standard (non-emulated/special) memory.
+        *        Used for instruction fetch.
+diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
+index 6f0ab305dd5e..9f3657891b87 100644
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -15,10 +15,7 @@
+  */
+ static void fpu__init_cpu_ctx_switch(void)
+ {
+-      if (!boot_cpu_has(X86_FEATURE_EAGER_FPU))
+-              stts();
+-      else
+-              clts();
++      clts();
+ }
+ 
+ /*
+@@ -233,42 +230,6 @@ static void __init fpu__init_system_xstate_size_legacy(void)
+       fpu_user_xstate_size = fpu_kernel_xstate_size;
+ }
+ 
+-/*
+- * FPU context switching strategies:
+- *
+- * Against popular belief, we don't do lazy FPU saves, due to the
+- * task migration complications it brings on SMP - we only do
+- * lazy FPU restores.
+- *
+- * 'lazy' is the traditional strategy, which is based on setting
+- * CR0::TS to 1 during context-switch (instead of doing a full
+- * restore of the FPU state), which causes the first FPU instruction
+- * after the context switch (whenever it is executed) to fault - at
+- * which point we lazily restore the FPU state into FPU registers.
+- *
+- * Tasks are of course under no obligation to execute FPU instructions,
+- * so it can easily happen that another context-switch occurs without
+- * a single FPU instruction being executed. If we eventually switch
+- * back to the original task (that still owns the FPU) then we have
+- * not only saved the restores along the way, but we also have the
+- * FPU ready to be used for the original task.
+- *
+- * 'lazy' is deprecated because it's almost never a performance win
+- * and it's much more complicated than 'eager'.
+- *
+- * 'eager' switching is by default on all CPUs, there we switch the FPU
+- * state during every context switch, regardless of whether the task
+- * has used FPU instructions in that time slice or not. This is done
+- * because modern FPU context saving instructions are able to optimize
+- * state saving and restoration in hardware: they can detect both
+- * unused and untouched FPU state and optimize accordingly.
+- *
+- * [ Note that even in 'lazy' mode we might optimize context switches
+- *   to use 'eager' restores, if we detect that a task is using the FPU
+- *   frequently. See the fpu->counter logic in fpu/internal.h for that. ]
+- */
+-static enum { ENABLE, DISABLE } eagerfpu = ENABLE;
+-
+ /*
+  * Find supported xfeatures based on cpu features and command-line input.
+  * This must be called after fpu__init_parse_early_param() is called and
+@@ -276,40 +237,10 @@ static enum { ENABLE, DISABLE } eagerfpu = ENABLE;
+  */
+ u64 __init fpu__get_supported_xfeatures_mask(void)
+ {
+-      /* Support all xfeatures known to us */
+-      if (eagerfpu != DISABLE)
+-              return XCNTXT_MASK;
+-
+-      /* Warning of xfeatures being disabled for no eagerfpu mode */
+-      if (xfeatures_mask & XFEATURE_MASK_EAGER) {
+-              pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
+-                      xfeatures_mask & XFEATURE_MASK_EAGER);
+-      }
+-
+-      /* Return a mask that masks out all features requiring eagerfpu mode */
+-      return ~XFEATURE_MASK_EAGER;
+-}
+-
+-/*
+- * Disable features dependent on eagerfpu.
+- */
+-static void __init fpu__clear_eager_fpu_features(void)
+-{
+-      setup_clear_cpu_cap(X86_FEATURE_MPX);
++      return XCNTXT_MASK;
+ }
+ 
+-/*
+- * Pick the FPU context switching strategy:
+- *
+- * When eagerfpu is AUTO or ENABLE, we ensure it is ENABLE if either of
+- * the following is true:
+- *
+- * (1) the cpu has xsaveopt, as it has the optimization and doing eager
+- *     FPU switching has a relatively low cost compared to a plain xsave;
+- * (2) the cpu has xsave features (e.g. MPX) that depend on eager FPU
+- *     switching. Should the kernel boot with noxsaveopt, we support MPX
+- *     with eager FPU switching at a higher cost.
+- */
++/* Legacy code to initialize eager fpu mode. */
+ static void __init fpu__init_system_ctx_switch(void)
+ {
+       static bool on_boot_cpu __initdata = 1;
+@@ -318,17 +249,6 @@ static void __init fpu__init_system_ctx_switch(void)
+       on_boot_cpu = 0;
+ 
+       WARN_ON_FPU(current->thread.fpu.fpstate_active);
+-
+-      if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE)
+-              eagerfpu = ENABLE;
+-
+-      if (xfeatures_mask & XFEATURE_MASK_EAGER)
+-              eagerfpu = ENABLE;
+-
+-      if (eagerfpu == ENABLE)
+-              setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
+-
+-      printk(KERN_INFO "x86/fpu: Using '%s' FPU context switches.\n", eagerfpu == ENABLE ? "eager" : "lazy");
+ }
+ 
+ /*
+@@ -337,11 +257,6 @@ static void __init fpu__init_system_ctx_switch(void)
+  */
+ static void __init fpu__init_parse_early_param(void)
+ {
+-      if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off")) {
+-              eagerfpu = DISABLE;
+-              fpu__clear_eager_fpu_features();
+-      }
+-
+       if (cmdline_find_option_bool(boot_command_line, "no387"))
+               setup_clear_cpu_cap(X86_FEATURE_FPU);
+ 
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index c8d573822e60..510cfc06701a 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -802,6 +802,19 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+       return assign_eip_near(ctxt, ctxt->_eip + rel);
+ }
+ 
++static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
++                            void *data, unsigned size)
++{
++      return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
++}
++
++static int linear_write_system(struct x86_emulate_ctxt *ctxt,
++                             ulong linear, void *data,
++                             unsigned int size)
++{
++      return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
++}
++
+ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
+                             struct segmented_address addr,
+                             void *data,
+@@ -813,7 +826,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
+       rc = linearize(ctxt, addr, size, false, &linear);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+-      return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
++      return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
+ }
+ 
+ static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+@@ -827,7 +840,7 @@ static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+       rc = linearize(ctxt, addr, size, true, &linear);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+-      return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
++      return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
+ }
+ 
+ /*
+@@ -1500,8 +1513,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
+               return emulate_gp(ctxt, index << 3 | 0x2);
+ 
+       addr = dt.address + index * 8;
+-      return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
+-                                 &ctxt->exception);
++      return linear_read_system(ctxt, addr, desc, sizeof *desc);
+ }
+ 
+ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
+@@ -1564,8 +1576,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+ 
+-      return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
+-                                 &ctxt->exception);
++      return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
+ }
+ 
+ /* allowed just for 8 bytes segments */
+@@ -1579,8 +1590,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+ 
+-      return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
+-                                  &ctxt->exception);
++      return linear_write_system(ctxt, addr, desc, sizeof *desc);
+ }
+ 
+ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+@@ -1741,8 +1751,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+                               return ret;
+               }
+       } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
+-              ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
+-                              sizeof(base3), &ctxt->exception);
++              ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
+               if (ret != X86EMUL_CONTINUE)
+                       return ret;
+               if (is_noncanonical_address(get_desc_base(&seg_desc) |
+@@ -2055,11 +2064,11 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
+       eip_addr = dt.address + (irq << 2);
+       cs_addr = dt.address + (irq << 2) + 2;
+ 
+-      rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
++      rc = linear_read_system(ctxt, cs_addr, &cs, 2);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+ 
+-      rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
++      rc = linear_read_system(ctxt, eip_addr, &eip, 2);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+ 
+@@ -2903,12 +2912,12 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
+ #ifdef CONFIG_X86_64
+       base |= ((u64)base3) << 32;
+ #endif
+-      r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
++      r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
+       if (r != X86EMUL_CONTINUE)
+               return false;
+       if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
+               return false;
+-      r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
++      r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
+       if (r != X86EMUL_CONTINUE)
+               return false;
+       if ((perm >> bit_idx) & mask)
+@@ -3037,35 +3046,30 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
+                         u16 tss_selector, u16 old_tss_sel,
+                         ulong old_tss_base, struct desc_struct *new_desc)
+ {
+-      const struct x86_emulate_ops *ops = ctxt->ops;
+       struct tss_segment_16 tss_seg;
+       int ret;
+       u32 new_tss_base = get_desc_base(new_desc);
+ 
+-      ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
+-                          &ctxt->exception);
++      ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+       if (ret != X86EMUL_CONTINUE)
+               return ret;
+ 
+       save_state_to_tss16(ctxt, &tss_seg);
+ 
+-      ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
+-                           &ctxt->exception);
++      ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+       if (ret != X86EMUL_CONTINUE)
+               return ret;
+ 
+-      ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
+-                          &ctxt->exception);
++      ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
+       if (ret != X86EMUL_CONTINUE)
+               return ret;
+ 
+       if (old_tss_sel != 0xffff) {
+               tss_seg.prev_task_link = old_tss_sel;
+ 
+-              ret = ops->write_std(ctxt, new_tss_base,
+-                                   &tss_seg.prev_task_link,
+-                                   sizeof tss_seg.prev_task_link,
+-                                   &ctxt->exception);
++              ret = linear_write_system(ctxt, new_tss_base,
++                                        &tss_seg.prev_task_link,
++                                        sizeof tss_seg.prev_task_link);
+               if (ret != X86EMUL_CONTINUE)
+                       return ret;
+       }
+@@ -3181,38 +3185,34 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
+                         u16 tss_selector, u16 old_tss_sel,
+                         ulong old_tss_base, struct desc_struct *new_desc)
+ {
+-      const struct x86_emulate_ops *ops = ctxt->ops;
+       struct tss_segment_32 tss_seg;
+       int ret;
+       u32 new_tss_base = get_desc_base(new_desc);
+       u32 eip_offset = offsetof(struct tss_segment_32, eip);
+       u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
+ 
+-      ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
+-                          &ctxt->exception);
++      ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+       if (ret != X86EMUL_CONTINUE)
+               return ret;
+ 
+       save_state_to_tss32(ctxt, &tss_seg);
+ 
+       /* Only GP registers and segment selectors are saved */
+-      ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
+-                           ldt_sel_offset - eip_offset, &ctxt->exception);
++      ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
++                                ldt_sel_offset - eip_offset);
+       if (ret != X86EMUL_CONTINUE)
+               return ret;
+ 
+-      ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
+-                          &ctxt->exception);
++      ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
+       if (ret != X86EMUL_CONTINUE)
+               return ret;
+ 
+       if (old_tss_sel != 0xffff) {
+               tss_seg.prev_task_link = old_tss_sel;
+ 
+-              ret = ops->write_std(ctxt, new_tss_base,
+-                                   &tss_seg.prev_task_link,
+-                                   sizeof tss_seg.prev_task_link,
+-                                   &ctxt->exception);
++              ret = linear_write_system(ctxt, new_tss_base,
++                                        &tss_seg.prev_task_link,
++                                        sizeof tss_seg.prev_task_link);
+               if (ret != X86EMUL_CONTINUE)
+                       return ret;
+       }
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 4a66a620fc17..4e0292e0aafb 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -6928,8 +6928,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
+                       vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
+               return 1;
+ 
+-      if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
+-                              sizeof(vmptr), &e)) {
++      if (kvm_read_guest_virt(vcpu, gva, &vmptr, sizeof(vmptr), &e)) {
+               kvm_inject_page_fault(vcpu, &e);
+               return 1;
+       }
+@@ -7469,8 +7468,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
+                               vmx_instruction_info, true, &gva))
+                       return 1;
+               /* _system ok, as nested_vmx_check_permission verified cpl=0 */
+-              kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
+-                           &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
++              kvm_write_guest_virt_system(vcpu, gva, &field_value,
++                                          (is_long_mode(vcpu) ? 8 : 4), NULL);
+       }
+ 
+       nested_vmx_succeed(vcpu);
+@@ -7505,8 +7504,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
+               if (get_vmx_mem_address(vcpu, exit_qualification,
+                               vmx_instruction_info, false, &gva))
+                       return 1;
+-              if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
+-                         &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
++              if (kvm_read_guest_virt(vcpu, gva, &field_value,
++                                      (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
+                       kvm_inject_page_fault(vcpu, &e);
+                       return 1;
+               }
+@@ -7603,9 +7602,9 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
+                       vmx_instruction_info, true, &vmcs_gva))
+               return 1;
+       /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
+-      if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
+-                               (void *)&to_vmx(vcpu)->nested.current_vmptr,
+-                               sizeof(u64), &e)) {
++      if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
++                                      (void *)&to_vmx(vcpu)->nested.current_vmptr,
++                                      sizeof(u64), &e)) {
+               kvm_inject_page_fault(vcpu, &e);
+               return 1;
+       }
+@@ -7659,8 +7658,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
+       if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+                       vmx_instruction_info, false, &gva))
+               return 1;
+-      if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
+-                              sizeof(operand), &e)) {
++      if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+               kvm_inject_page_fault(vcpu, &e);
+               return 1;
+       }
+@@ -7723,8 +7721,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
+       if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+                       vmx_instruction_info, false, &gva))
+               return 1;
+-      if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid,
+-                              sizeof(u32), &e)) {
++      if (kvm_read_guest_virt(vcpu, gva, &vpid, sizeof(u32), &e)) {
+               kvm_inject_page_fault(vcpu, &e);
+               return 1;
+       }
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 4aa265ae8cf7..5ca23af44c81 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4395,11 +4395,10 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
+       return X86EMUL_CONTINUE;
+ }
+ 
+-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
++int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
+                              gva_t addr, void *val, unsigned int bytes,
+                              struct x86_exception *exception)
+ {
+-      struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ 
+       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
+@@ -4407,12 +4406,17 @@ int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+ }
+ EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
+ 
+-static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+-                                    gva_t addr, void *val, unsigned int bytes,
+-                                    struct x86_exception *exception)
++static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
++                           gva_t addr, void *val, unsigned int bytes,
++                           struct x86_exception *exception, bool system)
+ {
+       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+-      return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
++      u32 access = 0;
++
++      if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
++              access |= PFERR_USER_MASK;
++
++      return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
+ }
+ 
+ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
+@@ -4424,18 +4428,16 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
+       return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
+ }
+ 
+-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+-                                     gva_t addr, void *val,
+-                                     unsigned int bytes,
+-                                     struct x86_exception *exception)
++static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
++                                    struct kvm_vcpu *vcpu, u32 access,
++                                    struct x86_exception *exception)
+ {
+-      struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+       void *data = val;
+       int r = X86EMUL_CONTINUE;
+ 
+       while (bytes) {
+               gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
+-                                                           PFERR_WRITE_MASK,
++                                                           access,
+                                                            exception);
+               unsigned offset = addr & (PAGE_SIZE-1);
+               unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
+@@ -4456,6 +4458,27 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+ out:
+       return r;
+ }
++
++static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
++                            unsigned int bytes, struct x86_exception *exception,
++                            bool system)
++{
++      struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++      u32 access = PFERR_WRITE_MASK;
++
++      if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
++              access |= PFERR_USER_MASK;
++
++      return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
++                                         access, exception);
++}
++
++int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
++                              unsigned int bytes, struct x86_exception *exception)
++{
++      return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
++                                         PFERR_WRITE_MASK, exception);
++}
+ EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
+ 
+ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
+@@ -5180,8 +5203,8 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla
+ static const struct x86_emulate_ops emulate_ops = {
+       .read_gpr            = emulator_read_gpr,
+       .write_gpr           = emulator_write_gpr,
+-      .read_std            = kvm_read_guest_virt_system,
+-      .write_std           = kvm_write_guest_virt_system,
++      .read_std            = emulator_read_std,
++      .write_std           = emulator_write_std,
+       .read_phys           = kvm_read_guest_phys_system,
+       .fetch               = kvm_fetch_guest_virt,
+       .read_emulated       = emulator_read_emulated,
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index e8ff3e4ce38a..2133a18f2d36 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -161,11 +161,11 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
+ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
+ u64 get_kvmclock_ns(struct kvm *kvm);
+ 
+-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
++int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
+       gva_t addr, void *val, unsigned int bytes,
+       struct x86_exception *exception);
+ 
+-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
++int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
+       gva_t addr, void *val, unsigned int bytes,
+       struct x86_exception *exception);
+ 
+diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
+index d8305ddf87d0..ff6ac4e824b5 100644
+--- a/drivers/crypto/omap-sham.c
++++ b/drivers/crypto/omap-sham.c
+@@ -1081,7 +1081,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
+ 
+       if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
+               free_pages((unsigned long)sg_virt(ctx->sg),
+-                         get_order(ctx->sg->length));
++                         get_order(ctx->sg->length + ctx->bufcnt));
+ 
+       if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
+               kfree(ctx->sg);
+diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
+index 022c7ab7351a..b0cd5aff3822 100644
+--- a/drivers/crypto/vmx/aes.c
++++ b/drivers/crypto/vmx/aes.c
+@@ -53,8 +53,6 @@ static int p8_aes_init(struct crypto_tfm *tfm)
+                      alg, PTR_ERR(fallback));
+               return PTR_ERR(fallback);
+       }
+-      printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-             crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+ 
+       crypto_cipher_set_flags(fallback,
+                               crypto_cipher_get_flags((struct
+diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
+index 94ad5c0adbcb..46131701c378 100644
+--- a/drivers/crypto/vmx/aes_cbc.c
++++ b/drivers/crypto/vmx/aes_cbc.c
+@@ -55,8 +55,6 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
+                      alg, PTR_ERR(fallback));
+               return PTR_ERR(fallback);
+       }
+-      printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-             crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+ 
+       crypto_blkcipher_set_flags(
+               fallback,
+diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
+index 7cf6d31c1123..6ef7548c5c87 100644
+--- a/drivers/crypto/vmx/aes_ctr.c
++++ b/drivers/crypto/vmx/aes_ctr.c
+@@ -53,8 +53,6 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
+                      alg, PTR_ERR(fallback));
+               return PTR_ERR(fallback);
+       }
+-      printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-             crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+ 
+       crypto_blkcipher_set_flags(
+               fallback,
+diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
+index 27a94a119009..1c4b5b889fba 100644
+--- a/drivers/crypto/vmx/ghash.c
++++ b/drivers/crypto/vmx/ghash.c
+@@ -64,8 +64,6 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
+                      alg, PTR_ERR(fallback));
+               return PTR_ERR(fallback);
+       }
+-      printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-             crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback)));
+ 
+       crypto_shash_set_flags(fallback,
+                              crypto_shash_get_flags((struct crypto_shash
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 56b24198741c..dd0076497463 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -3204,6 +3204,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
+       struct gpio_desc *desc = NULL;
+       int status;
+       enum gpio_lookup_flags lookupflags = 0;
++      /* Maybe we have a device name, maybe not */
++      const char *devname = dev ? dev_name(dev) : "?";
+ 
+       dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
+ 
+@@ -3232,8 +3234,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
+               return desc;
+       }
+ 
+-      /* If a connection label was passed use that, else use the device name as label */
+-      status = gpiod_request(desc, con_id ? con_id : dev_name(dev));
++      /*
++       * If a connection label was passed use that, else attempt to use
++       * the device name as label
++       */
++      status = gpiod_request(desc, con_id ? con_id : devname);
+       if (status < 0)
+               return ERR_PTR(status);
+ 
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index 3851d5715772..aeb8250ab079 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1249,6 +1249,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
+       { "ELAN060B", 0 },
+       { "ELAN060C", 0 },
+       { "ELAN0611", 0 },
++      { "ELAN0612", 0 },
+       { "ELAN1000", 0 },
+       { }
+ };
+diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
+index 5907fddcc966..c599b5a2373b 100644
+--- a/drivers/input/touchscreen/goodix.c
++++ b/drivers/input/touchscreen/goodix.c
+@@ -858,6 +858,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
+ #ifdef CONFIG_ACPI
+ static const struct acpi_device_id goodix_acpi_match[] = {
+       { "GDIX1001", 0 },
++      { "GDIX1002", 0 },
+       { }
+ };
+ MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
+diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
+index 1e688bfec567..fe90b7e04427 100644
+--- a/drivers/misc/vmw_balloon.c
++++ b/drivers/misc/vmw_balloon.c
+@@ -576,15 +576,9 @@ static void vmballoon_pop(struct vmballoon *b)
+               }
+       }
+ 
+-      if (b->batch_page) {
+-              vunmap(b->batch_page);
+-              b->batch_page = NULL;
+-      }
+-
+-      if (b->page) {
+-              __free_page(b->page);
+-              b->page = NULL;
+-      }
++      /* Clearing the batch_page unconditionally has no adverse effect */
++      free_page((unsigned long)b->batch_page);
++      b->batch_page = NULL;
+ }
+ 
+ /*
+@@ -991,16 +985,13 @@ static const struct vmballoon_ops vmballoon_batched_ops = {
+ 
+ static bool vmballoon_init_batching(struct vmballoon *b)
+ {
+-      b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
+-      if (!b->page)
+-              return false;
++      struct page *page;
+ 
+-      b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
+-      if (!b->batch_page) {
+-              __free_page(b->page);
++      page = alloc_page(GFP_KERNEL | __GFP_ZERO);
++      if (!page)
+               return false;
+-      }
+ 
++      b->batch_page = page_address(page);
+       return true;
+ }
+ 
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 1a139d0f2232..f5fcc0850dac 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -384,20 +384,15 @@ static int bond_update_speed_duplex(struct slave *slave)
+       slave->duplex = DUPLEX_UNKNOWN;
+ 
+       res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
+-      if (res < 0) {
+-              slave->link = BOND_LINK_DOWN;
++      if (res < 0)
+               return 1;
+-      }
+-      if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1)) {
+-              slave->link = BOND_LINK_DOWN;
++      if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
+               return 1;
+-      }
+       switch (ecmd.base.duplex) {
+       case DUPLEX_FULL:
+       case DUPLEX_HALF:
+               break;
+       default:
+-              slave->link = BOND_LINK_DOWN;
+               return 1;
+       }
+ 
+@@ -1536,7 +1531,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+       new_slave->delay = 0;
+       new_slave->link_failure_count = 0;
+ 
+-      bond_update_speed_duplex(new_slave);
++      if (bond_update_speed_duplex(new_slave) &&
++          bond_needs_speed_duplex(bond))
++              new_slave->link = BOND_LINK_DOWN;
+ 
+       new_slave->last_rx = jiffies -
+               (msecs_to_jiffies(bond->params.arp_interval) + 1);
+@@ -2140,7 +2137,14 @@ static void bond_miimon_commit(struct bonding *bond)
+                       continue;
+ 
+               case BOND_LINK_UP:
+-                      bond_update_speed_duplex(slave);
++                      if (bond_update_speed_duplex(slave) &&
++                          bond_needs_speed_duplex(bond)) {
++                              slave->link = BOND_LINK_DOWN;
++                              netdev_warn(bond->dev,
++                                          "failed to get link speed/duplex for %s\n",
++                                          slave->dev->name);
++                              continue;
++                      }
+                       bond_set_slave_link_state(slave, BOND_LINK_UP,
+                                                 BOND_SLAVE_NOTIFY_NOW);
+                       slave->last_link_up = jiffies;
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 642ee00e9143..a55d112583bd 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1126,11 +1126,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
+       if (result < 0)
+               goto release_cq;
+ 
++      nvme_init_queue(nvmeq, qid);
+       result = queue_request_irq(nvmeq);
+       if (result < 0)
+               goto release_sq;
+ 
+-      nvme_init_queue(nvmeq, qid);
+       return result;
+ 
+  release_sq:
+@@ -1248,6 +1248,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
+               return result;
+ 
+       nvmeq->cq_vector = 0;
++      nvme_init_queue(nvmeq, 0);
+       result = queue_request_irq(nvmeq);
+       if (result) {
+               nvmeq->cq_vector = -1;
+@@ -1776,7 +1777,6 @@ static void nvme_reset_work(struct work_struct *work)
+       if (result)
+               goto out;
+ 
+-      nvme_init_queue(dev->queues[0], 0);
+       result = nvme_alloc_admin_tags(dev);
+       if (result)
+               goto out;
+diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
+index f791d46fe50f..2caed285fd7b 100644
+--- a/drivers/nvme/target/admin-cmd.c
++++ b/drivers/nvme/target/admin-cmd.c
+@@ -166,11 +166,21 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
+       nvmet_req_complete(req, status);
+ }
+ 
++static void copy_and_pad(char *dst, int dst_len, const char *src, int src_len)
++{
++      int len = min(src_len, dst_len);
++
++      memcpy(dst, src, len);
++      if (dst_len > len)
++              memset(dst + len, ' ', dst_len - len);
++}
++
+ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
+ {
+       struct nvmet_ctrl *ctrl = req->sq->ctrl;
+       struct nvme_id_ctrl *id;
+       u16 status = 0;
++      const char model[] = "Linux";
+ 
+       id = kzalloc(sizeof(*id), GFP_KERNEL);
+       if (!id) {
+@@ -182,14 +192,10 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
+       id->vid = 0;
+       id->ssvid = 0;
+ 
+-      memset(id->sn, ' ', sizeof(id->sn));
+-      snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);
+-
+-      memset(id->mn, ' ', sizeof(id->mn));
+-      strncpy((char *)id->mn, "Linux", sizeof(id->mn));
+-
+-      memset(id->fr, ' ', sizeof(id->fr));
+-      strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
++      bin2hex(id->sn, &ctrl->subsys->serial,
++              min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
++      copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1);
++      copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE));
+ 
+       id->rab = 6;
+ 
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 3a044922b048..64b40a12abcf 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -743,9 +743,6 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
+       memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
+       memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
+ 
+-      /* generate a random serial number as our controllers are ephemeral: */
+-      get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));
+-
+       kref_init(&ctrl->ref);
+       ctrl->subsys = subsys;
+ 
+@@ -904,6 +901,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
+               return NULL;
+ 
+       subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */
++      /* generate a random serial number as our controllers are ephemeral: */
++      get_random_bytes(&subsys->serial, sizeof(subsys->serial));
+ 
+       switch (type) {
+       case NVME_NQN_NVME:
+diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
+index 26b87dc843d2..0bc530cdf2b4 100644
+--- a/drivers/nvme/target/nvmet.h
++++ b/drivers/nvme/target/nvmet.h
+@@ -110,7 +110,6 @@ struct nvmet_ctrl {
+ 
+       struct mutex            lock;
+       u64                     cap;
+-      u64                     serial;
+       u32                     cc;
+       u32                     csts;
+ 
+@@ -151,6 +150,7 @@ struct nvmet_subsys {
+       u16                     max_qid;
+ 
+       u64                     ver;
++      u64                     serial;
+       char                    *subsysnqn;
+ 
+       struct config_group     group;
+diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
+index 209a8f7ef02b..6f9974cb0e15 100644
+--- a/drivers/staging/android/ion/ion.c
++++ b/drivers/staging/android/ion/ion.c
+@@ -192,8 +192,11 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+ 
+ void ion_buffer_destroy(struct ion_buffer *buffer)
+ {
+-      if (WARN_ON(buffer->kmap_cnt > 0))
++      if (buffer->kmap_cnt > 0) {
++              pr_warn_once("%s: buffer still mapped in the kernel\n",
++                           __func__);
+               buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
++      }
+       buffer->heap->ops->free(buffer);
+       vfree(buffer->pages);
+       kfree(buffer);
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index e8b34f16ba2c..a3adf21f9dce 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -1078,13 +1078,14 @@ static int omap8250_no_handle_irq(struct uart_port *port)
+       return 0;
+ }
+ 
++static const u8 omap4_habit = UART_ERRATA_CLOCK_DISABLE;
+ static const u8 am3352_habit = OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE;
+ static const u8 dra742_habit = UART_ERRATA_CLOCK_DISABLE;
+ 
+ static const struct of_device_id omap8250_dt_ids[] = {
+       { .compatible = "ti,omap2-uart" },
+       { .compatible = "ti,omap3-uart" },
+-      { .compatible = "ti,omap4-uart" },
++      { .compatible = "ti,omap4-uart", .data = &omap4_habit, },
+       { .compatible = "ti,am3352-uart", .data = &am3352_habit, },
+       { .compatible = "ti,am4372-uart", .data = &am3352_habit, },
+       { .compatible = "ti,dra742-uart", .data = &dra742_habit, },
+@@ -1326,6 +1327,19 @@ static int omap8250_soft_reset(struct device *dev)
+       int sysc;
+       int syss;
+ 
++      /*
++       * At least on omap4, unused uarts may not idle after reset without
++       * a basic scr dma configuration even with no dma in use. The
++       * module clkctrl status bits will be 1 instead of 3 blocking idle
++       * for the whole clockdomain. The softreset below will clear scr,
++       * and we restore it on resume so this is safe to do on all SoCs
++       * needing omap8250_soft_reset() quirk. Do it in two writes as
++       * recommended in the comment for omap8250_update_scr().
++       */
++      serial_out(up, UART_OMAP_SCR, OMAP_UART_SCR_DMAMODE_1);
++      serial_out(up, UART_OMAP_SCR,
++                 OMAP_UART_SCR_DMAMODE_1 | OMAP_UART_SCR_DMAMODE_CTL);
++
+       sysc = serial_in(up, UART_OMAP_SYSC);
+ 
+       /* softreset the UART */
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 6b1863293fe1..41b0dd67fcce 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1726,10 +1726,26 @@ static int pl011_allocate_irq(struct uart_amba_port *uap)
+  */
+ static void pl011_enable_interrupts(struct uart_amba_port *uap)
+ {
++      unsigned int i;
++
+       spin_lock_irq(&uap->port.lock);
+ 
+       /* Clear out any spuriously appearing RX interrupts */
+       pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
++
++      /*
++       * RXIS is asserted only when the RX FIFO transitions from below
++       * to above the trigger threshold.  If the RX FIFO is already
++       * full to the threshold this can't happen and RXIS will now be
++       * stuck off.  Drain the RX FIFO explicitly to fix this:
++       */
++      for (i = 0; i < uap->fifosize * 2; ++i) {
++              if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
++                      break;
++
++              pl011_read(uap, REG_DR);
++      }
++
+       uap->im = UART011_RTIM;
+       if (!pl011_dma_rx_running(uap))
+               uap->im |= UART011_RXIM;
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index addb287cacea..5a341b1c65c3 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -1803,7 +1803,6 @@ static int atmel_startup(struct uart_port *port)
+ {
+       struct platform_device *pdev = to_platform_device(port->dev);
+       struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+-      struct tty_struct *tty = port->state->port.tty;
+       int retval;
+ 
+       /*
+@@ -1818,8 +1817,8 @@ static int atmel_startup(struct uart_port *port)
+        * Allocate the IRQ
+        */
+       retval = request_irq(port->irq, atmel_interrupt,
+-                      IRQF_SHARED | IRQF_COND_SUSPEND,
+-                      tty ? tty->name : "atmel_serial", port);
++                           IRQF_SHARED | IRQF_COND_SUSPEND,
++                           dev_name(&pdev->dev), port);
+       if (retval) {
+               dev_err(port->dev, "atmel_startup - Can't get irq\n");
+               return retval;
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index f2ab6d8aab41..5609305b3676 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -866,15 +866,12 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
+       dma->rx_conf.direction          = DMA_DEV_TO_MEM;
+       dma->rx_conf.src_addr_width     = DMA_SLAVE_BUSWIDTH_1_BYTE;
+       dma->rx_conf.src_addr           = p->port.mapbase + S3C2410_URXH;
+-      dma->rx_conf.src_maxburst       = 16;
++      dma->rx_conf.src_maxburst       = 1;
+ 
+       dma->tx_conf.direction          = DMA_MEM_TO_DEV;
+       dma->tx_conf.dst_addr_width     = DMA_SLAVE_BUSWIDTH_1_BYTE;
+       dma->tx_conf.dst_addr           = p->port.mapbase + S3C2410_UTXH;
+-      if (dma_get_cache_alignment() >= 16)
+-              dma->tx_conf.dst_maxburst = 16;
+-      else
+-              dma->tx_conf.dst_maxburst = 1;
++      dma->tx_conf.dst_maxburst       = 1;
+ 
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index 107f0d194ac5..da46f0fba5da 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -2626,8 +2626,8 @@ static int sci_init_clocks(struct sci_port *sci_port, struct device *dev)
+                       dev_dbg(dev, "failed to get %s (%ld)\n", clk_names[i],
+                               PTR_ERR(clk));
+               else
+-                      dev_dbg(dev, "clk %s is %pC rate %pCr\n", clk_names[i],
+-                              clk, clk);
++                      dev_dbg(dev, "clk %s is %pC rate %lu\n", clk_names[i],
++                              clk, clk_get_rate(clk));
+               sci_port->clks[i] = IS_ERR(clk) ? NULL : clk;
+       }
+       return 0;
+diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
+index 2197a50ed2ab..b1ae944c83a9 100644
+--- a/drivers/usb/gadget/udc/renesas_usb3.c
++++ b/drivers/usb/gadget/udc/renesas_usb3.c
+@@ -521,6 +521,13 @@ static void usb3_disconnect(struct renesas_usb3 *usb3)
+       usb3_usb2_pullup(usb3, 0);
+       usb3_clear_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON);
+       usb3_reset_epc(usb3);
++      usb3_disable_irq_1(usb3, USB_INT_1_B2_RSUM | USB_INT_1_B3_PLLWKUP |
++                         USB_INT_1_B3_LUPSUCS | USB_INT_1_B3_DISABLE |
++                         USB_INT_1_SPEED | USB_INT_1_B3_WRMRST |
++                         USB_INT_1_B3_HOTRST | USB_INT_1_B2_SPND |
++                         USB_INT_1_B2_L1SPND | USB_INT_1_B2_USBRST);
++      usb3_clear_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON);
++      usb3_init_epc_registers(usb3);
+ 
+       if (usb3->driver)
+               usb3->driver->disconnect(&usb3->gadget);
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index a96dcc660d0f..8dd200f92020 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -836,6 +836,12 @@ static int uas_slave_configure(struct scsi_device *sdev)
+       if (devinfo->flags & US_FL_BROKEN_FUA)
+               sdev->broken_fua = 1;
+ 
++      /* UAS also needs to support FL_ALWAYS_SYNC */
++      if (devinfo->flags & US_FL_ALWAYS_SYNC) {
++              sdev->skip_ms_page_3f = 1;
++              sdev->skip_ms_page_8 = 1;
++              sdev->wce_default_on = 1;
++      }
+       scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
+       return 0;
+ }
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index ca3a5d430ae1..fc5ed351defb 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2340,6 +2340,15 @@ UNUSUAL_DEV(  0x4146, 0xba01, 0x0100, 0x0100,
+               "Micro Mini 1GB",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
+ 
++/* "G-DRIVE" external HDD hangs on write without these.
++ * Patch submitted by Alexander Kappner <[email protected]>
++ */
++UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999,
++              "SimpleTech",
++              "External HDD",
++              USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++              US_FL_ALWAYS_SYNC),
++
+ /*
+  * Nick Bowler <[email protected]>
+  * SCSI stack spams (otherwise harmless) error messages.
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index 719ec68ae309..f15aa47c54a9 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -183,3 +183,12 @@ UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999,
+               "External HDD",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_REPORT_OPCODES),
++
++/* "G-DRIVE" external HDD hangs on write without these.
++ * Patch submitted by Alexander Kappner <[email protected]>
++ */
++UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999,
++              "SimpleTech",
++              "External HDD",
++              USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++              US_FL_ALWAYS_SYNC),
+diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
+index c287ccc78fde..e8a008de8dbc 100644
+--- a/drivers/usb/usbip/vhci_sysfs.c
++++ b/drivers/usb/usbip/vhci_sysfs.c
+@@ -24,6 +24,9 @@
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+ 
++/* Hardening for Spectre-v1 */
++#include <linux/nospec.h>
++
+ #include "usbip_common.h"
+ #include "vhci.h"
+ 
+@@ -181,16 +184,20 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci, __u32 rhport)
+       return 0;
+ }
+ 
+-static int valid_port(__u32 pdev_nr, __u32 rhport)
++static int valid_port(__u32 *pdev_nr, __u32 *rhport)
+ {
+-      if (pdev_nr >= vhci_num_controllers) {
+-              pr_err("pdev %u\n", pdev_nr);
++      if (*pdev_nr >= vhci_num_controllers) {
++              pr_err("pdev %u\n", *pdev_nr);
+               return 0;
+       }
+-      if (rhport >= VHCI_HC_PORTS) {
+-              pr_err("rhport %u\n", rhport);
++      *pdev_nr = array_index_nospec(*pdev_nr, vhci_num_controllers);
++
++      if (*rhport >= VHCI_HC_PORTS) {
++              pr_err("rhport %u\n", *rhport);
+               return 0;
+       }
++      *rhport = array_index_nospec(*rhport, VHCI_HC_PORTS);
++
+       return 1;
+ }
+ 
+@@ -207,7 +214,7 @@ static ssize_t store_detach(struct device *dev, struct device_attribute *attr,
+       pdev_nr = port_to_pdev_nr(port);
+       rhport = port_to_rhport(port);
+ 
+-      if (!valid_port(pdev_nr, rhport))
++      if (!valid_port(&pdev_nr, &rhport))
+               return -EINVAL;
+ 
+       hcd = platform_get_drvdata(*(vhci_pdevs + pdev_nr));
+@@ -226,7 +233,8 @@ static ssize_t store_detach(struct device *dev, struct device_attribute *attr,
+ }
+ static DEVICE_ATTR(detach, S_IWUSR, NULL, store_detach);
+ 
+-static int valid_args(__u32 pdev_nr, __u32 rhport, enum usb_device_speed speed)
++static int valid_args(__u32 *pdev_nr, __u32 *rhport,
++                    enum usb_device_speed speed)
+ {
+       if (!valid_port(pdev_nr, rhport)) {
+               return 0;
+@@ -288,7 +296,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
+                            sockfd, devid, speed);
+ 
+       /* check received parameters */
+-      if (!valid_args(pdev_nr, rhport, speed))
++      if (!valid_args(&pdev_nr, &rhport, speed))
+               return -EINVAL;
+ 
+       hcd = platform_get_drvdata(*(vhci_pdevs + pdev_nr));
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 7734cc9c7d29..714428c54c68 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -277,6 +277,11 @@ static inline bool bond_is_lb(const struct bonding *bond)
+              BOND_MODE(bond) == BOND_MODE_ALB;
+ }
+ 
++static inline bool bond_needs_speed_duplex(const struct bonding *bond)
++{
++      return BOND_MODE(bond) == BOND_MODE_8023AD || bond_is_lb(bond);
++}
++
+ static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
+ {
+       return (BOND_MODE(bond) == BOND_MODE_TLB)  &&
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 15150b412930..3ba903ff2bb0 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -437,6 +437,24 @@ static int verify_address_len(const void *p)
+       return 0;
+ }
+ 
++static inline int sadb_key_len(const struct sadb_key *key)
++{
++      int key_bytes = DIV_ROUND_UP(key->sadb_key_bits, 8);
++
++      return DIV_ROUND_UP(sizeof(struct sadb_key) + key_bytes,
++                          sizeof(uint64_t));
++}
++
++static int verify_key_len(const void *p)
++{
++      const struct sadb_key *key = p;
++
++      if (sadb_key_len(key) > key->sadb_key_len)
++              return -EINVAL;
++
++      return 0;
++}
++
+ static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx)
+ {
+       return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) +
+@@ -533,16 +551,25 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
+                               return -EINVAL;
+                       if (ext_hdrs[ext_type-1] != NULL)
+                               return -EINVAL;
+-                      if (ext_type == SADB_EXT_ADDRESS_SRC ||
+-                          ext_type == SADB_EXT_ADDRESS_DST ||
+-                          ext_type == SADB_EXT_ADDRESS_PROXY ||
+-                          ext_type == SADB_X_EXT_NAT_T_OA) {
++                      switch (ext_type) {
++                      case SADB_EXT_ADDRESS_SRC:
++                      case SADB_EXT_ADDRESS_DST:
++                      case SADB_EXT_ADDRESS_PROXY:
++                      case SADB_X_EXT_NAT_T_OA:
+                               if (verify_address_len(p))
+                                       return -EINVAL;
+-                      }
+-                      if (ext_type == SADB_X_EXT_SEC_CTX) {
++                              break;
++                      case SADB_X_EXT_SEC_CTX:
+                               if (verify_sec_ctx_len(p))
+                                       return -EINVAL;
++                              break;
++                      case SADB_EXT_KEY_AUTH:
++                      case SADB_EXT_KEY_ENCRYPT:
++                              if (verify_key_len(p))
++                                      return -EINVAL;
++                              break;
++                      default:
++                              break;
+                       }
+                       ext_hdrs[ext_type-1] = (void *) p;
+               }
+@@ -1111,14 +1138,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
+       key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
+       if (key != NULL &&
+           sa->sadb_sa_auth != SADB_X_AALG_NULL &&
+-          ((key->sadb_key_bits+7) / 8 == 0 ||
+-           (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
++          key->sadb_key_bits == 0)
+               return ERR_PTR(-EINVAL);
+       key = ext_hdrs[SADB_EXT_KEY_ENCRYPT-1];
+       if (key != NULL &&
+           sa->sadb_sa_encrypt != SADB_EALG_NULL &&
+-          ((key->sadb_key_bits+7) / 8 == 0 ||
+-           (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
++          key->sadb_key_bits == 0)
+               return ERR_PTR(-EINVAL);
+ 
+       x = xfrm_state_alloc(net);
+diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
+index c278f276c9b3..aea30afeddb8 100644
+--- a/tools/arch/x86/include/asm/cpufeatures.h
++++ b/tools/arch/x86/include/asm/cpufeatures.h
+@@ -104,7 +104,7 @@
+ #define X86_FEATURE_EXTD_APICID       ( 3*32+26) /* has extended APICID (8 bits) */
+ #define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
+ #define X86_FEATURE_APERFMPERF        ( 3*32+28) /* APERFMPERF */
+-#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
++/* free, was #define X86_FEATURE_EAGER_FPU    ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
+ #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+ 
+ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
