commit:     019eea27b90a11b416aa484582661386065fe93a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Aug 29 14:13:38 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Aug 29 14:13:38 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=019eea27

Linux patch 4.14.141

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1140_linux-4.14.141.patch | 2471 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2475 insertions(+)

diff --git a/0000_README b/0000_README
index 46d7bd2..9d8b846 100644
--- a/0000_README
+++ b/0000_README
@@ -603,6 +603,10 @@ Patch:  1139_linux-4.14.140.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.14.140
 
+Patch:  1140_linux-4.14.141.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.14.141
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1140_linux-4.14.141.patch b/1140_linux-4.14.141.patch
new file mode 100644
index 0000000..72c8cee
--- /dev/null
+++ b/1140_linux-4.14.141.patch
@@ -0,0 +1,2471 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 13d80111bc1f..188a7db8501b 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -3788,6 +3788,13 @@
+                       Run specified binary instead of /init from the ramdisk,
+                       used for early userspace startup. See initrd.
+ 
++      rdrand=         [X86]
++                      force - Override the decision by the kernel to hide the
++                              advertisement of RDRAND support (this affects
++                              certain AMD processors because of buggy BIOS
++                              support, specifically around the suspend/resume
++                              path).
++
+       rdt=            [HW,X86,RDT]
+                       Turn on/off individual RDT features. List is:
+                       cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, mba.
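
The rdrand= switch only changes what CPUID advertises; the bit the kernel
re-reads after applying the override is CPUID leaf 1, ECX bit 30. A minimal
user-space sketch of checking that bit (assumes a GCC/Clang x86 toolchain;
illustrative, not part of this patch):

/* Sketch: does this CPU currently advertise RDRAND (CPUID.1:ECX[30])? */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;
        printf("RDRAND %s\n", (ecx & (1u << 30)) ? "advertised" : "hidden");
        return 0;
}

Booting with rdrand=force keeps the bit advertised even on the affected AMD
parts; see the arch/x86/kernel/cpu/amd.c hunk below.
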
+diff --git a/Makefile b/Makefile
+index be7290af771e..eefd21f3d1ec 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 140
++SUBLEVEL = 141
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
+index 97d5239ca47b..428ef2189203 100644
+--- a/arch/mips/kernel/cacheinfo.c
++++ b/arch/mips/kernel/cacheinfo.c
+@@ -80,6 +80,8 @@ static int __populate_cache_leaves(unsigned int cpu)
+       if (c->tcache.waysize)
+               populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
+ 
++      this_cpu_ci->cpu_map_populated = true;
++
+       return 0;
+ }
+ 
+diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
+index 5f209f111e59..df7ddd246eaa 100644
+--- a/arch/mips/kernel/i8253.c
++++ b/arch/mips/kernel/i8253.c
+@@ -32,7 +32,8 @@ void __init setup_pit_timer(void)
+ 
+ static int __init init_pit_clocksource(void)
+ {
+-      if (num_possible_cpus() > 1) /* PIT does not scale! */
++      if (num_possible_cpus() > 1 || /* PIT does not scale! */
++          !clockevent_state_periodic(&i8253_clockevent))
+               return 0;
+ 
+       return clocksource_i8253_init();
+diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
+index 3280953a82cf..09af857ca099 100644
+--- a/arch/powerpc/kernel/misc_64.S
++++ b/arch/powerpc/kernel/misc_64.S
+@@ -134,7 +134,7 @@ _GLOBAL_TOC(flush_dcache_range)
+       subf    r8,r6,r4                /* compute length */
+       add     r8,r8,r5                /* ensure we get enough */
+       lwz     r9,DCACHEL1LOGBLOCKSIZE(r10)    /* Get log-2 of dcache block size */
+-      srw.    r8,r8,r9                /* compute line count */
++      srd.    r8,r8,r9                /* compute line count */
+       beqlr                           /* nothing to do? */
+       mtctr   r8
+ 0:    dcbst   0,r6
+@@ -190,7 +190,7 @@ _GLOBAL(flush_inval_dcache_range)
+       subf    r8,r6,r4                /* compute length */
+       add     r8,r8,r5                /* ensure we get enough */
+       lwz     r9,DCACHEL1LOGBLOCKSIZE(r10)/* Get log-2 of dcache block size */
+-      srw.    r8,r8,r9                /* compute line count */
++      srd.    r8,r8,r9                /* compute line count */
+       beqlr                           /* nothing to do? */
+       sync
+       isync
+diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
+index a07ffd23e4dd..d3983fdf1012 100644
+--- a/arch/x86/include/asm/bootparam_utils.h
++++ b/arch/x86/include/asm/bootparam_utils.h
+@@ -18,6 +18,20 @@
+  * Note: efi_info is commonly left uninitialized, but that field has a
+  * private magic, so it is better to leave it unchanged.
+  */
++
++#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
++
++#define BOOT_PARAM_PRESERVE(struct_member)                            \
++      {                                                               \
++              .start = offsetof(struct boot_params, struct_member),   \
++              .len   = sizeof_mbr(struct boot_params, struct_member), \
++      }
++
++struct boot_params_to_save {
++      unsigned int start;
++      unsigned int len;
++};
++
+ static void sanitize_boot_params(struct boot_params *boot_params)
+ {
+       /* 
+@@ -36,19 +50,40 @@ static void sanitize_boot_params(struct boot_params *boot_params)
+        */
+       if (boot_params->sentinel) {
+               /* fields in boot_params are left uninitialized, clear them */
+-              memset(&boot_params->ext_ramdisk_image, 0,
+-                     (char *)&boot_params->efi_info -
+-                      (char *)&boot_params->ext_ramdisk_image);
+-              memset(&boot_params->kbd_status, 0,
+-                     (char *)&boot_params->hdr -
+-                     (char *)&boot_params->kbd_status);
+-              memset(&boot_params->_pad7[0], 0,
+-                     (char *)&boot_params->edd_mbr_sig_buffer[0] -
+-                      (char *)&boot_params->_pad7[0]);
+-              memset(&boot_params->_pad8[0], 0,
+-                     (char *)&boot_params->eddbuf[0] -
+-                      (char *)&boot_params->_pad8[0]);
+-              memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
++              static struct boot_params scratch;
++              char *bp_base = (char *)boot_params;
++              char *save_base = (char *)&scratch;
++              int i;
++
++              const struct boot_params_to_save to_save[] = {
++                      BOOT_PARAM_PRESERVE(screen_info),
++                      BOOT_PARAM_PRESERVE(apm_bios_info),
++                      BOOT_PARAM_PRESERVE(tboot_addr),
++                      BOOT_PARAM_PRESERVE(ist_info),
++                      BOOT_PARAM_PRESERVE(hd0_info),
++                      BOOT_PARAM_PRESERVE(hd1_info),
++                      BOOT_PARAM_PRESERVE(sys_desc_table),
++                      BOOT_PARAM_PRESERVE(olpc_ofw_header),
++                      BOOT_PARAM_PRESERVE(efi_info),
++                      BOOT_PARAM_PRESERVE(alt_mem_k),
++                      BOOT_PARAM_PRESERVE(scratch),
++                      BOOT_PARAM_PRESERVE(e820_entries),
++                      BOOT_PARAM_PRESERVE(eddbuf_entries),
++                      BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
++                      BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
++                      BOOT_PARAM_PRESERVE(hdr),
++                      BOOT_PARAM_PRESERVE(e820_table),
++                      BOOT_PARAM_PRESERVE(eddbuf),
++              };
++
++              memset(&scratch, 0, sizeof(scratch));
++
++              for (i = 0; i < ARRAY_SIZE(to_save); i++) {
++                      memcpy(save_base + to_save[i].start,
++                             bp_base + to_save[i].start, to_save[i].len);
++              }
++
++              memcpy(boot_params, save_base, sizeof(*boot_params));
+       }
+ }
+ 
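
The rewritten sanitize_boot_params() above replaces hand-computed memset()
ranges with a whitelist of (offset, length) spans generated by
offsetof()/sizeof(): the struct is rebuilt from a zeroed scratch copy plus
the listed members, so preserving one more field becomes a one-line change.
The same idiom in a standalone sketch (struct and member names are made up
for illustration):

#include <stddef.h>
#include <string.h>

struct params { int keep_a; int junk; char keep_b[8]; };

struct span { size_t start, len; };

#define PRESERVE(type, m) { offsetof(type, m), sizeof(((type *)0)->m) }

static void sanitize(struct params *p)
{
        static const struct span keep[] = {
                PRESERVE(struct params, keep_a),
                PRESERVE(struct params, keep_b),
        };
        struct params scratch;
        size_t i;

        memset(&scratch, 0, sizeof(scratch));            /* default: zero */
        for (i = 0; i < sizeof(keep) / sizeof(keep[0]); i++)
                memcpy((char *)&scratch + keep[i].start, /* keep listed   */
                       (char *)p + keep[i].start, keep[i].len);
        memcpy(p, &scratch, sizeof(*p));
}
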
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 7f1c8448d595..fda3bf75de6c 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -334,6 +334,7 @@
+ #define MSR_AMD64_PATCH_LEVEL         0x0000008b
+ #define MSR_AMD64_TSC_RATIO           0xc0000104
+ #define MSR_AMD64_NB_CFG              0xc001001f
++#define MSR_AMD64_CPUID_FN_1          0xc0011004
+ #define MSR_AMD64_PATCH_LOADER                0xc0010020
+ #define MSR_AMD64_OSVW_ID_LENGTH      0xc0010140
+ #define MSR_AMD64_OSVW_STATUS         0xc0010141
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index f1ddf3a1f307..f6b496d11097 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -202,7 +202,7 @@
+       "       lfence;\n"                                      \
+       "       jmp    902b;\n"                                 \
+       "       .align 16\n"                                    \
+-      "903:   addl   $4, %%esp;\n"                            \
++      "903:   lea    4(%%esp), %%esp;\n"                      \
+       "       pushl  %[thunk_target];\n"                      \
+       "       ret;\n"                                         \
+       "       .align 16\n"                                    \
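
The one-instruction change above is about EFLAGS: addl rewrites the
arithmetic flags while lea computes the same new stack pointer without
touching them, and CALL_NOSPEC can be used in contexts (e.g. hypercalls)
where the caller's flags must survive. A small x86-64 demonstration under
GCC/Clang, using a scratch register instead of %esp (illustrative, not
kernel code):

#include <stdio.h>

static unsigned long flags_after(int use_lea)
{
        unsigned long flags, scratch = 0;

        if (use_lea)
                asm volatile("cmp %1, %1\n\t"    /* sets ZF      */
                             "lea 4(%1), %1\n\t" /* ZF survives  */
                             "pushf\n\tpop %0"
                             : "=r"(flags), "+r"(scratch) : : "cc");
        else
                asm volatile("cmp %1, %1\n\t"    /* sets ZF      */
                             "add $4, %1\n\t"    /* ZF clobbered */
                             "pushf\n\tpop %0"
                             : "=r"(flags), "+r"(scratch) : : "cc");
        return flags;
}

int main(void)
{
        /* Bit 6 of EFLAGS is ZF: prints 0 after add, 1 after lea. */
        printf("ZF after add: %lu\n", (flags_after(0) >> 6) & 1);
        printf("ZF after lea: %lu\n", (flags_after(1) >> 6) & 1);
        return 0;
}
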
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index ae410f7585f1..f8f9cfded97d 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -723,7 +723,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
+ static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
+ 
+ /*
+- * Temporary interrupt handler.
++ * Temporary interrupt handler and polled calibration function.
+  */
+ static void __init lapic_cal_handler(struct clock_event_device *dev)
+ {
+@@ -807,7 +807,8 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
+ static int __init calibrate_APIC_clock(void)
+ {
+       struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
+-      void (*real_handler)(struct clock_event_device *dev);
++      u64 tsc_perj = 0, tsc_start = 0;
++      unsigned long jif_start;
+       unsigned long deltaj;
+       long delta, deltatsc;
+       int pm_referenced = 0;
+@@ -838,28 +839,64 @@ static int __init calibrate_APIC_clock(void)
+       apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
+                   "calibrating APIC timer ...\n");
+ 
++      /*
++       * There are platforms w/o global clockevent devices. Instead of
++       * making the calibration conditional on that, use a polling based
++       * approach everywhere.
++       */
+       local_irq_disable();
+ 
+-      /* Replace the global interrupt handler */
+-      real_handler = global_clock_event->event_handler;
+-      global_clock_event->event_handler = lapic_cal_handler;
+-
+       /*
+        * Setup the APIC counter to maximum. There is no way the lapic
+        * can underflow in the 100ms detection time frame
+        */
+       __setup_APIC_LVTT(0xffffffff, 0, 0);
+ 
+-      /* Let the interrupts run */
++      /*
++       * Methods to terminate the calibration loop:
++       *  1) Global clockevent if available (jiffies)
++       *  2) TSC if available and frequency is known
++       */
++      jif_start = READ_ONCE(jiffies);
++
++      if (tsc_khz) {
++              tsc_start = rdtsc();
++              tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
++      }
++
++      /*
++       * Enable interrupts so the tick can fire, if a global
++       * clockevent device is available
++       */
+       local_irq_enable();
+ 
+-      while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
+-              cpu_relax();
++      while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
++              /* Wait for a tick to elapse */
++              while (1) {
++                      if (tsc_khz) {
++                              u64 tsc_now = rdtsc();
++                              if ((tsc_now - tsc_start) >= tsc_perj) {
++                                      tsc_start += tsc_perj;
++                                      break;
++                              }
++                      } else {
++                              unsigned long jif_now = READ_ONCE(jiffies);
+ 
+-      local_irq_disable();
++                              if (time_after(jif_now, jif_start)) {
++                                      jif_start = jif_now;
++                                      break;
++                              }
++                      }
++                      cpu_relax();
++              }
+ 
+-      /* Restore the real event handler */
+-      global_clock_event->event_handler = real_handler;
++              /* Invoke the calibration routine */
++              local_irq_disable();
++              lapic_cal_handler(NULL);
++              local_irq_enable();
++      }
++
++      local_irq_disable();
+ 
+       /* Build delta t1-t2 as apic timer counts down */
+       delta = lapic_cal_t1 - lapic_cal_t2;
+@@ -912,10 +949,11 @@ static int __init calibrate_APIC_clock(void)
+       levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
+ 
+       /*
+-       * PM timer calibration failed or not turned on
+-       * so lets try APIC timer based calibration
++       * PM timer calibration failed or not turned on so lets try APIC
++       * timer based calibration, if a global clockevent device is
++       * available.
+        */
+-      if (!pm_referenced) {
++      if (!pm_referenced && global_clock_event) {
+               apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
+ 
+               /*
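
The calibration rework above stops swapping global_clock_event's handler
and instead polls for tick boundaries itself, via the TSC when tsc_khz is
known and via jiffies otherwise, invoking lapic_cal_handler() by hand once
per tick. The TSC branch reduced to a user-space stand-in (clock source
and tick rate are illustrative):

#include <stdint.h>
#include <time.h>

#define HZ 100  /* illustrative tick rate */

static uint64_t counter_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Spin until one tick period elapses, like the rdtsc loop above: the
 * deadline advances by a fixed stride, so polling jitter cannot drift. */
static void wait_one_tick(uint64_t *deadline)
{
        const uint64_t per_tick = 1000000000ull / HZ;

        if (!*deadline)
                *deadline = counter_ns();
        *deadline += per_tick;
        while (counter_ns() < *deadline)
                ;  /* cpu_relax() equivalent */
}
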
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index bbebcd7a781e..3914f9218a6b 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -772,6 +772,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
+       msr_set_bit(MSR_AMD64_DE_CFG, 31);
+ }
+ 
++static bool rdrand_force;
++
++static int __init rdrand_cmdline(char *str)
++{
++      if (!str)
++              return -EINVAL;
++
++      if (!strcmp(str, "force"))
++              rdrand_force = true;
++      else
++              return -EINVAL;
++
++      return 0;
++}
++early_param("rdrand", rdrand_cmdline);
++
++static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
++{
++      /*
++       * Saving of the MSR used to hide the RDRAND support during
++       * suspend/resume is done by arch/x86/power/cpu.c, which is
++       * dependent on CONFIG_PM_SLEEP.
++       */
++      if (!IS_ENABLED(CONFIG_PM_SLEEP))
++              return;
++
++      /*
++       * The nordrand option can clear X86_FEATURE_RDRAND, so check for
++       * RDRAND support using the CPUID function directly.
++       */
++      if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
++              return;
++
++      msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
++
++      /*
++       * Verify that the CPUID change has occurred in case the kernel is
++       * running virtualized and the hypervisor doesn't support the MSR.
++       */
++      if (cpuid_ecx(1) & BIT(30)) {
++              pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
++              return;
++      }
++
++      clear_cpu_cap(c, X86_FEATURE_RDRAND);
++      pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
++}
++
++static void init_amd_jg(struct cpuinfo_x86 *c)
++{
++      /*
++       * Some BIOS implementations do not restore proper RDRAND support
++       * across suspend and resume. Check on whether to hide the RDRAND
++       * instruction support via CPUID.
++       */
++      clear_rdrand_cpuid_bit(c);
++}
++
+ static void init_amd_bd(struct cpuinfo_x86 *c)
+ {
+       u64 value;
+@@ -786,6 +844,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
+                       wrmsrl_safe(MSR_F15H_IC_CFG, value);
+               }
+       }
++
++      /*
++       * Some BIOS implementations do not restore proper RDRAND support
++       * across suspend and resume. Check on whether to hide the RDRAND
++       * instruction support via CPUID.
++       */
++      clear_rdrand_cpuid_bit(c);
+ }
+ 
+ static void init_amd_zn(struct cpuinfo_x86 *c)
+@@ -828,6 +893,7 @@ static void init_amd(struct cpuinfo_x86 *c)
+       case 0x10: init_amd_gh(c); break;
+       case 0x12: init_amd_ln(c); break;
+       case 0x15: init_amd_bd(c); break;
++      case 0x16: init_amd_jg(c); break;
+       case 0x17: init_amd_zn(c); break;
+       }
+ 
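
clear_rdrand_cpuid_bit() above hides RDRAND by clearing bit 62 of MSR
0xc0011004 (which mirrors CPUID Fn0000_0001 EDX:ECX on these AMD parts)
and then re-reads CPUID, since a hypervisor may silently ignore the MSR
write. Whether the bit took effect can also be inspected from user space
through the msr(4) device; a hedged sketch (needs the msr module and root;
illustrative only):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* pread() on the msr device reads 8 bytes at offset == MSR number. */
        if (pread(fd, &val, sizeof(val), 0xc0011004) != sizeof(val)) {
                perror("pread");
                return 1;
        }
        printf("RDRAND advertise bit 62: %d\n", (int)((val >> 62) & 1));
        return 0;
}
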
+diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
+index 2dd1fe13a37b..19f707992db2 100644
+--- a/arch/x86/lib/cpu.c
++++ b/arch/x86/lib/cpu.c
+@@ -1,5 +1,6 @@
+ #include <linux/types.h>
+ #include <linux/export.h>
++#include <asm/cpu.h>
+ 
+ unsigned int x86_family(unsigned int sig)
+ {
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index 513ce09e9950..3aa3149df07f 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -13,6 +13,7 @@
+ #include <linux/smp.h>
+ #include <linux/perf_event.h>
+ #include <linux/tboot.h>
++#include <linux/dmi.h>
+ 
+ #include <asm/pgtable.h>
+ #include <asm/proto.h>
+@@ -24,7 +25,7 @@
+ #include <asm/debugreg.h>
+ #include <asm/cpu.h>
+ #include <asm/mmu_context.h>
+-#include <linux/dmi.h>
++#include <asm/cpu_device_id.h>
+ 
+ #ifdef CONFIG_X86_32
+ __visible unsigned long saved_context_ebx;
+@@ -398,15 +399,14 @@ static int __init bsp_pm_check_init(void)
+ 
+ core_initcall(bsp_pm_check_init);
+ 
+-static int msr_init_context(const u32 *msr_id, const int total_num)
++static int msr_build_context(const u32 *msr_id, const int num)
+ {
+-      int i = 0;
++      struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
+       struct saved_msr *msr_array;
++      int total_num;
++      int i, j;
+ 
+-      if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
+-              pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
+-              return -EINVAL;
+-      }
++      total_num = saved_msrs->num + num;
+ 
+       msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
+       if (!msr_array) {
+@@ -414,19 +414,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
+               return -ENOMEM;
+       }
+ 
+-      for (i = 0; i < total_num; i++) {
+-              msr_array[i].info.msr_no        = msr_id[i];
++      if (saved_msrs->array) {
++              /*
++               * Multiple callbacks can invoke this function, so copy any
++               * MSR save requests from previous invocations.
++               */
++              memcpy(msr_array, saved_msrs->array,
++                     sizeof(struct saved_msr) * saved_msrs->num);
++
++              kfree(saved_msrs->array);
++      }
++
++      for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
++              msr_array[i].info.msr_no        = msr_id[j];
+               msr_array[i].valid              = false;
+               msr_array[i].info.reg.q         = 0;
+       }
+-      saved_context.saved_msrs.num    = total_num;
+-      saved_context.saved_msrs.array  = msr_array;
++      saved_msrs->num   = total_num;
++      saved_msrs->array = msr_array;
+ 
+       return 0;
+ }
+ 
+ /*
+- * The following section is a quirk framework for problematic BIOSen:
++ * The following sections are a quirk framework for problematic BIOSen:
+  * Sometimes MSRs are modified by the BIOSen after suspended to
+  * RAM, this might cause unexpected behavior after wakeup.
+  * Thus we save/restore these specified MSRs across suspend/resume
+@@ -441,7 +452,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
+       u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
+ 
+       pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
+-      return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
++      return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
+ }
+ 
+ static const struct dmi_system_id msr_save_dmi_table[] = {
+@@ -456,9 +467,58 @@ static const struct dmi_system_id msr_save_dmi_table[] = {
+       {}
+ };
+ 
++static int msr_save_cpuid_features(const struct x86_cpu_id *c)
++{
++      u32 cpuid_msr_id[] = {
++              MSR_AMD64_CPUID_FN_1,
++      };
++
++      pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
++              c->family);
++
++      return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
++}
++
++static const struct x86_cpu_id msr_save_cpu_table[] = {
++      {
++              .vendor = X86_VENDOR_AMD,
++              .family = 0x15,
++              .model = X86_MODEL_ANY,
++              .feature = X86_FEATURE_ANY,
++              .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
++      },
++      {
++              .vendor = X86_VENDOR_AMD,
++              .family = 0x16,
++              .model = X86_MODEL_ANY,
++              .feature = X86_FEATURE_ANY,
++              .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
++      },
++      {}
++};
++
++typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
++static int pm_cpu_check(const struct x86_cpu_id *c)
++{
++      const struct x86_cpu_id *m;
++      int ret = 0;
++
++      m = x86_match_cpu(msr_save_cpu_table);
++      if (m) {
++              pm_cpu_match_t fn;
++
++              fn = (pm_cpu_match_t)m->driver_data;
++              ret = fn(m);
++      }
++
++      return ret;
++}
++
+ static int pm_check_save_msr(void)
+ {
+       dmi_check_system(msr_save_dmi_table);
++      pm_cpu_check(msr_save_cpu_table);
++
+       return 0;
+ }
+ 
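
msr_build_context() (formerly msr_init_context) now grows the saved-MSR
array instead of rejecting a second caller: allocate old+new slots, copy
the existing entries across, free the old array, then append the new IDs.
That lets the DMI quirk table and the new CPU-match table both register
MSRs. The grow-and-append idiom in miniature, with malloc()/free()
standing in for kmalloc_array()/kfree() (illustrative only):

#include <stdlib.h>
#include <string.h>

struct vec { unsigned int *item; int num; };

static int vec_append(struct vec *v, const unsigned int *add, int n)
{
        int total = v->num + n;
        unsigned int *arr = malloc(total * sizeof(*arr));

        if (!arr)
                return -1;
        if (v->item) {
                memcpy(arr, v->item, v->num * sizeof(*arr)); /* keep old   */
                free(v->item);
        }
        memcpy(arr + v->num, add, n * sizeof(*arr));         /* append new */
        v->item = arr;
        v->num = total;
        return 0;
}
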
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index bf5777bc04d3..eb0c4ee20525 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1804,6 +1804,21 @@ nothing_to_do:
+       return 1;
+ }
+ 
++static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
++{
++      struct request *rq = scmd->request;
++      u32 req_blocks;
++
++      if (!blk_rq_is_passthrough(rq))
++              return true;
++
++      req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
++      if (n_blocks > req_blocks)
++              return false;
++
++      return true;
++}
++
+ /**
+  *    ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
+  *    @qc: Storage for translated ATA taskfile
+@@ -1848,6 +1863,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
+               scsi_10_lba_len(cdb, &block, &n_block);
+               if (cdb[1] & (1 << 3))
+                       tf_flags |= ATA_TFLAG_FUA;
++              if (!ata_check_nblocks(scmd, n_block))
++                      goto invalid_fld;
+               break;
+       case READ_6:
+       case WRITE_6:
+@@ -1862,6 +1879,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
+                */
+               if (!n_block)
+                       n_block = 256;
++              if (!ata_check_nblocks(scmd, n_block))
++                      goto invalid_fld;
+               break;
+       case READ_16:
+       case WRITE_16:
+@@ -1872,6 +1891,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
+               scsi_16_lba_len(cdb, &block, &n_block);
+               if (cdb[1] & (1 << 3))
+                       tf_flags |= ATA_TFLAG_FUA;
++              if (!ata_check_nblocks(scmd, n_block))
++                      goto invalid_fld;
+               break;
+       default:
+               DPRINTK("no-byte command\n");
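
ata_check_nblocks() above closes a hole for passthrough (e.g. SG_IO)
requests: the block count encoded in the CDB arrives from user space and
may exceed the data the block layer actually attached to the request, so
it is capped against blk_rq_bytes() / sector_size before the taskfile is
built. The essence of the check as a standalone helper (names
illustrative):

#include <stdbool.h>
#include <stdint.h>

/* True iff the CDB's claimed block count fits within the bytes actually
 * attached to the request; only passthrough requests need the check. */
static bool nblocks_ok(bool passthrough, uint64_t req_bytes,
                       uint32_t sector_size, uint32_t cdb_blocks)
{
        if (!passthrough)
                return true;
        return cdb_blocks <= req_bytes / sector_size;
}
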
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index cc2f2e35f4c2..8c36ff0c2dd4 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -704,6 +704,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
+       unsigned int offset;
+       unsigned char *buf;
+ 
++      if (!qc->cursg) {
++              qc->curbytes = qc->nbytes;
++              return;
++      }
+       if (qc->curbytes == qc->nbytes - qc->sect_size)
+               ap->hsm_task_state = HSM_ST_LAST;
+ 
+@@ -729,6 +733,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
+ 
+       if (qc->cursg_ofs == qc->cursg->length) {
+               qc->cursg = sg_next(qc->cursg);
++              if (!qc->cursg)
++                      ap->hsm_task_state = HSM_ST_LAST;
+               qc->cursg_ofs = 0;
+       }
+ }
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 25351b6b1e34..562e90bf73c9 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -971,9 +971,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+               if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+                       lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
+               if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+-                      lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
++                      lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
++                                         GPIOLINE_FLAG_IS_OUT);
+               if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+-                      lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
++                      lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
++                                         GPIOLINE_FLAG_IS_OUT);
+ 
+               if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
+                       return -EFAULT;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+index b4e7404fe660..a11637b0f6cc 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+@@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+               u8 *ptr = msg->buf;
+ 
+               while (remaining) {
+-                      u8 cnt = (remaining > 16) ? 16 : remaining;
+-                      u8 cmd;
++                      u8 cnt, retries, cmd;
+ 
+                       if (msg->flags & I2C_M_RD)
+                               cmd = 1;
+@@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+                       if (mcnt || remaining > 16)
+                               cmd |= 4; /* MOT */
+ 
+-                      ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
+-                      if (ret < 0) {
+-                              nvkm_i2c_aux_release(aux);
+-                              return ret;
++                      for (retries = 0, cnt = 0;
++                           retries < 32 && !cnt;
++                           retries++) {
++                              cnt = min_t(u8, remaining, 16);
++                              ret = aux->func->xfer(aux, true, cmd,
++                                                    msg->addr, ptr, &cnt);
++                              if (ret < 0)
++                                      goto out;
++                      }
++                      if (!cnt) {
++                              AUX_TRACE(aux, "no data after 32 retries");
++                              ret = -EIO;
++                              goto out;
+                       }
+ 
+                       ptr += cnt;
+@@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+               msg++;
+       }
+ 
++      ret = num;
++out:
+       nvkm_i2c_aux_release(aux);
+-      return num;
++      return ret;
+ }
+ 
+ static u32
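
The reworked loop above handles an AUX transfer that completes with zero
bytes moved: the sink may keep deferring, so the same chunk is re-issued
up to 32 times, and either a hard error or 32 empty replies aborts (with
the bus now released on every exit through the common out label). The
retry-until-progress skeleton, with xfer_once() as a hypothetical
stand-in for aux->func->xfer():

#include <errno.h>

/* Hypothetical one-shot transfer: returns bytes moved, 0 for "try
 * again", or a negative error code. */
int xfer_once(void *ctx, void *buf, int want);

static int xfer_retry(void *ctx, void *buf, int want)
{
        int retries, done = 0;

        for (retries = 0; retries < 32 && !done; retries++) {
                done = xfer_once(ctx, buf, want);
                if (done < 0)
                        return done;     /* hard error: give up now */
        }
        return done ? done : -EIO;       /* 32 empty replies: abort */
}
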
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+index 97000996b8dc..50cc060cc552 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+@@ -300,8 +300,10 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
+               break;
+       }
+ 
+-      if (retries == RETRIES)
++      if (retries == RETRIES) {
++              kfree(reply);
+               return -EINVAL;
++      }
+ 
+       *msg_len = reply_len;
+       *msg     = reply;
+diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
+index 9428ea7cdf8a..c52bd163abb3 100644
+--- a/drivers/hid/hid-a4tech.c
++++ b/drivers/hid/hid-a4tech.c
+@@ -26,12 +26,36 @@
+ #define A4_2WHEEL_MOUSE_HACK_7        0x01
+ #define A4_2WHEEL_MOUSE_HACK_B8       0x02
+ 
++#define A4_WHEEL_ORIENTATION  (HID_UP_GENDESK | 0x000000b8)
++
+ struct a4tech_sc {
+       unsigned long quirks;
+       unsigned int hw_wheel;
+       __s32 delayed_value;
+ };
+ 
++static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi,
++                          struct hid_field *field, struct hid_usage *usage,
++                          unsigned long **bit, int *max)
++{
++      struct a4tech_sc *a4 = hid_get_drvdata(hdev);
++
++      if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 &&
++          usage->hid == A4_WHEEL_ORIENTATION) {
++              /*
++               * We do not want to have this usage mapped to anything as it's
++               * nonstandard and doesn't really behave like an HID report.
++               * It's only selecting the orientation (vertical/horizontal) of
++               * the previous mouse wheel report. The input_events will be
++               * generated once both reports are recorded in a4_event().
++               */
++              return -1;
++      }
++
++      return 0;
++
++}
++
+ static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+               struct hid_field *field, struct hid_usage *usage,
+               unsigned long **bit, int *max)
+@@ -53,8 +77,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
+       struct a4tech_sc *a4 = hid_get_drvdata(hdev);
+       struct input_dev *input;
+ 
+-      if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
+-                      !usage->type)
++      if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
+               return 0;
+ 
+       input = field->hidinput->input;
+@@ -65,7 +88,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
+                       return 1;
+               }
+ 
+-              if (usage->hid == 0x000100b8) {
++              if (usage->hid == A4_WHEEL_ORIENTATION) {
+                       input_event(input, EV_REL, value ? REL_HWHEEL :
+                                       REL_WHEEL, a4->delayed_value);
+                       return 1;
+@@ -129,6 +152,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices);
+ static struct hid_driver a4_driver = {
+       .name = "a4tech",
+       .id_table = a4_devices,
++      .input_mapping = a4_input_mapping,
+       .input_mapped = a4_input_mapped,
+       .event = a4_event,
+       .probe = a4_probe,
+diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
+index b83376077d72..cfa0cb22c9b3 100644
+--- a/drivers/hid/hid-tmff.c
++++ b/drivers/hid/hid-tmff.c
+@@ -34,6 +34,8 @@
+ 
+ #include "hid-ids.h"
+ 
++#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT      0xb320
++
+ static const signed short ff_rumble[] = {
+       FF_RUMBLE,
+       -1
+@@ -88,6 +90,7 @@ static int tmff_play(struct input_dev *dev, void *data,
+       struct hid_field *ff_field = tmff->ff_field;
+       int x, y;
+       int left, right;        /* Rumbling */
++      int motor_swap;
+ 
+       switch (effect->type) {
+       case FF_CONSTANT:
+@@ -112,6 +115,13 @@ static int tmff_play(struct input_dev *dev, void *data,
+                                       ff_field->logical_minimum,
+                                       ff_field->logical_maximum);
+ 
++              /* 2-in-1 strong motor is left */
++              if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
++                      motor_swap = left;
++                      left = right;
++                      right = motor_swap;
++              }
++
+               dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
+               ff_field->value[0] = left;
+               ff_field->value[1] = right;
+@@ -238,6 +248,8 @@ static const struct hid_device_id tm_devices[] = {
+               .driver_data = (unsigned long)ff_rumble },
+       { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304),   /* FireStorm Dual Power 2 (and 3) */
+               .driver_data = (unsigned long)ff_rumble },
++      { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT),   /* Dual Trigger 2-in-1 */
++              .driver_data = (unsigned long)ff_rumble },
+       { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323),   /* Dual Trigger 3-in-1 (PC Mode) */
+               .driver_data = (unsigned long)ff_rumble },
+       { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324),   /* Dual Trigger 3-in-1 (PS3 Mode) */
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 60e2d4cf1fe3..2e593874f5e0 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -848,6 +848,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
+               y >>= 1;
+               distance >>= 1;
+       }
++      if (features->type == INTUOSHT2)
++              distance = features->distance_max - distance;
+       input_report_abs(input, ABS_X, x);
+       input_report_abs(input, ABS_Y, y);
+       input_report_abs(input, ABS_DISTANCE, distance);
+@@ -1061,7 +1063,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
+       input_report_key(input, BTN_BASE2, (data[11] & 0x02));
+ 
+       if (data[12] & 0x80)
+-              input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
++              input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
+       else
+               input_report_abs(input, ABS_WHEEL, 0);
+ 
+diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
+index 35983c7c3137..87588198d68f 100644
+--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
++++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
+@@ -1402,6 +1402,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
+                               printk(KERN_DEBUG
+                                      "%s: %s: alloc urb for fifo %i failed",
+                                      hw->name, __func__, fifo->fifonum);
++                              continue;
+                       }
+                       fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
+                       fifo->iso[i].indx = i;
+@@ -1700,13 +1701,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
+ static int
+ setup_hfcsusb(struct hfcsusb *hw)
+ {
++      void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
+       u_char b;
++      int ret;
+ 
+       if (debug & DBG_HFC_CALL_TRACE)
+               printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+ 
++      if (!dmabuf)
++              return -ENOMEM;
++
++      ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
++
++      memcpy(&b, dmabuf, sizeof(u_char));
++      kfree(dmabuf);
++
+       /* check the chip id */
+-      if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
++      if (ret != 1) {
+               printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
+                      hw->name, __func__);
+               return 1;
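
The hfcsusb change above exists because read_reg_atomic() hands the buffer
to a USB control transfer, and USB transfer buffers must be DMA-able heap
memory, never a stack variable (with VMAP_STACK a stack address cannot be
DMA-mapped at all). The general shape of such a fix, sketched with plain
usb_control_msg(); the request and register values are made up, not this
driver's:

#include <linux/slab.h>
#include <linux/usb.h>

/* Hypothetical register read: kmalloc() the transfer buffer so the USB
 * core can DMA into it, then copy the result out and free it. */
static int read_chip_id(struct usb_device *udev, u8 *id)
{
        u8 *buf = kmalloc(1, GFP_KERNEL);   /* DMA-able, unlike the stack */
        int ret;

        if (!buf)
                return -ENOMEM;

        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                              0x01, USB_TYPE_VENDOR | USB_DIR_IN,
                              0, 0x16, buf, 1, 1000);
        if (ret == 1)
                *id = *buf;
        kfree(buf);
        return ret == 1 ? 0 : -EIO;
}
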
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index b7d3b62dae7f..1e17e6421da3 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -1630,7 +1630,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+       unsigned long freed;
+ 
+       c = container_of(shrink, struct dm_bufio_client, shrinker);
+-      if (!dm_bufio_trylock(c))
++      if (sc->gfp_mask & __GFP_FS)
++              dm_bufio_lock(c);
++      else if (!dm_bufio_trylock(c))
+               return SHRINK_STOP;
+ 
+       freed  = __scan(c, sc->nr_to_scan, sc->gfp_mask);
+diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
+index b9d1897bcf5b..bd9a45b94b55 100644
+--- a/drivers/md/dm-kcopyd.c
++++ b/drivers/md/dm-kcopyd.c
+@@ -545,8 +545,10 @@ static int run_io_job(struct kcopyd_job *job)
+        * no point in continuing.
+        */
+       if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
+-          job->master_job->write_err)
++          job->master_job->write_err) {
++              job->write_err = job->master_job->write_err;
+               return -EIO;
++      }
+ 
+       io_job_start(job->kc->throttle);
+ 
+@@ -598,6 +600,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
+                       else
+                               job->read_err = 1;
+                       push(&kc->complete_jobs, job);
++                      wake(kc);
+                       break;
+               }
+ 
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index d76e685206b3..8f070debe498 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1308,7 +1308,7 @@ void dm_table_event(struct dm_table *t)
+ }
+ EXPORT_SYMBOL(dm_table_event);
+ 
+-sector_t dm_table_get_size(struct dm_table *t)
++inline sector_t dm_table_get_size(struct dm_table *t)
+ {
+       return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
+ }
+@@ -1333,6 +1333,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
+       unsigned int l, n = 0, k = 0;
+       sector_t *node;
+ 
++      if (unlikely(sector >= dm_table_get_size(t)))
++              return &t->targets[t->num_targets];
++
+       for (l = 0; l < t->depth; l++) {
+               n = get_child(n, k);
+               node = get_node(t, l, n);
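
Note the return convention introduced above: for an out-of-range sector,
dm_table_find_target() answers with one past the last entry,
&t->targets[t->num_targets], a sentinel the callers can test (dm checks it
via dm_target_is_valid()) instead of walking the index into garbage. The
convention in a standalone sketch (names illustrative):

struct tgt { int data; };

/* An out-of-range lookup returns &arr[n], one past the end: legal to
 * form and compare against, never dereferenced. */
static struct tgt *find(struct tgt *arr, int n, int key)
{
        if (key < 0 || key >= n)
                return &arr[n];          /* sentinel: miss */
        return &arr[key];
}
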
+diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
+index 597098a43aba..b322821a6323 100644
+--- a/drivers/md/dm-zoned-metadata.c
++++ b/drivers/md/dm-zoned-metadata.c
+@@ -401,15 +401,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
+       sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
+       struct bio *bio;
+ 
++      if (dmz_bdev_is_dying(zmd->dev))
++              return ERR_PTR(-EIO);
++
+       /* Get a new block and a BIO to read it */
+       mblk = dmz_alloc_mblock(zmd, mblk_no);
+       if (!mblk)
+-              return NULL;
++              return ERR_PTR(-ENOMEM);
+ 
+       bio = bio_alloc(GFP_NOIO, 1);
+       if (!bio) {
+               dmz_free_mblock(zmd, mblk);
+-              return NULL;
++              return ERR_PTR(-ENOMEM);
+       }
+ 
+       spin_lock(&zmd->mblk_lock);
+@@ -540,8 +543,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
+       if (!mblk) {
+               /* Cache miss: read the block from disk */
+               mblk = dmz_get_mblock_slow(zmd, mblk_no);
+-              if (!mblk)
+-                      return ERR_PTR(-ENOMEM);
++              if (IS_ERR(mblk))
++                      return mblk;
+       }
+ 
+       /* Wait for on-going read I/O and check for error */
+@@ -569,16 +572,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
+ /*
+  * Issue a metadata block write BIO.
+  */
+-static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
+-                           unsigned int set)
++static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
++                          unsigned int set)
+ {
+       sector_t block = zmd->sb[set].block + mblk->no;
+       struct bio *bio;
+ 
++      if (dmz_bdev_is_dying(zmd->dev))
++              return -EIO;
++
+       bio = bio_alloc(GFP_NOIO, 1);
+       if (!bio) {
+               set_bit(DMZ_META_ERROR, &mblk->state);
+-              return;
++              return -ENOMEM;
+       }
+ 
+       set_bit(DMZ_META_WRITING, &mblk->state);
+@@ -590,6 +596,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
+       bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
+       bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
+       submit_bio(bio);
++
++      return 0;
+ }
+ 
+ /*
+@@ -601,6 +609,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
+       struct bio *bio;
+       int ret;
+ 
++      if (dmz_bdev_is_dying(zmd->dev))
++              return -EIO;
++
+       bio = bio_alloc(GFP_NOIO, 1);
+       if (!bio)
+               return -ENOMEM;
+@@ -658,22 +669,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
+ {
+       struct dmz_mblock *mblk;
+       struct blk_plug plug;
+-      int ret = 0;
++      int ret = 0, nr_mblks_submitted = 0;
+ 
+       /* Issue writes */
+       blk_start_plug(&plug);
+-      list_for_each_entry(mblk, write_list, link)
+-              dmz_write_mblock(zmd, mblk, set);
++      list_for_each_entry(mblk, write_list, link) {
++              ret = dmz_write_mblock(zmd, mblk, set);
++              if (ret)
++                      break;
++              nr_mblks_submitted++;
++      }
+       blk_finish_plug(&plug);
+ 
+       /* Wait for completion */
+       list_for_each_entry(mblk, write_list, link) {
++              if (!nr_mblks_submitted)
++                      break;
+               wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
+                              TASK_UNINTERRUPTIBLE);
+               if (test_bit(DMZ_META_ERROR, &mblk->state)) {
+                       clear_bit(DMZ_META_ERROR, &mblk->state);
+                       ret = -EIO;
+               }
++              nr_mblks_submitted--;
+       }
+ 
+       /* Flush drive cache (this will also sync data) */
+@@ -735,6 +753,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
+        */
+       dmz_lock_flush(zmd);
+ 
++      if (dmz_bdev_is_dying(zmd->dev)) {
++              ret = -EIO;
++              goto out;
++      }
++
+       /* Get dirty blocks */
+       spin_lock(&zmd->mblk_lock);
+       list_splice_init(&zmd->mblk_dirty_list, &write_list);
+@@ -1534,7 +1557,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
+       struct dm_zone *zone;
+ 
+       if (list_empty(&zmd->map_rnd_list))
+-              return NULL;
++              return ERR_PTR(-EBUSY);
+ 
+       list_for_each_entry(zone, &zmd->map_rnd_list, link) {
+               if (dmz_is_buf(zone))
+@@ -1545,7 +1568,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
+                       return dzone;
+       }
+ 
+-      return NULL;
++      return ERR_PTR(-EBUSY);
+ }
+ 
+ /*
+@@ -1556,7 +1579,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
+       struct dm_zone *zone;
+ 
+       if (list_empty(&zmd->map_seq_list))
+-              return NULL;
++              return ERR_PTR(-EBUSY);
+ 
+       list_for_each_entry(zone, &zmd->map_seq_list, link) {
+               if (!zone->bzone)
+@@ -1565,7 +1588,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
+                       return zone;
+       }
+ 
+-      return NULL;
++      return ERR_PTR(-EBUSY);
+ }
+ 
+ /*
+@@ -1623,6 +1646,10 @@ again:
+               /* Alloate a random zone */
+               dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
+               if (!dzone) {
++                      if (dmz_bdev_is_dying(zmd->dev)) {
++                              dzone = ERR_PTR(-EIO);
++                              goto out;
++                      }
+                       dmz_wait_for_free_zones(zmd);
+                       goto again;
+               }
+@@ -1720,6 +1747,10 @@ again:
+       /* Alloate a random zone */
+       bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
+       if (!bzone) {
++              if (dmz_bdev_is_dying(zmd->dev)) {
++                      bzone = ERR_PTR(-EIO);
++                      goto out;
++              }
+               dmz_wait_for_free_zones(zmd);
+               goto again;
+       }
+diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
+index 44a119e12f1a..a9f84a998476 100644
+--- a/drivers/md/dm-zoned-reclaim.c
++++ b/drivers/md/dm-zoned-reclaim.c
+@@ -37,7 +37,7 @@ enum {
+ /*
+  * Number of seconds of target BIO inactivity to consider the target idle.
+  */
+-#define DMZ_IDLE_PERIOD               (10UL * HZ)
++#define DMZ_IDLE_PERIOD                       (10UL * HZ)
+ 
+ /*
+  * Percentage of unmapped (free) random zones below which reclaim starts
+@@ -134,6 +134,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
+               set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
+ 
+       while (block < end_block) {
++              if (dev->flags & DMZ_BDEV_DYING)
++                      return -EIO;
++
+               /* Get a valid region from the source zone */
+               ret = dmz_first_valid_block(zmd, src_zone, &block);
+               if (ret <= 0)
+@@ -217,7 +220,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
+ 
+       dmz_unlock_flush(zmd);
+ 
+-      return 0;
++      return ret;
+ }
+ 
+ /*
+@@ -261,7 +264,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
+ 
+       dmz_unlock_flush(zmd);
+ 
+-      return 0;
++      return ret;
+ }
+ 
+ /*
+@@ -314,7 +317,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
+ 
+       dmz_unlock_flush(zmd);
+ 
+-      return 0;
++      return ret;
+ }
+ 
+ /*
+@@ -336,7 +339,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
+ /*
+  * Find a candidate zone for reclaim and process it.
+  */
+-static void dmz_reclaim(struct dmz_reclaim *zrc)
++static int dmz_do_reclaim(struct dmz_reclaim *zrc)
+ {
+       struct dmz_metadata *zmd = zrc->metadata;
+       struct dm_zone *dzone;
+@@ -346,8 +349,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
+ 
+       /* Get a data zone */
+       dzone = dmz_get_zone_for_reclaim(zmd);
+-      if (!dzone)
+-              return;
++      if (IS_ERR(dzone))
++              return PTR_ERR(dzone);
+ 
+       start = jiffies;
+ 
+@@ -393,13 +396,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
+ out:
+       if (ret) {
+               dmz_unlock_zone_reclaim(dzone);
+-              return;
++              return ret;
+       }
+ 
+-      (void) dmz_flush_metadata(zrc->metadata);
++      ret = dmz_flush_metadata(zrc->metadata);
++      if (ret) {
++              dmz_dev_debug(zrc->dev,
++                            "Metadata flush for zone %u failed, err %d\n",
++                            dmz_id(zmd, rzone), ret);
++              return ret;
++      }
+ 
+       dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
+                     dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
++      return 0;
+ }
+ 
+ /*
+@@ -444,6 +454,10 @@ static void dmz_reclaim_work(struct work_struct *work)
+       struct dmz_metadata *zmd = zrc->metadata;
+       unsigned int nr_rnd, nr_unmap_rnd;
+       unsigned int p_unmap_rnd;
++      int ret;
++
++      if (dmz_bdev_is_dying(zrc->dev))
++              return;
+ 
+       if (!dmz_should_reclaim(zrc)) {
+               mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
+@@ -473,7 +487,17 @@ static void dmz_reclaim_work(struct work_struct *work)
+                     (dmz_target_idle(zrc) ? "Idle" : "Busy"),
+                     p_unmap_rnd, nr_unmap_rnd, nr_rnd);
+ 
+-      dmz_reclaim(zrc);
++      ret = dmz_do_reclaim(zrc);
++      if (ret) {
++              dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
++              if (ret == -EIO)
++                      /*
++                       * LLD might be performing some error handling sequence
++                       * at the underlying device. To not interfere, do not
++                       * attempt to schedule the next reclaim run immediately.
++                       */
++                      return;
++      }
+ 
+       dmz_schedule_reclaim(zrc);
+ }
+diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
+index 532bfce7f072..1e004d975e78 100644
+--- a/drivers/md/dm-zoned-target.c
++++ b/drivers/md/dm-zoned-target.c
+@@ -133,6 +133,8 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
+ 
+       atomic_inc(&bioctx->ref);
+       generic_make_request(clone);
++      if (clone->bi_status == BLK_STS_IOERR)
++              return -EIO;
+ 
+       if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
+               zone->wp_block += nr_blocks;
+@@ -277,8 +279,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
+ 
+       /* Get the buffer zone. One will be allocated if needed */
+       bzone = dmz_get_chunk_buffer(zmd, zone);
+-      if (!bzone)
+-              return -ENOSPC;
++      if (IS_ERR(bzone))
++              return PTR_ERR(bzone);
+ 
+       if (dmz_is_readonly(bzone))
+               return -EROFS;
+@@ -389,6 +391,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
+ 
+       dmz_lock_metadata(zmd);
+ 
++      if (dmz->dev->flags & DMZ_BDEV_DYING) {
++              ret = -EIO;
++              goto out;
++      }
++
+       /*
+        * Get the data zone mapping the chunk. There may be no
+        * mapping for read and discard. If a mapping is obtained,
+@@ -493,6 +500,8 @@ static void dmz_flush_work(struct work_struct *work)
+ 
+       /* Flush dirty metadata blocks */
+       ret = dmz_flush_metadata(dmz->metadata);
++      if (ret)
++              dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
+ 
+       /* Process queued flush requests */
+       while (1) {
+@@ -513,22 +522,24 @@ static void dmz_flush_work(struct work_struct *work)
+  * Get a chunk work and start it to process a new BIO.
+  * If the BIO chunk has no work yet, create one.
+  */
+-static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
++static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+ {
+       unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
+       struct dm_chunk_work *cw;
++      int ret = 0;
+ 
+       mutex_lock(&dmz->chunk_lock);
+ 
+       /* Get the BIO chunk work. If one is not active yet, create one */
+       cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
+       if (!cw) {
+-              int ret;
+ 
+               /* Create a new chunk work */
+               cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
+-              if (!cw)
++              if (unlikely(!cw)) {
++                      ret = -ENOMEM;
+                       goto out;
++              }
+ 
+               INIT_WORK(&cw->work, dmz_chunk_work);
+               atomic_set(&cw->refcount, 0);
+@@ -539,7 +550,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+               ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
+               if (unlikely(ret)) {
+                       kfree(cw);
+-                      cw = NULL;
+                       goto out;
+               }
+       }
+@@ -547,10 +557,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+       bio_list_add(&cw->bio_list, bio);
+       dmz_get_chunk_work(cw);
+ 
++      dmz_reclaim_bio_acc(dmz->reclaim);
+       if (queue_work(dmz->chunk_wq, &cw->work))
+               dmz_get_chunk_work(cw);
+ out:
+       mutex_unlock(&dmz->chunk_lock);
++      return ret;
++}
++
++/*
++ * Check the backing device availability. If it's on the way out,
++ * start failing I/O. Reclaim and metadata components also call this
++ * function to cleanly abort operation in the event of such failure.
++ */
++bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
++{
++      struct gendisk *disk;
++
++      if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
++              disk = dmz_dev->bdev->bd_disk;
++              if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
++                      dmz_dev_warn(dmz_dev, "Backing device queue dying");
++                      dmz_dev->flags |= DMZ_BDEV_DYING;
++              } else if (disk->fops->check_events) {
++                      if (disk->fops->check_events(disk, 0) &
++                                      DISK_EVENT_MEDIA_CHANGE) {
++                              dmz_dev_warn(dmz_dev, "Backing device offline");
++                              dmz_dev->flags |= DMZ_BDEV_DYING;
++                      }
++              }
++      }
++
++      return dmz_dev->flags & DMZ_BDEV_DYING;
+ }
+ 
+ /*
+@@ -564,6 +602,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
+       sector_t sector = bio->bi_iter.bi_sector;
+       unsigned int nr_sectors = bio_sectors(bio);
+       sector_t chunk_sector;
++      int ret;
++
++      if (dmz_bdev_is_dying(dmz->dev))
++              return DM_MAPIO_KILL;
+ 
+       dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
+                     bio_op(bio), (unsigned long long)sector, nr_sectors,
+@@ -601,8 +643,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
+               dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
+ 
+       /* Now ready to handle this BIO */
+-      dmz_reclaim_bio_acc(dmz->reclaim);
+-      dmz_queue_chunk_work(dmz, bio);
++      ret = dmz_queue_chunk_work(dmz, bio);
++      if (ret) {
++              dmz_dev_debug(dmz->dev,
++                            "BIO op %d, can't process chunk %llu, err %i\n",
++                            bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
++                            ret);
++              return DM_MAPIO_REQUEUE;
++      }
+ 
+       return DM_MAPIO_SUBMITTED;
+ }
+@@ -855,6 +903,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti,
+ {
+       struct dmz_target *dmz = ti->private;
+ 
++      if (dmz_bdev_is_dying(dmz->dev))
++              return -ENODEV;
++
+       *bdev = dmz->dev->bdev;
+ 
+       return 0;
+diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
+index ed8de49c9a08..93a64529f219 100644
+--- a/drivers/md/dm-zoned.h
++++ b/drivers/md/dm-zoned.h
+@@ -56,6 +56,8 @@ struct dmz_dev {
+ 
+       unsigned int            nr_zones;
+ 
++      unsigned int            flags;
++
+       sector_t                zone_nr_sectors;
+       unsigned int            zone_nr_sectors_shift;
+ 
+@@ -67,6 +69,9 @@ struct dmz_dev {
+                                (dev)->zone_nr_sectors_shift)
+ #define dmz_chunk_block(dev, b)       ((b) & ((dev)->zone_nr_blocks - 1))
+ 
++/* Device flags. */
++#define DMZ_BDEV_DYING                (1 << 0)
++
+ /*
+  * Zone descriptor.
+  */
+@@ -245,4 +250,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc);
+ void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
+ void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
+ 
++/*
++ * Functions defined in dm-zoned-target.c
++ */
++bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
++
+ #endif /* DM_ZONED_H */
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index 58b319757b1e..8aae0624a297 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
+ 
+       new_parent = shadow_current(s);
+ 
++      pn = dm_block_data(new_parent);
++      size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
++              sizeof(__le64) : s->info->value_type.size;
++
++      /* create & init the left block */
+       r = new_block(s->info, &left);
+       if (r < 0)
+               return r;
+ 
++      ln = dm_block_data(left);
++      nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
++
++      ln->header.flags = pn->header.flags;
++      ln->header.nr_entries = cpu_to_le32(nr_left);
++      ln->header.max_entries = pn->header.max_entries;
++      ln->header.value_size = pn->header.value_size;
++      memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
++      memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
++
++      /* create & init the right block */
+       r = new_block(s->info, &right);
+       if (r < 0) {
+               unlock_block(s->info, left);
+               return r;
+       }
+ 
+-      pn = dm_block_data(new_parent);
+-      ln = dm_block_data(left);
+       rn = dm_block_data(right);
+-
+-      nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
+       nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
+ 
+-      ln->header.flags = pn->header.flags;
+-      ln->header.nr_entries = cpu_to_le32(nr_left);
+-      ln->header.max_entries = pn->header.max_entries;
+-      ln->header.value_size = pn->header.value_size;
+-
+       rn->header.flags = pn->header.flags;
+       rn->header.nr_entries = cpu_to_le32(nr_right);
+       rn->header.max_entries = pn->header.max_entries;
+       rn->header.value_size = pn->header.value_size;
+-
+-      memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
+       memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
+-
+-      size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
+-              sizeof(__le64) : s->info->value_type.size;
+-      memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
+       memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
+              nr_right * size);
+ 
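
The btree_split_beneath() rework above only reorders existing code: the
split sizes are computed from the parent first, and the left child is
fully populated before the right block is even allocated, so a failed
second allocation can no longer leave a half-initialized left node
behind. The shape of the split, as a standalone sketch with hypothetical
node types:

    #include <string.h>

    struct sketch_node {
            unsigned nr;
            unsigned long keys[16];
    };

    static void sketch_split(const struct sketch_node *parent,
                             struct sketch_node *left,
                             struct sketch_node *right)
    {
            unsigned nr_left = parent->nr / 2;          /* low half */
            unsigned nr_right = parent->nr - nr_left;   /* remainder */

            left->nr = nr_left;
            memcpy(left->keys, parent->keys,
                   nr_left * sizeof(parent->keys[0]));

            right->nr = nr_right;
            memcpy(right->keys, parent->keys + nr_left,
                   nr_right * sizeof(parent->keys[0]));
    }
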
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index 4aed69d9dd17..b23cac2c4738 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -248,7 +248,7 @@ static int out(struct sm_metadata *smm)
+       }
+ 
+       if (smm->recursion_count == 1)
+-              apply_bops(smm);
++              r = apply_bops(smm);
+ 
+       smm->recursion_count--;
+ 
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 60d0c270af85..c1eeba1906fd 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2153,6 +2153,15 @@ static void bond_miimon_commit(struct bonding *bond)
+       bond_for_each_slave(bond, slave, iter) {
+               switch (slave->new_link) {
+               case BOND_LINK_NOCHANGE:
++                      /* For 802.3ad mode, check current slave speed and
++                       * duplex again in case its port was disabled after
++                       * invalid speed/duplex reporting but recovered before
++                       * link monitoring could make a decision on the actual
++                       * link status
++                       */
++                      if (BOND_MODE(bond) == BOND_MODE_8023AD &&
++                          slave->link == BOND_LINK_UP)
++                              bond_3ad_adapter_speed_duplex_changed(slave);
+                       continue;
+ 
+               case BOND_LINK_UP:
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 7d61d8801220..d92113db4fb9 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -1217,6 +1217,8 @@ int register_candev(struct net_device *dev)
+               return -EINVAL;
+ 
+       dev->rtnl_link_ops = &can_link_ops;
++      netif_carrier_off(dev);
++
+       return register_netdev(dev);
+ }
+ EXPORT_SYMBOL_GPL(register_candev);
+diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
+index dd56133cc461..fc9f8b01ecae 100644
+--- a/drivers/net/can/sja1000/peak_pcmcia.c
++++ b/drivers/net/can/sja1000/peak_pcmcia.c
+@@ -487,7 +487,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
+               if (!netdev)
+                       continue;
+ 
+-              strncpy(name, netdev->name, IFNAMSIZ);
++              strlcpy(name, netdev->name, IFNAMSIZ);
+ 
+               unregister_sja1000dev(netdev);
+ 
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+index d68c79f9a4b9..059282a6065c 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+@@ -881,7 +881,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
+ 
+               dev_prev_siblings = dev->prev_siblings;
+               dev->state &= ~PCAN_USB_STATE_CONNECTED;
+-              strncpy(name, netdev->name, IFNAMSIZ);
++              strlcpy(name, netdev->name, IFNAMSIZ);
+ 
+               unregister_netdev(netdev);
+ 
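
Both CAN hunks above (and the qed hunk further down) replace strncpy()
with strlcpy() when saving netdev->name: if the source fills the buffer,
strncpy() copies IFNAMSIZ bytes and never writes a terminating NUL,
while strlcpy() always terminates. strlcpy() is a kernel/BSD API, so the
userspace sketch below emulates it with snprintf():

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char dst[8];
            const char *src = "can0-very-long-name";   /* longer than dst */

            strncpy(dst, src, sizeof(dst));        /* dst now has no '\0' */
            snprintf(dst, sizeof(dst), "%s", src); /* always terminated */
            printf("%s\n", dst);                   /* prints "can0-ve" */
            return 0;
    }
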
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+index 79053d2ce7a3..338683e5ef1e 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+@@ -3270,7 +3270,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+       if (!adapter->regs) {
+               dev_err(&pdev->dev, "cannot map device registers\n");
+               err = -ENOMEM;
+-              goto out_free_adapter;
++              goto out_free_adapter_nofail;
+       }
+ 
+       adapter->pdev = pdev;
+@@ -3390,6 +3390,9 @@ out_free_dev:
+               if (adapter->port[i])
+                       free_netdev(adapter->port[i]);
+ 
++out_free_adapter_nofail:
++      kfree_skb(adapter->nofail_skb);
++
+ out_free_adapter:
+       kfree(adapter);
+ 
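
The cxgb3 fix above is the usual goto-unwind pattern: each later failure
jumps to a label that releases everything allocated so far, and the
labels fall through top to bottom (the new out_free_adapter_nofail label
frees the nofail_skb and then falls into out_free_adapter). A minimal
sketch of the idiom, with hypothetical names:

    #include <stdlib.h>

    struct pair { void *a; void *b; };

    static int setup_fails(void) { return 1; } /* stand-in for a later step */

    int sketch_init(struct pair *p)
    {
            p->a = malloc(64);
            if (!p->a)
                    return -1;          /* nothing else to unwind yet */

            p->b = malloc(64);
            if (!p->b)
                    goto out_free_a;    /* 'b' was never allocated */

            if (setup_fails())
                    goto out_free_b;    /* unwind both, most recent first */

            return 0;

    out_free_b:
            free(p->b);                 /* falls through to free 'a' */
    out_free_a:
            free(p->a);
            return -1;
    }
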
+diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
+index c27054b8ce81..1bfe9544b3c1 100644
+--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
++++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
+@@ -157,6 +157,7 @@ struct hip04_priv {
+       unsigned int reg_inten;
+ 
+       struct napi_struct napi;
++      struct device *dev;
+       struct net_device *ndev;
+ 
+       struct tx_desc *tx_desc;
+@@ -185,7 +186,7 @@ struct hip04_priv {
+ 
+ static inline unsigned int tx_count(unsigned int head, unsigned int tail)
+ {
+-      return (head - tail) % (TX_DESC_NUM - 1);
++      return (head - tail) % TX_DESC_NUM;
+ }
+ 
+ static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
+@@ -387,7 +388,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
+               }
+ 
+               if (priv->tx_phys[tx_tail]) {
+-                      dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
++                      dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
+                                        priv->tx_skb[tx_tail]->len,
+                                        DMA_TO_DEVICE);
+                       priv->tx_phys[tx_tail] = 0;
+@@ -437,8 +438,8 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+               return NETDEV_TX_BUSY;
+       }
+ 
+-      phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
+-      if (dma_mapping_error(&ndev->dev, phys)) {
++      phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
++      if (dma_mapping_error(priv->dev, phys)) {
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+@@ -497,6 +498,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
+       u16 len;
+       u32 err;
+ 
++      /* clean up tx descriptors */
++      tx_remaining = hip04_tx_reclaim(ndev, false);
++
+       while (cnt && !last) {
+               buf = priv->rx_buf[priv->rx_head];
+               skb = build_skb(buf, priv->rx_buf_size);
+@@ -505,7 +509,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
+                       goto refill;
+               }
+ 
+-              dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
++              dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
+                                RX_BUF_SIZE, DMA_FROM_DEVICE);
+               priv->rx_phys[priv->rx_head] = 0;
+ 
+@@ -534,9 +538,9 @@ refill:
+               buf = netdev_alloc_frag(priv->rx_buf_size);
+               if (!buf)
+                       goto done;
+-              phys = dma_map_single(&ndev->dev, buf,
++              phys = dma_map_single(priv->dev, buf,
+                                     RX_BUF_SIZE, DMA_FROM_DEVICE);
+-              if (dma_mapping_error(&ndev->dev, phys))
++              if (dma_mapping_error(priv->dev, phys))
+                       goto done;
+               priv->rx_buf[priv->rx_head] = buf;
+               priv->rx_phys[priv->rx_head] = phys;
+@@ -557,8 +561,7 @@ refill:
+       }
+       napi_complete_done(napi, rx);
+ done:
+-      /* clean up tx descriptors and start a new timer if necessary */
+-      tx_remaining = hip04_tx_reclaim(ndev, false);
++      /* start a new timer if necessary */
+       if (rx < budget && tx_remaining)
+               hip04_start_tx_timer(priv);
+ 
+@@ -640,9 +643,9 @@ static int hip04_mac_open(struct net_device *ndev)
+       for (i = 0; i < RX_DESC_NUM; i++) {
+               dma_addr_t phys;
+ 
+-              phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
++              phys = dma_map_single(priv->dev, priv->rx_buf[i],
+                                     RX_BUF_SIZE, DMA_FROM_DEVICE);
+-              if (dma_mapping_error(&ndev->dev, phys))
++              if (dma_mapping_error(priv->dev, phys))
+                       return -EIO;
+ 
+               priv->rx_phys[i] = phys;
+@@ -676,7 +679,7 @@ static int hip04_mac_stop(struct net_device *ndev)
+ 
+       for (i = 0; i < RX_DESC_NUM; i++) {
+               if (priv->rx_phys[i]) {
+-                      dma_unmap_single(&ndev->dev, priv->rx_phys[i],
++                      dma_unmap_single(priv->dev, priv->rx_phys[i],
+                                        RX_BUF_SIZE, DMA_FROM_DEVICE);
+                       priv->rx_phys[i] = 0;
+               }
+@@ -820,6 +823,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
+               return -ENOMEM;
+ 
+       priv = netdev_priv(ndev);
++      priv->dev = d;
+       priv->ndev = ndev;
+       platform_set_drvdata(pdev, ndev);
+       SET_NETDEV_DEV(ndev, &pdev->dev);
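
The tx_count() change above fixes classic ring-buffer accounting: with
head and tail advancing modulo the ring size, the number of descriptors
in flight is (head - tail) reduced modulo TX_DESC_NUM, not modulo
TX_DESC_NUM - 1. The unsigned wraparound only cancels because the ring
size is a power of two. A checkable sketch:

    #include <assert.h>

    #define RING_SIZE 256u   /* power of two, like TX_DESC_NUM */

    static unsigned int ring_count(unsigned int head, unsigned int tail)
    {
            return (head - tail) % RING_SIZE;
    }

    int main(void)
    {
            assert(ring_count(5, 2) == 3);
            assert(ring_count(2, 5) == 253);  /* head already wrapped */
            return 0;
    }
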
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
+index 7746417130bd..c5d9f290ec4c 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
+@@ -939,7 +939,7 @@ static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
+                                               snprintf(bit_name, 30,
+                                                        p_aeu->bit_name, num);
+                                       else
+-                                              strncpy(bit_name,
++                                              strlcpy(bit_name,
+                                                       p_aeu->bit_name, 30);
+ 
+                                       /* We now need to pass bitmask in its
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+index 1e13dea66989..c9258aabca2d 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+@@ -398,7 +398,7 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
+       /* Vendor specific information */
+       dev->vendor_id = cdev->vendor_id;
+       dev->vendor_part_id = cdev->device_id;
+-      dev->hw_ver = 0;
++      dev->hw_ver = cdev->chip_rev;
+       dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
+                     (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
+ 
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 4b0144b2a252..e2050afaab7a 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1220,6 +1220,7 @@ static const struct usb_device_id products[] = {
+       {QMI_FIXED_INTF(0x2001, 0x7e35, 4)},    /* D-Link DWM-222 */
+       {QMI_FIXED_INTF(0x2020, 0x2031, 4)},    /* Olicard 600 */
+       {QMI_FIXED_INTF(0x2020, 0x2033, 4)},    /* BroadMobi BM806U */
++      {QMI_FIXED_INTF(0x2020, 0x2060, 4)},    /* BroadMobi BM818 */
+       {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
+       {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
+       {QMI_FIXED_INTF(0x1199, 0x68a2, 8)},    /* Sierra Wireless MC7710 in QMI mode */
+diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
+index 56f2112e0cd8..85df2e009310 100644
+--- a/drivers/nfc/st-nci/se.c
++++ b/drivers/nfc/st-nci/se.c
+@@ -344,6 +344,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
+ 
+               transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
+                                           skb->len - 2, GFP_KERNEL);
++              if (!transaction)
++                      return -ENOMEM;
+ 
+               transaction->aid_len = skb->data[1];
+               memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
+diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
+index 3a98563d4a12..eac608a457f0 100644
+--- a/drivers/nfc/st21nfca/se.c
++++ b/drivers/nfc/st21nfca/se.c
+@@ -326,6 +326,8 @@ static int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
+ 
+               transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
+                                                  skb->len - 2, GFP_KERNEL);
++              if (!transaction)
++                      return -ENOMEM;
+ 
+               transaction->aid_len = skb->data[1];
+               memcpy(transaction->aid, &skb->data[2],
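
Both NFC hunks above add the standard allocation check: a
devm_kzalloc() result must be tested before the first dereference.
Userspace analogue with calloc() (the bounds check on aid_len is an
extra safety assumption for the sketch, not part of the patch):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    struct transaction {
            size_t aid_len;
            unsigned char aid[32];
    };

    int sketch_parse(const unsigned char *data)
    {
            struct transaction *t = calloc(1, sizeof(*t));

            if (!t)
                    return -ENOMEM;     /* previously: NULL dereference */

            t->aid_len = data[1];
            if (t->aid_len > sizeof(t->aid)) {  /* sketch-only guard */
                    free(t);
                    return -EINVAL;
            }
            memcpy(t->aid, &data[2], t->aid_len);
            free(t);
            return 0;
    }
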
+diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
+index e7cce412f2cf..cb647c8c7b68 100644
+--- a/fs/ceph/locks.c
++++ b/fs/ceph/locks.c
+@@ -78,8 +78,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
+               req->r_wait_for_completion = ceph_lock_wait_for_completion;
+ 
+       err = ceph_mdsc_do_request(mdsc, inode, req);
+-
+-      if (operation == CEPH_MDS_OP_GETFILELOCK) {
++      if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
+               fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
+               if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
+                       fl->fl_type = F_RDLCK;
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 23326b0cd562..58a502e622aa 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -2168,7 +2168,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, struct smb_rqst *old_rq)
+ static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
+                                  unsigned int buflen)
+ {
+-      sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
++      void *addr;
++      /*
++       * VMAP_STACK (at least) puts stack into the vmalloc address space
++       */
++      if (is_vmalloc_addr(buf))
++              addr = vmalloc_to_page(buf);
++      else
++              addr = virt_to_page(buf);
++      sg_set_page(sg, addr, buflen, offset_in_page(buf));
+ }
+ 
+ static struct scatterlist *
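
The smb2_sg_set_buf() change above picks the right address translation
for scatterlists: buffers living in the vmalloc area (for example
on-stack buffers under CONFIG_VMAP_STACK) must go through
vmalloc_to_page(), while linearly mapped buffers use virt_to_page().
The dispatch, with the kernel helpers reduced to hypothetical stubs:

    struct page;                        /* opaque, as in the kernel */

    /* hypothetical stand-ins for the kernel helpers */
    int is_vmalloc_addr_stub(const void *addr);
    struct page *vmalloc_to_page_stub(const void *addr);
    struct page *virt_to_page_stub(const void *addr);

    static struct page *buf_to_page(const void *buf)
    {
            if (is_vmalloc_addr_stub(buf))
                    return vmalloc_to_page_stub(buf);
            return virt_to_page_stub(buf);
    }
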
+diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
+index a73144b3cb8c..22cff39cca29 100644
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -433,7 +433,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
+ 
+ extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
+ extern void nfs4_put_state_owner(struct nfs4_state_owner *);
+-extern void nfs4_purge_state_owners(struct nfs_server *);
++extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
++extern void nfs4_free_state_owners(struct list_head *head);
+ extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
+ extern void nfs4_put_open_state(struct nfs4_state *);
+ extern void nfs4_close_state(struct nfs4_state *, fmode_t);
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 8f96f6548dc8..0924b68b5657 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -739,9 +739,12 @@ out:
+ 
+ static void nfs4_destroy_server(struct nfs_server *server)
+ {
++      LIST_HEAD(freeme);
++
+       nfs_server_return_all_delegations(server);
+       unset_pnfs_layoutdriver(server);
+-      nfs4_purge_state_owners(server);
++      nfs4_purge_state_owners(server, &freeme);
++      nfs4_free_state_owners(&freeme);
+ }
+ 
+ /*
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 85ec07e4aa91..f92bfc787c5f 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -614,24 +614,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
+ /**
+  * nfs4_purge_state_owners - Release all cached state owners
+  * @server: nfs_server with cached state owners to release
++ * @head: resulting list of state owners
+  *
+  * Called at umount time.  Remaining state owners will be on
+  * the LRU with ref count of zero.
++ * Note that the state owners are not freed, but are added
++ * to the list @head, which can later be used as an argument
++ * to nfs4_free_state_owners.
+  */
+-void nfs4_purge_state_owners(struct nfs_server *server)
++void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
+ {
+       struct nfs_client *clp = server->nfs_client;
+       struct nfs4_state_owner *sp, *tmp;
+-      LIST_HEAD(doomed);
+ 
+       spin_lock(&clp->cl_lock);
+       list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
+-              list_move(&sp->so_lru, &doomed);
++              list_move(&sp->so_lru, head);
+               nfs4_remove_state_owner_locked(sp);
+       }
+       spin_unlock(&clp->cl_lock);
++}
+ 
+-      list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
++/**
++ * nfs4_free_state_owners - Release all cached state owners
++ * @head: resulting list of state owners
++ *
++ * Frees a list of state owners that was generated by
++ * nfs4_purge_state_owners
++ */
++void nfs4_free_state_owners(struct list_head *head)
++{
++      struct nfs4_state_owner *sp, *tmp;
++
++      list_for_each_entry_safe(sp, tmp, head, so_lru) {
+               list_del(&sp->so_lru);
+               nfs4_free_state_owner(sp);
+       }
+@@ -1782,12 +1797,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
+       struct nfs4_state_owner *sp;
+       struct nfs_server *server;
+       struct rb_node *pos;
++      LIST_HEAD(freeme);
+       int status = 0;
+ 
+ restart:
+       rcu_read_lock();
+       list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+-              nfs4_purge_state_owners(server);
++              nfs4_purge_state_owners(server, &freeme);
+               spin_lock(&clp->cl_lock);
+               for (pos = rb_first(&server->state_owners);
+                    pos != NULL;
+@@ -1816,6 +1832,7 @@ restart:
+               spin_unlock(&clp->cl_lock);
+       }
+       rcu_read_unlock();
++      nfs4_free_state_owners(&freeme);
+       return 0;
+ }
+ 
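
The NFS changes above split teardown into two phases so nothing can
sleep while clp->cl_lock is held: the state owners are unlinked onto a
private list under the lock, then freed after it is dropped. Userspace
sketch of the same two-phase pattern with a mutex (all names
hypothetical):

    #include <pthread.h>
    #include <stdlib.h>

    struct owner { struct owner *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct owner *lru;

    /* phase 1: detach everything while holding the lock */
    static struct owner *purge_owners(void)
    {
            pthread_mutex_lock(&lock);
            struct owner *head = lru;
            lru = NULL;
            pthread_mutex_unlock(&lock);
            return head;
    }

    /* phase 2: free at leisure, no lock held */
    static void free_owners(struct owner *head)
    {
            while (head) {
                    struct owner *next = head->next;
                    free(head);
                    head = next;
            }
    }
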
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index 7a908d683258..a609d480606d 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -854,6 +854,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
+       /* len == 0 means wake all */
+       struct userfaultfd_wake_range range = { .len = 0, };
+       unsigned long new_flags;
++      bool still_valid;
+ 
+       ACCESS_ONCE(ctx->released) = true;
+ 
+@@ -869,8 +870,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
+        * taking the mmap_sem for writing.
+        */
+       down_write(&mm->mmap_sem);
+-      if (!mmget_still_valid(mm))
+-              goto skip_mm;
++      still_valid = mmget_still_valid(mm);
+       prev = NULL;
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+               cond_resched();
+@@ -881,19 +881,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
+                       continue;
+               }
+               new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
+-              prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
+-                               new_flags, vma->anon_vma,
+-                               vma->vm_file, vma->vm_pgoff,
+-                               vma_policy(vma),
+-                               NULL_VM_UFFD_CTX);
+-              if (prev)
+-                      vma = prev;
+-              else
+-                      prev = vma;
++              if (still_valid) {
++                      prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
++                                       new_flags, vma->anon_vma,
++                                       vma->vm_file, vma->vm_pgoff,
++                                       vma_policy(vma),
++                                       NULL_VM_UFFD_CTX);
++                      if (prev)
++                              vma = prev;
++                      else
++                              prev = vma;
++              }
+               vma->vm_flags = new_flags;
+               vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+       }
+-skip_mm:
+       up_write(&mm->mmap_sem);
+       mmput(mm);
+ wakeup:
+diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
+index 1daa965f1e08..4e6f2c8574f7 100644
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -789,6 +789,7 @@ xfs_setattr_nonsize(
+ 
+ out_cancel:
+       xfs_trans_cancel(tp);
++      xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ out_dqrele:
+       xfs_qm_dqrele(udqp);
+       xfs_qm_dqrele(gdqp);
+diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
+index aa08d4184608..92784b290564 100644
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -277,6 +277,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
+       }
+ }
+ 
++static void irq_sysfs_del(struct irq_desc *desc)
++{
++      /*
++       * If irq_sysfs_init() has not yet been invoked (early boot), then
++       * irq_kobj_base is NULL and the descriptor was never added.
++       * kobject_del() complains about a object with no parent, so make
++       * it conditional.
++       */
++      if (irq_kobj_base)
++              kobject_del(&desc->kobj);
++}
++
+ static int __init irq_sysfs_init(void)
+ {
+       struct irq_desc *desc;
+@@ -307,6 +319,7 @@ static struct kobj_type irq_kobj_type = {
+ };
+ 
+ static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
++static void irq_sysfs_del(struct irq_desc *desc) {}
+ 
+ #endif /* CONFIG_SYSFS */
+ 
+@@ -420,7 +433,7 @@ static void free_desc(unsigned int irq)
+        * The sysfs entry must be serialized against a concurrent
+        * irq_sysfs_init() as well.
+        */
+-      kobject_del(&desc->kobj);
++      irq_sysfs_del(desc);
+       delete_irq_desc(irq);
+ 
+       /*
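
The irqdesc fix above makes teardown symmetric with setup: descriptors
created before irq_sysfs_init() runs were never added to sysfs, so
free_desc() must not kobject_del() them. The general shape (only undo
what actually happened), as a small sketch:

    #include <stdbool.h>

    static bool sysfs_ready;            /* like irq_kobj_base != NULL */

    struct sketch_desc { bool added; };

    static void sketch_add(struct sketch_desc *d)
    {
            if (sysfs_ready)
                    d->added = true;    /* kobject_add() would go here */
    }

    static void sketch_del(struct sketch_desc *d)
    {
            if (d->added)               /* mirrors the irq_kobj_base check */
                    d->added = false;   /* kobject_del() would go here */
    }
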
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 930f2aa3bb4d..1adc2e6c50f9 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -33,6 +33,7 @@
+ #include <linux/page_idle.h>
+ #include <linux/shmem_fs.h>
+ #include <linux/oom.h>
++#include <linux/page_owner.h>
+ 
+ #include <asm/tlb.h>
+ #include <asm/pgalloc.h>
+@@ -2387,6 +2388,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
+       }
+ 
+       ClearPageCompound(head);
++
++      split_page_owner(head, HPAGE_PMD_ORDER);
++
+       /* See comment in __split_huge_page_tail() */
+       if (PageAnon(head)) {
+               /* Additional pin to radix tree of swap cache */
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index 685049a9048d..c5317a7f05e9 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -52,6 +52,7 @@
+ #include <linux/zpool.h>
+ #include <linux/mount.h>
+ #include <linux/migrate.h>
++#include <linux/wait.h>
+ #include <linux/pagemap.h>
+ 
+ #define ZSPAGE_MAGIC  0x58
+@@ -267,6 +268,10 @@ struct zs_pool {
+ #ifdef CONFIG_COMPACTION
+       struct inode *inode;
+       struct work_struct free_work;
++      /* A wait queue for when migration races with async_free_zspage() */
++      struct wait_queue_head migration_wait;
++      atomic_long_t isolated_pages;
++      bool destroying;
+ #endif
+ };
+ 
+@@ -1878,6 +1883,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
+       zspage->isolated--;
+ }
+ 
++static void putback_zspage_deferred(struct zs_pool *pool,
++                                  struct size_class *class,
++                                  struct zspage *zspage)
++{
++      enum fullness_group fg;
++
++      fg = putback_zspage(class, zspage);
++      if (fg == ZS_EMPTY)
++              schedule_work(&pool->free_work);
++
++}
++
++static inline void zs_pool_dec_isolated(struct zs_pool *pool)
++{
++      VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
++      atomic_long_dec(&pool->isolated_pages);
++      /*
++       * There's no possibility of racing, since wait_for_isolated_drain()
++       * checks the isolated count under &class->lock after enqueuing
++       * on migration_wait.
++       */
++      if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
++              wake_up_all(&pool->migration_wait);
++}
++
+ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
+                               struct page *newpage, struct page *oldpage)
+ {
+@@ -1947,6 +1977,7 @@ bool zs_page_isolate(struct page *page, isolate_mode_t mode)
+        */
+       if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
+               get_zspage_mapping(zspage, &class_idx, &fullness);
++              atomic_long_inc(&pool->isolated_pages);
+               remove_zspage(class, zspage, fullness);
+       }
+ 
+@@ -2046,8 +2077,16 @@ int zs_page_migrate(struct address_space *mapping, struct page *newpage,
+        * Page migration is done so let's putback isolated zspage to
+        * the list if @page is final isolated subpage in the zspage.
+        */
+-      if (!is_zspage_isolated(zspage))
+-              putback_zspage(class, zspage);
++      if (!is_zspage_isolated(zspage)) {
++              /*
++               * We cannot race with zs_destroy_pool() here because we wait
++               * for isolation to hit zero before we start destroying.
++               * Also, we ensure that everyone can see pool->destroying before
++               * we start waiting.
++               */
++              putback_zspage_deferred(pool, class, zspage);
++              zs_pool_dec_isolated(pool);
++      }
+ 
+       reset_page(page);
+       put_page(page);
+@@ -2093,13 +2132,12 @@ void zs_page_putback(struct page *page)
+       spin_lock(&class->lock);
+       dec_zspage_isolation(zspage);
+       if (!is_zspage_isolated(zspage)) {
+-              fg = putback_zspage(class, zspage);
+               /*
+                * Due to page_lock, we cannot free zspage immediately
+                * so let's defer.
+                */
+-              if (fg == ZS_EMPTY)
+-                      schedule_work(&pool->free_work);
++              putback_zspage_deferred(pool, class, zspage);
++              zs_pool_dec_isolated(pool);
+       }
+       spin_unlock(&class->lock);
+ }
+@@ -2123,8 +2161,36 @@ static int zs_register_migration(struct zs_pool *pool)
+       return 0;
+ }
+ 
++static bool pool_isolated_are_drained(struct zs_pool *pool)
++{
++      return atomic_long_read(&pool->isolated_pages) == 0;
++}
++
++/* Function for resolving migration */
++static void wait_for_isolated_drain(struct zs_pool *pool)
++{
++
++      /*
++       * We're in the process of destroying the pool, so there are no
++       * active allocations. zs_page_isolate() fails for completely free
++       * zspages, so we need only wait for the zs_pool's isolated
++       * count to hit zero.
++       */
++      wait_event(pool->migration_wait,
++                 pool_isolated_are_drained(pool));
++}
++
+ static void zs_unregister_migration(struct zs_pool *pool)
+ {
++      pool->destroying = true;
++      /*
++       * We need a memory barrier here to ensure global visibility of
++       * pool->destroying. Thus pool->isolated pages will either be 0 in which
++       * case we don't care, or it will be > 0 and pool->destroying will
++       * ensure that we wake up once isolation hits 0.
++       */
++      smp_mb();
++      wait_for_isolated_drain(pool); /* This can block */
+       flush_work(&pool->free_work);
+       iput(pool->inode);
+ }
+@@ -2365,6 +2431,8 @@ struct zs_pool *zs_create_pool(const char *name)
+       if (!pool->name)
+               goto err;
+ 
++      init_waitqueue_head(&pool->migration_wait);
++
+       if (create_cache(pool))
+               goto err;
+ 
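
The zsmalloc hunks above implement a counted drain: page migration
raises isolated_pages, pool destruction publishes 'destroying' and then
sleeps until the count reaches zero, and the final decrement wakes the
waiter. The same protocol with POSIX threads, where the mutex stands in
for the memory barrier and waitqueue (names hypothetical):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
    static long isolated;
    static bool destroying;

    static void dec_isolated(void)      /* like zs_pool_dec_isolated() */
    {
            pthread_mutex_lock(&lock);
            if (--isolated == 0 && destroying)
                    pthread_cond_broadcast(&drained);  /* wake_up_all() */
            pthread_mutex_unlock(&lock);
    }

    static void wait_for_drain(void)    /* like zs_unregister_migration() */
    {
            pthread_mutex_lock(&lock);
            destroying = true;          /* the mutex orders this against
                                         * the count, like smp_mb() */
            while (isolated != 0)
                    pthread_cond_wait(&drained, &lock);
            pthread_mutex_unlock(&lock);
    }
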
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index f9c6e8ca1fcb..100b4f88179a 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -2273,8 +2273,10 @@ static int compat_do_replace(struct net *net, void __user *user,
+       state.buf_kern_len = size64;
+ 
+       ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
+-      if (WARN_ON(ret < 0))
++      if (WARN_ON(ret < 0)) {
++              vfree(entries_tmp);
+               goto out_unlock;
++      }
+ 
+       vfree(entries_tmp);
+       tmp.entries_size = size64;
+diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
+index 53ea2d48896c..92b2641ab93b 100644
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -1330,7 +1330,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
+       struct ceph_osds up, acting;
+       bool force_resend = false;
+       bool unpaused = false;
+-      bool legacy_change;
++      bool legacy_change = false;
+       bool split = false;
+       bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
+       bool recovery_deletes = ceph_osdmap_flag(osdc,
+@@ -1426,15 +1426,14 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
+               t->osd = acting.primary;
+       }
+ 
+-      if (unpaused || legacy_change || force_resend ||
+-          (split && con && CEPH_HAVE_FEATURE(con->peer_features,
+-                                             RESEND_ON_SPLIT)))
++      if (unpaused || legacy_change || force_resend || split)
+               ct_res = CALC_TARGET_NEED_RESEND;
+       else
+               ct_res = CALC_TARGET_NO_ACTION;
+ 
+ out:
+-      dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
++      dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
++           legacy_change, force_resend, split, ct_res, t->osd);
+       return ct_res;
+ }
+ 
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index a3f1dc7cf538..dbf17d3596a6 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -1128,7 +1128,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
+               return -ENOENT;
+ 
+       write_lock_bh(&ip_set_ref_lock);
+-      if (set->ref != 0) {
++      if (set->ref != 0 || set->ref_netlink != 0) {
+               ret = -IPSET_ERR_REFERENCED;
+               goto out;
+       }
+diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
+index 9aa741d27279..0480ec4c8035 100644
+--- a/sound/soc/davinci/davinci-mcasp.c
++++ b/sound/soc/davinci/davinci-mcasp.c
+@@ -1158,6 +1158,28 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
+       return ret;
+ }
+ 
++static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params,
++                                          struct snd_pcm_hw_rule *rule)
++{
++      struct davinci_mcasp_ruledata *rd = rule->private;
++      struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
++      struct snd_mask nfmt;
++      int i, slot_width;
++
++      snd_mask_none(&nfmt);
++      slot_width = rd->mcasp->slot_width;
++
++      for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
++              if (snd_mask_test(fmt, i)) {
++                      if (snd_pcm_format_width(i) <= slot_width) {
++                              snd_mask_set(&nfmt, i);
++                      }
++              }
++      }
++
++      return snd_mask_refine(fmt, &nfmt);
++}
++
+ static const unsigned int davinci_mcasp_dai_rates[] = {
+       8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
+       88200, 96000, 176400, 192000,
+@@ -1251,7 +1273,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
+       struct davinci_mcasp_ruledata *ruledata =
+                                       &mcasp->ruledata[substream->stream];
+       u32 max_channels = 0;
+-      int i, dir;
++      int i, dir, ret;
+       int tdm_slots = mcasp->tdm_slots;
+ 
+       /* Do not allow more then one stream per direction */
+@@ -1280,6 +1302,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
+                       max_channels++;
+       }
+       ruledata->serializers = max_channels;
++      ruledata->mcasp = mcasp;
+       max_channels *= tdm_slots;
+       /*
+        * If the already active stream has less channels than the calculated
+@@ -1305,20 +1328,22 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
+                                  0, SNDRV_PCM_HW_PARAM_CHANNELS,
+                                  &mcasp->chconstr[substream->stream]);
+ 
+-      if (mcasp->slot_width)
+-              snd_pcm_hw_constraint_minmax(substream->runtime,
+-                                           SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
+-                                           8, mcasp->slot_width);
++      if (mcasp->slot_width) {
++              /* Only allow formats require <= slot_width bits on the bus */
++              ret = snd_pcm_hw_rule_add(substream->runtime, 0,
++                                        SNDRV_PCM_HW_PARAM_FORMAT,
++                                        davinci_mcasp_hw_rule_slot_width,
++                                        ruledata,
++                                        SNDRV_PCM_HW_PARAM_FORMAT, -1);
++              if (ret)
++                      return ret;
++      }
+ 
+       /*
+        * If we rely on implicit BCLK divider setting we should
+        * set constraints based on what we can provide.
+        */
+       if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
+-              int ret;
+-
+-              ruledata->mcasp = mcasp;
+-
+               ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+                                         SNDRV_PCM_HW_PARAM_RATE,
+                                         davinci_mcasp_hw_rule_rate,
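
davinci_mcasp_hw_rule_slot_width() above narrows a capability mask:
only formats whose sample width fits the configured slot width survive.
The same refinement over a plain bitmask, with a hypothetical per-format
width table:

    #include <stdint.h>

    static const int widths[] = { 8, 16, 24, 32 };  /* per-format widths */

    static uint32_t refine_by_slot_width(uint32_t fmt_mask, int slot_width)
    {
            uint32_t out = 0;

            for (unsigned i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
                    if ((fmt_mask & (1u << i)) && widths[i] <= slot_width)
                            out |= 1u << i;

            return out;    /* plays the role of snd_mask_refine(fmt, &nfmt) */
    }
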
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index 42c2a3065b77..ff5206f5455d 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -1757,8 +1757,11 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
+               }
+       }
+ 
+-      if (dai_link->dai_fmt)
+-              snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
++      if (dai_link->dai_fmt) {
++              ret = snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
++              if (ret)
++                      return ret;
++      }
+ 
+       ret = soc_post_component_init(rtd, dai_link->name);
+       if (ret)
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index b4c8ba412a5c..104d5f487c7d 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -1152,8 +1152,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
+               list_add_tail(&widget->work_list, list);
+ 
+       if (custom_stop_condition && custom_stop_condition(widget, dir)) {
+-              widget->endpoints[dir] = 1;
+-              return widget->endpoints[dir];
++              list = NULL;
++              custom_stop_condition = NULL;
+       }
+ 
+       if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
+@@ -1190,8 +1190,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
+  *
+  * Optionally, can be supplied with a function acting as a stopping condition.
+  * This function takes the dapm widget currently being examined and the walk
+- * direction as an arguments, it should return true if the walk should be
+- * stopped and false otherwise.
++ * direction as an arguments, it should return true if widgets from that point
++ * in the graph onwards should not be added to the widget list.
+  */
+ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
+       struct list_head *list,
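
The DAPM change above redefines the custom stop condition: instead of
counting the stopping widget as an endpoint, the walk keeps counting
endpoints but stops collecting widgets (and stops re-evaluating the
condition) below that point. A compact sketch of pruning a recursive
walk this way, with hypothetical node types:

    struct node {
            int is_ep;
            int n_out;
            struct node **out;
    };

    typedef int (*stop_fn)(struct node *);

    static int walk(struct node *n, struct node **list, int *nr, stop_fn stop)
    {
            if (stop && stop(n)) {
                    list = NULL;        /* stop collecting from here down */
                    stop = NULL;        /* ...and stop re-checking */
            }
            if (list)
                    list[(*nr)++] = n;

            int eps = n->is_ep ? 1 : 0;
            for (int i = 0; i < n->n_out; i++)
                    eps += walk(n->out[i], list, nr, stop);
            return eps;
    }
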
+diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
+index 997875c770b1..275f1c3c73b6 100644
+--- a/tools/perf/bench/numa.c
++++ b/tools/perf/bench/numa.c
+@@ -378,8 +378,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
+ 
+       /* Allocate and initialize all memory on CPU#0: */
+       if (init_cpu0) {
+-              orig_mask = bind_to_node(0);
+-              bind_to_memnode(0);
++              int node = numa_node_of_cpu(0);
++
++              orig_mask = bind_to_node(node);
++              bind_to_memnode(node);
+       }
+ 
+       bytes = bytes0 + HPSIZE;
+diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
+index 25a42acabee1..13a33fb71a6d 100644
+--- a/tools/perf/builtin-ftrace.c
++++ b/tools/perf/builtin-ftrace.c
+@@ -162,7 +162,7 @@ static int set_tracing_cpumask(struct cpu_map *cpumap)
+       int last_cpu;
+ 
+       last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
+-      mask_size = (last_cpu + 3) / 4 + 1;
++      mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
+       mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
+ 
+       cpumask = malloc(mask_size);
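
The mask_size arithmetic above sizes the buffer for a hex cpumask
string covering CPU ids 0..last_cpu: one hex digit per 4 CPUs, a comma
every 32 CPUs, and a trailing NUL, i.e. last_cpu / 4 + 2 plus the comma
bytes. The old (last_cpu + 3) / 4 + 1 came up one byte short whenever
last_cpu is a multiple of 4. Checkable sketch:

    #include <assert.h>

    static int mask_size(int last_cpu)
    {
            /* digits + NUL, plus one ',' per 32 CPUs */
            return last_cpu / 4 + 2 + last_cpu / 32;
    }

    int main(void)
    {
            assert(mask_size(0) == 2);    /* 1 digit + NUL */
            assert(mask_size(4) == 3);    /* 2 digits + NUL */
            assert(mask_size(31) == 9);   /* 8 digits + NUL */
            return 0;
    }
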
+diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
+index d51dc9ca8861..94a7cabe9b82 100644
+--- a/tools/perf/pmu-events/jevents.c
++++ b/tools/perf/pmu-events/jevents.c
+@@ -346,6 +346,7 @@ static struct fixed {
+       { "inst_retired.any_p", "event=0xc0" },
+       { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
+       { "cpu_clk_unhalted.thread", "event=0x3c" },
++      { "cpu_clk_unhalted.core", "event=0x3c" },
+       { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
+       { NULL, NULL},
+ };
+diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
+index 424b82a7d078..f0679613bd18 100644
+--- a/tools/perf/tests/parse-events.c
++++ b/tools/perf/tests/parse-events.c
+@@ -19,32 +19,6 @@
+ #define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
+                            PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
+ 
+-#if defined(__s390x__)
+-/* Return true if kvm module is available and loaded. Test this
+- * and retun success when trace point kvm_s390_create_vm
+- * exists. Otherwise this test always fails.
+- */
+-static bool kvm_s390_create_vm_valid(void)
+-{
+-      char *eventfile;
+-      bool rc = false;
+-
+-      eventfile = get_events_file("kvm-s390");
+-
+-      if (eventfile) {
+-              DIR *mydir = opendir(eventfile);
+-
+-              if (mydir) {
+-                      rc = true;
+-                      closedir(mydir);
+-              }
+-              put_events_file(eventfile);
+-      }
+-
+-      return rc;
+-}
+-#endif
+-
+ static int test__checkevent_tracepoint(struct perf_evlist *evlist)
+ {
+       struct perf_evsel *evsel = perf_evlist__first(evlist);
+@@ -1626,7 +1600,6 @@ static struct evlist_test test__events[] = {
+       {
+               .name  = "kvm-s390:kvm_s390_create_vm",
+               .check = test__checkevent_tracepoint,
+-              .valid = kvm_s390_create_vm_valid,
+               .id    = 100,
+       },
+ #endif
+diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
+index 383674f448fc..f93846edc1e0 100644
+--- a/tools/perf/util/cpumap.c
++++ b/tools/perf/util/cpumap.c
+@@ -701,7 +701,10 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
+       unsigned char *bitmap;
+       int last_cpu = cpu_map__cpu(map, map->nr - 1);
+ 
+-      bitmap = zalloc((last_cpu + 7) / 8);
++      if (buf == NULL)
++              return 0;
++
++      bitmap = zalloc(last_cpu / 8 + 1);
+       if (bitmap == NULL) {
+               buf[0] = '\0';
+               return 0;
+diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config
+new file mode 100644
+index 000000000000..63ed533f73d6
+--- /dev/null
++++ b/tools/testing/selftests/kvm/config
+@@ -0,0 +1,3 @@
++CONFIG_KVM=y
++CONFIG_KVM_INTEL=y
++CONFIG_KVM_AMD=y
