commit:     12f3c647f24bfba1d73bc6cf0fec459faa52b455
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Mar 10 14:13:54 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Mar 10 14:13:54 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=12f3c647

proj/linux-patches: Linux patch 4.20.15

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1014_linux-4.20.15.patch | 3168 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3172 insertions(+)

diff --git a/0000_README b/0000_README
index dd1fcee..dd61e24 100644
--- a/0000_README
+++ b/0000_README
@@ -99,6 +99,10 @@ Patch:  1013_linux-4.20.14.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.20.14
 
+Patch:  1014_linux-4.20.15.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.20.15
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1014_linux-4.20.15.patch b/1014_linux-4.20.15.patch
new file mode 100644
index 0000000..6fee7b4
--- /dev/null
+++ b/1014_linux-4.20.15.patch
@@ -0,0 +1,3168 @@
+diff --git a/Makefile b/Makefile
+index f7baaa0a3164b..25b45c24bac0b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 20
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+ 
+diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
+index ba150c755fcce..85b6c60f285d2 100644
+--- a/arch/mips/kernel/irq.c
++++ b/arch/mips/kernel/irq.c
+@@ -52,6 +52,7 @@ asmlinkage void spurious_interrupt(void)
+ void __init init_IRQ(void)
+ {
+       int i;
++      unsigned int order = get_order(IRQ_STACK_SIZE);
+ 
+       for (i = 0; i < NR_IRQS; i++)
+               irq_set_noprobe(i);
+@@ -62,8 +63,7 @@ void __init init_IRQ(void)
+       arch_init_irq();
+ 
+       for_each_possible_cpu(i) {
+-              int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
+-              void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
++              void *s = (void *)__get_free_pages(GFP_KERNEL, order);
+ 
+               irq_stack[i] = s;
+               pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
+diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
+index 9e21573714910..f8debf7aeb4c1 100644
+--- a/arch/x86/boot/compressed/pgtable_64.c
++++ b/arch/x86/boot/compressed/pgtable_64.c
+@@ -1,5 +1,7 @@
++#include <linux/efi.h>
+ #include <asm/e820/types.h>
+ #include <asm/processor.h>
++#include <asm/efi.h>
+ #include "pgtable.h"
+ #include "../string.h"
+ 
+@@ -37,9 +39,10 @@ int cmdline_find_option_bool(const char *option);
+ 
+ static unsigned long find_trampoline_placement(void)
+ {
+-      unsigned long bios_start, ebda_start;
++      unsigned long bios_start = 0, ebda_start = 0;
+       unsigned long trampoline_start;
+       struct boot_e820_entry *entry;
++      char *signature;
+       int i;
+ 
+       /*
+@@ -47,8 +50,18 @@ static unsigned long find_trampoline_placement(void)
+        * This code is based on reserve_bios_regions().
+        */
+ 
+-      ebda_start = *(unsigned short *)0x40e << 4;
+-      bios_start = *(unsigned short *)0x413 << 10;
++      /*
++       * EFI systems may not provide legacy ROM. The memory may not be mapped
++       * at all.
++       *
++       * Only look for values in the legacy ROM for non-EFI system.
++       */
++      signature = (char *)&boot_params->efi_info.efi_loader_signature;
++      if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
++          strncmp(signature, EFI64_LOADER_SIGNATURE, 4)) {
++              ebda_start = *(unsigned short *)0x40e << 4;
++              bios_start = *(unsigned short *)0x413 << 10;
++      }
+ 
+       if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
+               bios_start = BIOS_START_MAX;
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index eeea634bee0a7..6a25278e00929 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -818,11 +818,9 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
+ static void init_amd_zn(struct cpuinfo_x86 *c)
+ {
+       set_cpu_cap(c, X86_FEATURE_ZEN);
+-      /*
+-       * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
+-       * all up to and including B1.
+-       */
+-      if (c->x86_model <= 1 && c->x86_stepping <= 1)
++
++      /* Fix erratum 1076: CPB feature bit not being set in CPUID. */
++      if (!cpu_has(c, X86_FEATURE_CPB))
+               set_cpu_cap(c, X86_FEATURE_CPB);
+ }
+ 
+diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
+index 4bb68133a72af..5a0e0bd68b769 100644
+--- a/arch/xtensa/kernel/process.c
++++ b/arch/xtensa/kernel/process.c
+@@ -320,8 +320,8 @@ unsigned long get_wchan(struct task_struct *p)
+ 
+               /* Stack layout: sp-4: ra, sp-3: sp' */
+ 
+-              pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp);
+-              sp = *(unsigned long *)sp - 3;
++              pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
++              sp = SPILL_SLOT(sp, 1);
+       } while (count++ < 16);
+       return 0;
+ }
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index b834ee335d9a8..6d5f3aca8e9c2 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -356,6 +356,8 @@ struct binder_error {
+  *                        (invariant after initialized)
+  * @min_priority:         minimum scheduling priority
+  *                        (invariant after initialized)
++ * @txn_security_ctx:     require sender's security context
++ *                        (invariant after initialized)
+  * @async_todo:           list of async work items
+  *                        (protected by @proc->inner_lock)
+  *
+@@ -392,6 +394,7 @@ struct binder_node {
+                * invariant after initialization
+                */
+               u8 accept_fds:1;
++              u8 txn_security_ctx:1;
+               u8 min_priority;
+       };
+       bool has_async_transaction;
+@@ -642,6 +645,7 @@ struct binder_transaction {
+       long    saved_priority;
+       kuid_t  sender_euid;
+       struct list_head fd_fixups;
++      binder_uintptr_t security_ctx;
+       /**
+        * @lock:  protects @from, @to_proc, and @to_thread
+        *
+@@ -1165,6 +1169,7 @@ static struct binder_node *binder_init_node_ilocked(
+       node->work.type = BINDER_WORK_NODE;
+       node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+       node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
++      node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
+       spin_lock_init(&node->lock);
+       INIT_LIST_HEAD(&node->work.entry);
+       INIT_LIST_HEAD(&node->async_todo);
+@@ -2777,6 +2782,8 @@ static void binder_transaction(struct binder_proc *proc,
+       binder_size_t last_fixup_min_off = 0;
+       struct binder_context *context = proc->context;
+       int t_debug_id = atomic_inc_return(&binder_last_id);
++      char *secctx = NULL;
++      u32 secctx_sz = 0;
+ 
+       e = binder_transaction_log_add(&binder_transaction_log);
+       e->debug_id = t_debug_id;
+@@ -3017,6 +3024,20 @@ static void binder_transaction(struct binder_proc *proc,
+       t->flags = tr->flags;
+       t->priority = task_nice(current);
+ 
++      if (target_node && target_node->txn_security_ctx) {
++              u32 secid;
++
++              security_task_getsecid(proc->tsk, &secid);
++              ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
++              if (ret) {
++                      return_error = BR_FAILED_REPLY;
++                      return_error_param = ret;
++                      return_error_line = __LINE__;
++                      goto err_get_secctx_failed;
++              }
++              extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
++      }
++
+       trace_binder_transaction(reply, t, target_node);
+ 
+       t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
+@@ -3033,6 +3054,19 @@ static void binder_transaction(struct binder_proc *proc,
+               t->buffer = NULL;
+               goto err_binder_alloc_buf_failed;
+       }
++      if (secctx) {
++              size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
++                                  ALIGN(tr->offsets_size, sizeof(void *)) +
++                                  ALIGN(extra_buffers_size, sizeof(void *)) -
++                                  ALIGN(secctx_sz, sizeof(u64));
++              char *kptr = t->buffer->data + buf_offset;
++
++              t->security_ctx = (uintptr_t)kptr +
++                  binder_alloc_get_user_buffer_offset(&target_proc->alloc);
++              memcpy(kptr, secctx, secctx_sz);
++              security_release_secctx(secctx, secctx_sz);
++              secctx = NULL;
++      }
+       t->buffer->debug_id = t->debug_id;
+       t->buffer->transaction = t;
+       t->buffer->target_node = target_node;
+@@ -3302,6 +3336,9 @@ err_copy_data_failed:
+       t->buffer->transaction = NULL;
+       binder_alloc_free_buf(&target_proc->alloc, t->buffer);
+ err_binder_alloc_buf_failed:
++      if (secctx)
++              security_release_secctx(secctx, secctx_sz);
++err_get_secctx_failed:
+       kfree(tcomplete);
+       binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+ err_alloc_tcomplete_failed:
+@@ -4033,11 +4070,13 @@ retry:
+ 
+       while (1) {
+               uint32_t cmd;
+-              struct binder_transaction_data tr;
++              struct binder_transaction_data_secctx tr;
++              struct binder_transaction_data *trd = &tr.transaction_data;
+               struct binder_work *w = NULL;
+               struct list_head *list = NULL;
+               struct binder_transaction *t = NULL;
+               struct binder_thread *t_from;
++              size_t trsize = sizeof(*trd);
+ 
+               binder_inner_proc_lock(proc);
+               if (!binder_worklist_empty_ilocked(&thread->todo))
+@@ -4232,8 +4271,8 @@ retry:
+               if (t->buffer->target_node) {
+                      struct binder_node *target_node = t->buffer->target_node;
+ 
+-                      tr.target.ptr = target_node->ptr;
+-                      tr.cookie =  target_node->cookie;
++                      trd->target.ptr = target_node->ptr;
++                      trd->cookie =  target_node->cookie;
+                       t->saved_priority = task_nice(current);
+                       if (t->priority < target_node->min_priority &&
+                           !(t->flags & TF_ONE_WAY))
+@@ -4243,22 +4282,23 @@ retry:
+                               binder_set_nice(target_node->min_priority);
+                       cmd = BR_TRANSACTION;
+               } else {
+-                      tr.target.ptr = 0;
+-                      tr.cookie = 0;
++                      trd->target.ptr = 0;
++                      trd->cookie = 0;
+                       cmd = BR_REPLY;
+               }
+-              tr.code = t->code;
+-              tr.flags = t->flags;
+-              tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
++              trd->code = t->code;
++              trd->flags = t->flags;
++              trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
+ 
+               t_from = binder_get_txn_from(t);
+               if (t_from) {
+                       struct task_struct *sender = t_from->proc->tsk;
+ 
+-                      tr.sender_pid = task_tgid_nr_ns(sender,
+-                                                      task_active_pid_ns(current));
++                      trd->sender_pid =
++                              task_tgid_nr_ns(sender,
++                                              task_active_pid_ns(current));
+               } else {
+-                      tr.sender_pid = 0;
++                      trd->sender_pid = 0;
+               }
+ 
+               ret = binder_apply_fd_fixups(t);
+@@ -4289,15 +4329,20 @@ retry:
+                       }
+                       continue;
+               }
+-              tr.data_size = t->buffer->data_size;
+-              tr.offsets_size = t->buffer->offsets_size;
+-              tr.data.ptr.buffer = (binder_uintptr_t)
++              trd->data_size = t->buffer->data_size;
++              trd->offsets_size = t->buffer->offsets_size;
++              trd->data.ptr.buffer = (binder_uintptr_t)
+                       ((uintptr_t)t->buffer->data +
+                       binder_alloc_get_user_buffer_offset(&proc->alloc));
+-              tr.data.ptr.offsets = tr.data.ptr.buffer +
++              trd->data.ptr.offsets = trd->data.ptr.buffer +
+                                       ALIGN(t->buffer->data_size,
+                                           sizeof(void *));
+ 
++              tr.secctx = t->security_ctx;
++              if (t->security_ctx) {
++                      cmd = BR_TRANSACTION_SEC_CTX;
++                      trsize = sizeof(tr);
++              }
+               if (put_user(cmd, (uint32_t __user *)ptr)) {
+                       if (t_from)
+                               binder_thread_dec_tmpref(t_from);
+@@ -4308,7 +4353,7 @@ retry:
+                       return -EFAULT;
+               }
+               ptr += sizeof(uint32_t);
+-              if (copy_to_user(ptr, &tr, sizeof(tr))) {
++              if (copy_to_user(ptr, &tr, trsize)) {
+                       if (t_from)
+                               binder_thread_dec_tmpref(t_from);
+ 
+@@ -4317,7 +4362,7 @@ retry:
+ 
+                       return -EFAULT;
+               }
+-              ptr += sizeof(tr);
++              ptr += trsize;
+ 
+               trace_binder_transaction_received(t);
+               binder_stat_br(proc, thread, cmd);
+@@ -4325,16 +4370,18 @@ retry:
+                            "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
+                            proc->pid, thread->pid,
+                            (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
+-                           "BR_REPLY",
++                              (cmd == BR_TRANSACTION_SEC_CTX) ?
++                                   "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
+                            t->debug_id, t_from ? t_from->proc->pid : 0,
+                            t_from ? t_from->pid : 0, cmd,
+                            t->buffer->data_size, t->buffer->offsets_size,
+-                           (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
++                           (u64)trd->data.ptr.buffer,
++                           (u64)trd->data.ptr.offsets);
+ 
+               if (t_from)
+                       binder_thread_dec_tmpref(t_from);
+               t->buffer->allow_user_free = 1;
+-              if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
++              if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
+                       binder_inner_proc_lock(thread->proc);
+                       t->to_parent = thread->transaction_stack;
+                       t->to_thread = thread;
+@@ -4676,7 +4723,8 @@ out:
+       return ret;
+ }
+ 
+-static int binder_ioctl_set_ctx_mgr(struct file *filp)
++static int binder_ioctl_set_ctx_mgr(struct file *filp,
++                                  struct flat_binder_object *fbo)
+ {
+       int ret = 0;
+       struct binder_proc *proc = filp->private_data;
+@@ -4705,7 +4753,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
+       } else {
+               context->binder_context_mgr_uid = curr_euid;
+       }
+-      new_node = binder_new_node(proc, NULL);
++      new_node = binder_new_node(proc, fbo);
+       if (!new_node) {
+               ret = -ENOMEM;
+               goto out;
+@@ -4828,8 +4876,20 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+               binder_inner_proc_unlock(proc);
+               break;
+       }
++      case BINDER_SET_CONTEXT_MGR_EXT: {
++              struct flat_binder_object fbo;
++
++              if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
++                      ret = -EINVAL;
++                      goto err;
++              }
++              ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
++              if (ret)
++                      goto err;
++              break;
++      }
+       case BINDER_SET_CONTEXT_MGR:
+-              ret = binder_ioctl_set_ctx_mgr(filp);
++              ret = binder_ioctl_set_ctx_mgr(filp, NULL);
+               if (ret)
+                       goto err;
+               break;
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 9d6604b50d75e..b4ee11d6e665a 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -965,9 +965,9 @@ static void __device_release_driver(struct device *dev, struct device *parent)
+                       drv->remove(dev);
+ 
+               device_links_driver_cleanup(dev);
+-              arch_teardown_dma_ops(dev);
+ 
+               devres_release_all(dev);
++              arch_teardown_dma_ops(dev);
+               dev->driver = NULL;
+               dev_set_drvdata(dev, NULL);
+               if (dev->pm_domain && dev->pm_domain->dismiss)
+diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
+index 41405de27d665..c91bba00df4e4 100644
+--- a/drivers/bluetooth/btrtl.c
++++ b/drivers/bluetooth/btrtl.c
+@@ -552,10 +552,9 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
+                                           hdev->bus);
+ 
+       if (!btrtl_dev->ic_info) {
+-              rtl_dev_err(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
++              rtl_dev_info(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
+                           lmp_subver, hci_rev, hci_ver);
+-              ret = -EINVAL;
+-              goto err_free;
++              return btrtl_dev;
+       }
+ 
+       if (btrtl_dev->ic_info->has_rom_version) {
+@@ -610,6 +609,11 @@ int btrtl_download_firmware(struct hci_dev *hdev,
+        * standard btusb. Once that firmware is uploaded, the subver changes
+        * to a different value.
+        */
++      if (!btrtl_dev->ic_info) {
++              rtl_dev_info(hdev, "rtl: assuming no firmware upload needed\n");
++              return 0;
++      }
++
+       switch (btrtl_dev->ic_info->lmp_subver) {
+       case RTL_ROM_LMP_8723A:
+       case RTL_ROM_LMP_3499:
+diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
+index c0a5b1f3a9863..4ccc39e00ced3 100644
+--- a/drivers/char/applicom.c
++++ b/drivers/char/applicom.c
+@@ -32,6 +32,7 @@
+ #include <linux/wait.h>
+ #include <linux/init.h>
+ #include <linux/fs.h>
++#include <linux/nospec.h>
+ 
+ #include <asm/io.h>
+ #include <linux/uaccess.h>
+@@ -386,7 +387,11 @@ static ssize_t ac_write(struct file *file, const char __user *buf, size_t count,
+       TicCard = st_loc.tic_des_from_pc;       /* tic number to send            */
+       IndexCard = NumCard - 1;
+ 
+-      if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
++      if (IndexCard >= MAX_BOARD)
++              return -EINVAL;
++      IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
++
++      if (!apbs[IndexCard].RamIO)
+               return -EINVAL;
+ 
+ #ifdef DEBUG
+@@ -697,6 +702,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+       unsigned char IndexCard;
+       void __iomem *pmem;
+       int ret = 0;
++      static int warncount = 10;
+       volatile unsigned char byte_reset_it;
+       struct st_ram_io *adgl;
+       void __user *argp = (void __user *)arg;
+@@ -711,16 +717,12 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+       mutex_lock(&ac_mutex);  
+       IndexCard = adgl->num_card-1;
+        
+-      if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
+-              static int warncount = 10;
+-              if (warncount) {
+-                      printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
+-                      warncount--;
+-              }
+-              kfree(adgl);
+-              mutex_unlock(&ac_mutex);
+-              return -EINVAL;
+-      }
++      if (cmd != 6 && IndexCard >= MAX_BOARD)
++              goto err;
++      IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
++
++      if (cmd != 6 && !apbs[IndexCard].RamIO)
++              goto err;
+ 
+       switch (cmd) {
+               
+@@ -838,5 +840,16 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+       kfree(adgl);
+       mutex_unlock(&ac_mutex);
+       return 0;
++
++err:
++      if (warncount) {
++              pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
++                      (int)IndexCard + 1);
++              warncount--;
++      }
++      kfree(adgl);
++      mutex_unlock(&ac_mutex);
++      return -EINVAL;
++
+ }
+ 
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index df34a12a388f3..a2e595b9b2df3 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -545,13 +545,13 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
+  *                          SYSFS INTERFACE                          *
+  *********************************************************************/
+ static ssize_t show_boost(struct kobject *kobj,
+-                               struct attribute *attr, char *buf)
++                        struct kobj_attribute *attr, char *buf)
+ {
+       return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
+ }
+ 
+-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
+-                                const char *buf, size_t count)
++static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
++                         const char *buf, size_t count)
+ {
+       int ret, enable;
+ 
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 9578312e43f2f..e4125f404d86c 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -873,7 +873,7 @@ static void intel_pstate_update_policies(void)
+ /************************** sysfs begin ************************/
+ #define show_one(file_name, object)                                   \
+       static ssize_t show_##file_name                                 \
+-      (struct kobject *kobj, struct attribute *attr, char *buf)       \
++      (struct kobject *kobj, struct kobj_attribute *attr, char *buf)  \
+       {                                                               \
+               return sprintf(buf, "%u\n", global.object);             \
+       }
+@@ -882,7 +882,7 @@ static ssize_t intel_pstate_show_status(char *buf);
+ static int intel_pstate_update_status(const char *buf, size_t size);
+ 
+ static ssize_t show_status(struct kobject *kobj,
+-                         struct attribute *attr, char *buf)
++                         struct kobj_attribute *attr, char *buf)
+ {
+       ssize_t ret;
+ 
+@@ -893,7 +893,7 @@ static ssize_t show_status(struct kobject *kobj,
+       return ret;
+ }
+ 
+-static ssize_t store_status(struct kobject *a, struct attribute *b,
++static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
+                           const char *buf, size_t count)
+ {
+       char *p = memchr(buf, '\n', count);
+@@ -907,7 +907,7 @@ static ssize_t store_status(struct kobject *a, struct attribute *b,
+ }
+ 
+ static ssize_t show_turbo_pct(struct kobject *kobj,
+-                              struct attribute *attr, char *buf)
++                              struct kobj_attribute *attr, char *buf)
+ {
+       struct cpudata *cpu;
+       int total, no_turbo, turbo_pct;
+@@ -933,7 +933,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
+ }
+ 
+ static ssize_t show_num_pstates(struct kobject *kobj,
+-                              struct attribute *attr, char *buf)
++                              struct kobj_attribute *attr, char *buf)
+ {
+       struct cpudata *cpu;
+       int total;
+@@ -954,7 +954,7 @@ static ssize_t show_num_pstates(struct kobject *kobj,
+ }
+ 
+ static ssize_t show_no_turbo(struct kobject *kobj,
+-                           struct attribute *attr, char *buf)
++                           struct kobj_attribute *attr, char *buf)
+ {
+       ssize_t ret;
+ 
+@@ -976,7 +976,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
+       return ret;
+ }
+ 
+-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
++static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
+                             const char *buf, size_t count)
+ {
+       unsigned int input;
+@@ -1023,7 +1023,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+       return count;
+ }
+ 
+-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
++static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
+                                 const char *buf, size_t count)
+ {
+       unsigned int input;
+@@ -1053,7 +1053,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+       return count;
+ }
+ 
+-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
++static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
+                                 const char *buf, size_t count)
+ {
+       unsigned int input;
+@@ -1085,12 +1085,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
+ }
+ 
+ static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
+-                              struct attribute *attr, char *buf)
++                              struct kobj_attribute *attr, char *buf)
+ {
+       return sprintf(buf, "%u\n", hwp_boost);
+ }
+ 
+-static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
++static ssize_t store_hwp_dynamic_boost(struct kobject *a,
++                                     struct kobj_attribute *b,
+                                      const char *buf, size_t count)
+ {
+       unsigned int input;
+diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
+index 2c22836d3ffd5..4596fde16dfe6 100644
+--- a/drivers/gnss/sirf.c
++++ b/drivers/gnss/sirf.c
+@@ -310,30 +310,26 @@ static int sirf_probe(struct serdev_device *serdev)
+                       ret = -ENODEV;
+                       goto err_put_device;
+               }
++
++              ret = regulator_enable(data->vcc);
++              if (ret)
++                      goto err_put_device;
++
++              /* Wait for chip to boot into hibernate mode. */
++              msleep(SIRF_BOOT_DELAY);
+       }
+ 
+       if (data->wakeup) {
+               ret = gpiod_to_irq(data->wakeup);
+               if (ret < 0)
+-                      goto err_put_device;
+-
++                      goto err_disable_vcc;
+               data->irq = ret;
+ 
+-              ret = devm_request_threaded_irq(dev, data->irq, NULL,
+-                              sirf_wakeup_handler,
++              ret = request_threaded_irq(data->irq, NULL, sirf_wakeup_handler,
+                              IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                               "wakeup", data);
+               if (ret)
+-                      goto err_put_device;
+-      }
+-
+-      if (data->on_off) {
+-              ret = regulator_enable(data->vcc);
+-              if (ret)
+-                      goto err_put_device;
+-
+-              /* Wait for chip to boot into hibernate mode */
+-              msleep(SIRF_BOOT_DELAY);
++                      goto err_disable_vcc;
+       }
+ 
+       if (IS_ENABLED(CONFIG_PM)) {
+@@ -342,7 +338,7 @@ static int sirf_probe(struct serdev_device *serdev)
+       } else {
+               ret = sirf_runtime_resume(dev);
+               if (ret < 0)
+-                      goto err_disable_vcc;
++                      goto err_free_irq;
+       }
+ 
+       ret = gnss_register_device(gdev);
+@@ -356,6 +352,9 @@ err_disable_rpm:
+               pm_runtime_disable(dev);
+       else
+               sirf_runtime_suspend(dev);
++err_free_irq:
++      if (data->wakeup)
++              free_irq(data->irq, data);
+ err_disable_vcc:
+       if (data->on_off)
+               regulator_disable(data->vcc);
+@@ -376,6 +375,9 @@ static void sirf_remove(struct serdev_device *serdev)
+       else
+               sirf_runtime_suspend(&serdev->dev);
+ 
++      if (data->wakeup)
++              free_irq(data->irq, data);
++
+       if (data->on_off)
+               regulator_disable(data->vcc);
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index b0113f6fdbb46..9021f01651d65 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -896,7 +896,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
+       default:
+               return U64_MAX;
+       }
+-      value = (((u64)high) << 16) | low;
++      value = (((u64)high) << 32) | low;
+       return value;
+ }
+ 
+@@ -3082,7 +3082,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
+       .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+       .port_link_state = mv88e6352_port_link_state,
+       .port_get_cmode = mv88e6185_port_get_cmode,
+-      .stats_snapshot = mv88e6320_g1_stats_snapshot,
++      .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+       .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+       .stats_get_sset_count = mv88e6095_stats_get_sset_count,
+       .stats_get_strings = mv88e6095_stats_get_strings,
+@@ -4206,7 +4206,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+               .name = "Marvell 88E6190",
+               .num_databases = 4096,
+               .num_ports = 11,        /* 10 + Z80 */
+-              .num_internal_phys = 11,
++              .num_internal_phys = 9,
+               .num_gpio = 16,
+               .max_vid = 8191,
+               .port_base_addr = 0x0,
+@@ -4229,7 +4229,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+               .name = "Marvell 88E6190X",
+               .num_databases = 4096,
+               .num_ports = 11,        /* 10 + Z80 */
+-              .num_internal_phys = 11,
++              .num_internal_phys = 9,
+               .num_gpio = 16,
+               .max_vid = 8191,
+               .port_base_addr = 0x0,
+@@ -4252,7 +4252,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+               .name = "Marvell 88E6191",
+               .num_databases = 4096,
+               .num_ports = 11,        /* 10 + Z80 */
+-              .num_internal_phys = 11,
++              .num_internal_phys = 9,
+               .max_vid = 8191,
+               .port_base_addr = 0x0,
+               .phy_base_addr = 0x0,
+@@ -4299,7 +4299,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+               .name = "Marvell 88E6290",
+               .num_databases = 4096,
+               .num_ports = 11,        /* 10 + Z80 */
+-              .num_internal_phys = 11,
++              .num_internal_phys = 9,
+               .num_gpio = 16,
+               .max_vid = 8191,
+               .port_base_addr = 0x0,
+@@ -4461,7 +4461,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+               .name = "Marvell 88E6390",
+               .num_databases = 4096,
+               .num_ports = 11,        /* 10 + Z80 */
+-              .num_internal_phys = 11,
++              .num_internal_phys = 9,
+               .num_gpio = 16,
+               .max_vid = 8191,
+               .port_base_addr = 0x0,
+@@ -4484,7 +4484,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+               .name = "Marvell 88E6390X",
+               .num_databases = 4096,
+               .num_ports = 11,        /* 10 + Z80 */
+-              .num_internal_phys = 11,
++              .num_internal_phys = 9,
+               .num_gpio = 16,
+               .max_vid = 8191,
+               .port_base_addr = 0x0,
+@@ -4579,6 +4579,14 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
+       return 0;
+ }
+ 
++static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip)
++{
++      int i;
++
++      for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
++              chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID;
++}
++
+ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
+                                                       int port)
+ {
+@@ -4615,6 +4623,8 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
+       if (err)
+               goto free;
+ 
++      mv88e6xxx_ports_cmode_init(chip);
++
+       mutex_lock(&chip->reg_lock);
+       err = mv88e6xxx_switch_reset(chip);
+       mutex_unlock(&chip->reg_lock);
+@@ -4821,6 +4831,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
+       if (err)
+               goto out;
+ 
++      mv88e6xxx_ports_cmode_init(chip);
+       mv88e6xxx_phy_init(chip);
+ 
+       if (chip->info->ops->get_eeprom) {
+diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
+index cd7db60a508ba..abe86d7781d82 100644
+--- a/drivers/net/dsa/mv88e6xxx/port.c
++++ b/drivers/net/dsa/mv88e6xxx/port.c
+@@ -190,7 +190,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup)
+               /* normal duplex detection */
+               break;
+       default:
+-              return -EINVAL;
++              return -EOPNOTSUPP;
+       }
+ 
+       err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
+@@ -395,6 +395,10 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+               cmode = 0;
+       }
+ 
++      /* cmode doesn't change, nothing to do for us */
++      if (cmode == chip->ports[port].cmode)
++              return 0;
++
+       lane = mv88e6390x_serdes_get_lane(chip, port);
+       if (lane < 0)
+               return lane;
+@@ -405,7 +409,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+                       return err;
+       }
+ 
+-      err = mv88e6390_serdes_power(chip, port, false);
++      err = mv88e6390x_serdes_power(chip, port, false);
+       if (err)
+               return err;
+ 
+@@ -421,7 +425,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+               if (err)
+                       return err;
+ 
+-              err = mv88e6390_serdes_power(chip, port, true);
++              err = mv88e6390x_serdes_power(chip, port, true);
+               if (err)
+                       return err;
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
+index 091aa0057f1f6..cbb64a7683e28 100644
+--- a/drivers/net/dsa/mv88e6xxx/port.h
++++ b/drivers/net/dsa/mv88e6xxx/port.h
+@@ -52,6 +52,7 @@
+ #define MV88E6185_PORT_STS_CMODE_1000BASE_X   0x0005
+ #define MV88E6185_PORT_STS_CMODE_PHY          0x0006
+ #define MV88E6185_PORT_STS_CMODE_DISABLED     0x0007
++#define MV88E6XXX_PORT_STS_CMODE_INVALID      0xff
+ 
+ /* Offset 0x01: MAC (or PCS or Physical) Control Register */
+ #define MV88E6XXX_PORT_MAC_CTL                                0x01
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 090207817ad8d..29b068b81e2ac 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -497,6 +497,12 @@ normal_tx:
+       }
+ 
+       length >>= 9;
++      if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
++              dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
++                                   skb->len);
++              i = 0;
++              goto tx_dma_error;
++      }
+       flags |= bnxt_lhint_arr[length];
+       txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ 
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index ae2f35039343b..1485f66cf7b0c 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -46,6 +46,7 @@
+ #include <linux/mii.h>
+ #include <linux/of_device.h>
+ #include <linux/of_net.h>
++#include <linux/dmi.h>
+ 
+ #include <asm/irq.h>
+ 
+@@ -93,7 +94,7 @@ static int copybreak __read_mostly = 128;
+ module_param(copybreak, int, 0);
+ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
+ 
+-static int disable_msi = 0;
++static int disable_msi = -1;
+ module_param(disable_msi, int, 0);
+ MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+ 
+@@ -4931,6 +4932,24 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
+       return buf;
+ }
+ 
++static const struct dmi_system_id msi_blacklist[] = {
++      {
++              .ident = "Dell Inspiron 1545",
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"),
++              },
++      },
++      {
++              .ident = "Gateway P-79",
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
++              },
++      },
++      {}
++};
++
+ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+       struct net_device *dev, *dev1;
+@@ -5042,6 +5061,9 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+               goto err_out_free_pci;
+       }
+ 
++      if (disable_msi == -1)
++              disable_msi = !!dmi_check_system(msi_blacklist);
++
+       if (!disable_msi && pci_enable_msi(pdev) == 0) {
+               err = sky2_test_msi(hw);
+               if (err) {
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index 1ce8b729929fe..671ea75d0a4a1 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -1400,7 +1400,8 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx,
+ }
+ 
+ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
+-                                   unsigned int frame_length)
++                                   unsigned int frame_length,
++                                   int nr_frags)
+ {
+       /* called only from within lan743x_tx_xmit_frame.
+        * assuming tx->ring_lock has already been acquired.
+@@ -1410,6 +1411,10 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
+ 
+       /* wrap up previous descriptor */
+       tx->frame_data0 |= TX_DESC_DATA0_EXT_;
++      if (nr_frags <= 0) {
++              tx->frame_data0 |= TX_DESC_DATA0_LS_;
++              tx->frame_data0 |= TX_DESC_DATA0_IOC_;
++      }
+       tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+       tx_descriptor->data0 = tx->frame_data0;
+ 
+@@ -1514,8 +1519,11 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
+       u32 tx_tail_flags = 0;
+ 
+       /* wrap up previous descriptor */
+-      tx->frame_data0 |= TX_DESC_DATA0_LS_;
+-      tx->frame_data0 |= TX_DESC_DATA0_IOC_;
++      if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
++          TX_DESC_DATA0_DTYPE_DATA_) {
++              tx->frame_data0 |= TX_DESC_DATA0_LS_;
++              tx->frame_data0 |= TX_DESC_DATA0_IOC_;
++      }
+ 
+       tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+       buffer_info = &tx->buffer_info[tx->frame_tail];
+@@ -1600,7 +1608,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
+       }
+ 
+       if (gso)
+-              lan743x_tx_frame_add_lso(tx, frame_length);
++              lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
+ 
+       if (nr_frags <= 0)
+               goto finish;
+diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
+index 4c23d18bbf44a..14b83bbf828d3 100644
+--- a/drivers/net/ethernet/mscc/ocelot_board.c
++++ b/drivers/net/ethernet/mscc/ocelot_board.c
+@@ -266,6 +266,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
+               struct phy *serdes;
+               void __iomem *regs;
+               char res_name[8];
++              int phy_mode;
+               u32 port;
+ 
+               if (of_property_read_u32(portnp, "reg", &port))
+@@ -291,11 +292,11 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
+               if (err)
+                       return err;
+ 
+-              err = of_get_phy_mode(portnp);
+-              if (err < 0)
++              phy_mode = of_get_phy_mode(portnp);
++              if (phy_mode < 0)
+                       ocelot->ports[port]->phy_mode = PHY_INTERFACE_MODE_NA;
+               else
+-                      ocelot->ports[port]->phy_mode = err;
++                      ocelot->ports[port]->phy_mode = phy_mode;
+ 
+               switch (ocelot->ports[port]->phy_mode) {
+               case PHY_INTERFACE_MODE_NA:
+@@ -303,6 +304,13 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
+               case PHY_INTERFACE_MODE_SGMII:
+                       break;
+               case PHY_INTERFACE_MODE_QSGMII:
++                      /* Ensure clock signals and speed is set on all
++                       * QSGMII links
++                       */
++                      ocelot_port_writel(ocelot->ports[port],
++                                         DEV_CLOCK_CFG_LINK_SPEED
++                                         (OCELOT_SPEED_1000),
++                                         DEV_CLOCK_CFG);
+                       break;
+               default:
+                       dev_err(ocelot->dev,
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 2e6e11d8cf5cb..d6ad8331ca4f5 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -637,15 +637,20 @@ out:
+ static int geneve_open(struct net_device *dev)
+ {
+       struct geneve_dev *geneve = netdev_priv(dev);
+-      bool ipv6 = !!(geneve->info.mode & IP_TUNNEL_INFO_IPV6);
+       bool metadata = geneve->collect_md;
++      bool ipv4, ipv6;
+       int ret = 0;
+ 
++      ipv6 = geneve->info.mode & IP_TUNNEL_INFO_IPV6 || metadata;
++      ipv4 = !ipv6 || metadata;
+ #if IS_ENABLED(CONFIG_IPV6)
+-      if (ipv6 || metadata)
++      if (ipv6) {
+               ret = geneve_sock_add(geneve, true);
++              if (ret < 0 && ret != -EAFNOSUPPORT)
++                      ipv4 = false;
++      }
+ #endif
+-      if (!ret && (!ipv6 || metadata))
++      if (ipv4)
+               ret = geneve_sock_add(geneve, false);
+       if (ret < 0)
+               geneve_sock_release(geneve);
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 80d9297ad9d9c..136533e2e4879 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -744,6 +744,14 @@ void netvsc_linkstatus_callback(struct net_device *net,
+       schedule_delayed_work(&ndev_ctx->dwork, 0);
+ }
+ 
++static void netvsc_comp_ipcsum(struct sk_buff *skb)
++{
++      struct iphdr *iph = (struct iphdr *)skb->data;
++
++      iph->check = 0;
++      iph->check = ip_fast_csum(iph, iph->ihl);
++}
++
+ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
+                                            struct netvsc_channel *nvchan)
+ {
+@@ -770,9 +778,17 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
+       /* skb is already created with CHECKSUM_NONE */
+       skb_checksum_none_assert(skb);
+ 
+-      /*
+-       * In Linux, the IP checksum is always checked.
+-       * Do L4 checksum offload if enabled and present.
++      /* Incoming packets may have IP header checksum verified by the host.
++       * They may not have IP header checksum computed after coalescing.
++       * We compute it here if the flags are set, because on Linux, the IP
++       * checksum is always checked.
++       */
++      if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
++          csum_info->receive.ip_checksum_succeeded &&
++          skb->protocol == htons(ETH_P_IP))
++              netvsc_comp_ipcsum(skb);
++
++      /* Do L4 checksum offload if enabled and present.
+        */
+       if (csum_info && (net->features & NETIF_F_RXCSUM)) {
+               if (csum_info->receive.tcp_checksum_succeeded ||
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 22dfbd4c6aaf4..6c50e7c1feb12 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -339,6 +339,17 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
+       return genphy_config_aneg(phydev);
+ }
+ 
++static int ksz8061_config_init(struct phy_device *phydev)
++{
++      int ret;
++
++      ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
++      if (ret)
++              return ret;
++
++      return kszphy_config_init(phydev);
++}
++
+ static int ksz9021_load_values_from_of(struct phy_device *phydev,
+                                      const struct device_node *of_node,
+                                      u16 reg,
+@@ -1044,7 +1055,7 @@ static struct phy_driver ksphy_driver[] = {
+       .phy_id_mask    = MICREL_PHY_ID_MASK,
+       .features       = PHY_BASIC_FEATURES,
+       .flags          = PHY_HAS_INTERRUPT,
+-      .config_init    = kszphy_config_init,
++      .config_init    = ksz8061_config_init,
+       .ack_interrupt  = kszphy_ack_interrupt,
+       .config_intr    = kszphy_config_intr,
+       .suspend        = genphy_suspend,
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index b60c82065fd11..93365a5c25570 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -321,6 +321,10 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *
+       linkmode_zero(state->lp_advertising);
+       state->interface = pl->link_config.interface;
+       state->an_enabled = pl->link_config.an_enabled;
++      state->speed = SPEED_UNKNOWN;
++      state->duplex = DUPLEX_UNKNOWN;
++      state->pause = MLO_PAUSE_NONE;
++      state->an_complete = 0;
+       state->link = 1;
+ 
+       return pl->ops->mac_link_state(ndev, state);
+diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
+index a5ef97010eb34..5541e1c19936c 100644
+--- a/drivers/net/team/team_mode_loadbalance.c
++++ b/drivers/net/team/team_mode_loadbalance.c
+@@ -325,6 +325,20 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
+       return 0;
+ }
+ 
++static void lb_bpf_func_free(struct team *team)
++{
++      struct lb_priv *lb_priv = get_lb_priv(team);
++      struct bpf_prog *fp;
++
++      if (!lb_priv->ex->orig_fprog)
++              return;
++
++      __fprog_destroy(lb_priv->ex->orig_fprog);
++      fp = rcu_dereference_protected(lb_priv->fp,
++                                     lockdep_is_held(&team->lock));
++      bpf_prog_destroy(fp);
++}
++
+ static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
+ {
+       struct lb_priv *lb_priv = get_lb_priv(team);
+@@ -639,6 +653,7 @@ static void lb_exit(struct team *team)
+ 
+       team_options_unregister(team, lb_options,
+                               ARRAY_SIZE(lb_options));
++      lb_bpf_func_free(team);
+       cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw);
+       free_percpu(lb_priv->pcpu_stats);
+       kfree(lb_priv->ex);
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 1e6f0da1fa8e7..efc08dad3a1bf 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -2147,9 +2147,9 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
+       }
+ 
+       add_wait_queue(&tfile->wq.wait, &wait);
+-      current->state = TASK_INTERRUPTIBLE;
+ 
+       while (1) {
++              set_current_state(TASK_INTERRUPTIBLE);
+               ptr = ptr_ring_consume(&tfile->tx_ring);
+               if (ptr)
+                       break;
+@@ -2165,7 +2165,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
+               schedule();
+       }
+ 
+-      current->state = TASK_RUNNING;
++      __set_current_state(TASK_RUNNING);
+       remove_wait_queue(&tfile->wq.wait, &wait);
+ 
+ out:
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 735ad838e2ba8..6e381354f658e 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -976,6 +976,13 @@ static const struct usb_device_id products[] = {
+                                             0xff),
+               .driver_info        = (unsigned long)&qmi_wwan_info_quirk_dtr,
+       },
++      {       /* Quectel EG12/EM12 */
++              USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512,
++                                            USB_CLASS_VENDOR_SPEC,
++                                            USB_SUBCLASS_VENDOR_SPEC,
++                                            0xff),
++              .driver_info        = (unsigned long)&qmi_wwan_info_quirk_dtr,
++      },
+ 
+       /* 3. Combined interface devices matching on interface number */
+       {QMI_FIXED_INTF(0x0408, 0xea42, 4)},    /* Yota / Megafon M100-1 */
+@@ -1343,17 +1350,20 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
+       return false;
+ }
+ 
+-static bool quectel_ep06_diag_detected(struct usb_interface *intf)
++static bool quectel_diag_detected(struct usb_interface *intf)
+ {
+       struct usb_device *dev = interface_to_usbdev(intf);
+       struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
++      u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor);
++      u16 id_product = le16_to_cpu(dev->descriptor.idProduct);
+ 
+-      if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
+-          le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
+-          intf_desc.bNumEndpoints == 2)
+-              return true;
++      if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2)
++              return false;
+ 
+-      return false;
++      if (id_product == 0x0306 || id_product == 0x0512)
++              return true;
++      else
++              return false;
+ }
+ 
+ static int qmi_wwan_probe(struct usb_interface *intf,
+@@ -1390,13 +1400,13 @@ static int qmi_wwan_probe(struct usb_interface *intf,
+               return -ENODEV;
+       }
+ 
+-      /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
++      /* Several Quectel modems supports dynamic interface configuration, so
+        * we need to match on class/subclass/protocol. These values are
+        * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
+        * different. Ignore the current interface if the number of endpoints
+        * the number for the diag interface (two).
+        */
+-      if (quectel_ep06_diag_detected(intf))
++      if (quectel_diag_detected(intf))
+               return -ENODEV;
+ 
+       return usbnet_probe(intf, id);
+diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
+index 0ccb021f1e786..10d580c3dea3f 100644
+--- a/drivers/net/xen-netback/hash.c
++++ b/drivers/net/xen-netback/hash.c
+@@ -454,6 +454,8 @@ void xenvif_init_hash(struct xenvif *vif)
+       if (xenvif_hash_cache_size == 0)
+               return;
+ 
++      BUG_ON(vif->hash.cache.count);
++
+       spin_lock_init(&vif->hash.cache.lock);
+       INIT_LIST_HEAD(&vif->hash.cache.list);
+ }
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 182d6770f1027..6da12518e6934 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -153,6 +153,13 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
+ {
+       struct xenvif *vif = netdev_priv(dev);
+       unsigned int size = vif->hash.size;
++      unsigned int num_queues;
++
++      /* If queues are not set up internally - always return 0
++       * as the packet going to be dropped anyway */
++      num_queues = READ_ONCE(vif->num_queues);
++      if (num_queues < 1)
++              return 0;
+ 
+       if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
+               return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 80aae3a32c2a3..f09948b009dd0 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -1072,11 +1072,6 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
+               skb_frag_size_set(&frags[i], len);
+       }
+ 
+-      /* Copied all the bits from the frag list -- free it. */
+-      skb_frag_list_init(skb);
+-      xenvif_skb_zerocopy_prepare(queue, nskb);
+-      kfree_skb(nskb);
+-
+       /* Release all the original (foreign) frags. */
+       for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+               skb_frag_unref(skb, f);
+@@ -1145,6 +1140,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
+               xenvif_fill_frags(queue, skb);
+ 
+               if (unlikely(skb_has_frag_list(skb))) {
++                      struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
++                      xenvif_skb_zerocopy_prepare(queue, nskb);
+                       if (xenvif_handle_frag_list(queue, skb)) {
+                               if (net_ratelimit())
+                                       netdev_err(queue->vif->dev,
+@@ -1153,6 +1150,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
+                               kfree_skb(skb);
+                               continue;
+                       }
++                      /* Copied all the bits from the frag list -- free it. */
++                      skb_frag_list_init(skb);
++                      kfree_skb(nskb);
+               }
+ 
+               skb->dev      = queue->vif->dev;
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index fa6e0c3b3aa67..520b6fcd72628 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -757,6 +757,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
+               set_host_byte(cmd, DID_OK);
+               return BLK_STS_TARGET;
+       case DID_NEXUS_FAILURE:
++              set_host_byte(cmd, DID_OK);
+               return BLK_STS_NEXUS;
+       case DID_ALLOC_FAILURE:
+               set_host_byte(cmd, DID_OK);
+diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
+index a880b5c6c6c32..be815330ed958 100644
+--- a/drivers/staging/android/ashmem.c
++++ b/drivers/staging/android/ashmem.c
+@@ -75,6 +75,9 @@ struct ashmem_range {
+ /* LRU list of unpinned pages, protected by ashmem_mutex */
+ static LIST_HEAD(ashmem_lru_list);
+ 
++static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
++static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
++
+ /*
+  * long lru_count - The count of pages on our LRU list.
+  *
+@@ -168,19 +171,15 @@ static inline void lru_del(struct ashmem_range *range)
+  * @end:         The ending page (inclusive)
+  *
+  * This function is protected by ashmem_mutex.
+- *
+- * Return: 0 if successful, or -ENOMEM if there is an error
+  */
+-static int range_alloc(struct ashmem_area *asma,
+-                     struct ashmem_range *prev_range, unsigned int purged,
+-                     size_t start, size_t end)
++static void range_alloc(struct ashmem_area *asma,
++                      struct ashmem_range *prev_range, unsigned int purged,
++                      size_t start, size_t end,
++                      struct ashmem_range **new_range)
+ {
+-      struct ashmem_range *range;
+-
+-      range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+-      if (!range)
+-              return -ENOMEM;
++      struct ashmem_range *range = *new_range;
+ 
++      *new_range = NULL;
+       range->asma = asma;
+       range->pgstart = start;
+       range->pgend = end;
+@@ -190,8 +189,6 @@ static int range_alloc(struct ashmem_area *asma,
+ 
+       if (range_on_lru(range))
+               lru_add(range);
+-
+-      return 0;
+ }
+ 
+ /**
+@@ -438,7 +435,6 @@ out:
+ static unsigned long
+ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ {
+-      struct ashmem_range *range, *next;
+       unsigned long freed = 0;
+ 
+       /* We might recurse into filesystem code, so bail out if necessary */
+@@ -448,21 +444,33 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+       if (!mutex_trylock(&ashmem_mutex))
+               return -1;
+ 
+-      list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
++      while (!list_empty(&ashmem_lru_list)) {
++              struct ashmem_range *range =
++                      list_first_entry(&ashmem_lru_list, typeof(*range), lru);
+               loff_t start = range->pgstart * PAGE_SIZE;
+               loff_t end = (range->pgend + 1) * PAGE_SIZE;
++              struct file *f = range->asma->file;
+ 
+-              range->asma->file->f_op->fallocate(range->asma->file,
+-                              FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+-                              start, end - start);
++              get_file(f);
++              atomic_inc(&ashmem_shrink_inflight);
+               range->purged = ASHMEM_WAS_PURGED;
+               lru_del(range);
+ 
+               freed += range_size(range);
++              mutex_unlock(&ashmem_mutex);
++              f->f_op->fallocate(f,
++                                 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
++                                 start, end - start);
++              fput(f);
++              if (atomic_dec_and_test(&ashmem_shrink_inflight))
++                      wake_up_all(&ashmem_shrink_wait);
++              if (!mutex_trylock(&ashmem_mutex))
++                      goto out;
+               if (--sc->nr_to_scan <= 0)
+                       break;
+       }
+       mutex_unlock(&ashmem_mutex);
++out:
+       return freed;
+ }
+ 
+@@ -582,7 +590,8 @@ static int get_name(struct ashmem_area *asma, void __user *name)
+  *
+  * Caller must hold ashmem_mutex.
+  */
+-static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
++static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
++                    struct ashmem_range **new_range)
+ {
+       struct ashmem_range *range, *next;
+       int ret = ASHMEM_NOT_PURGED;
+@@ -635,7 +644,7 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+                        * second half and adjust the first chunk's endpoint.
+                        */
+                       range_alloc(asma, range, range->purged,
+-                                  pgend + 1, range->pgend);
++                                  pgend + 1, range->pgend, new_range);
+                       range_shrink(range, range->pgstart, pgstart - 1);
+                       break;
+               }
+@@ -649,7 +658,8 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+  *
+  * Caller must hold ashmem_mutex.
+  */
+-static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
++static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
++                      struct ashmem_range **new_range)
+ {
+       struct ashmem_range *range, *next;
+       unsigned int purged = ASHMEM_NOT_PURGED;
+@@ -675,7 +685,8 @@ restart:
+               }
+       }
+ 
+-      return range_alloc(asma, range, purged, pgstart, pgend);
++      range_alloc(asma, range, purged, pgstart, pgend, new_range);
++      return 0;
+ }
+ 
+ /*
+@@ -708,11 +719,19 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
+       struct ashmem_pin pin;
+       size_t pgstart, pgend;
+       int ret = -EINVAL;
++      struct ashmem_range *range = NULL;
+ 
+       if (copy_from_user(&pin, p, sizeof(pin)))
+               return -EFAULT;
+ 
++      if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
++              range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
++              if (!range)
++                      return -ENOMEM;
++      }
++
+       mutex_lock(&ashmem_mutex);
++      wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
+ 
+       if (!asma->file)
+               goto out_unlock;
+@@ -735,10 +754,10 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
+ 
+       switch (cmd) {
+       case ASHMEM_PIN:
+-              ret = ashmem_pin(asma, pgstart, pgend);
++              ret = ashmem_pin(asma, pgstart, pgend, &range);
+               break;
+       case ASHMEM_UNPIN:
+-              ret = ashmem_unpin(asma, pgstart, pgend);
++              ret = ashmem_unpin(asma, pgstart, pgend, &range);
+               break;
+       case ASHMEM_GET_PIN_STATUS:
+               ret = ashmem_get_pin_status(asma, pgstart, pgend);
+@@ -747,6 +766,8 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
+ 
+ out_unlock:
+       mutex_unlock(&ashmem_mutex);
++      if (range)
++              kmem_cache_free(ashmem_range_cachep, range);
+ 
+       return ret;
+ }
+diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
+index 548bb02c0ca6b..a8621dc8e1f67 100644
+--- a/drivers/staging/android/ion/ion_system_heap.c
++++ b/drivers/staging/android/ion/ion_system_heap.c
+@@ -224,10 +224,10 @@ static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
+ static int ion_system_heap_create_pools(struct ion_page_pool **pools)
+ {
+       int i;
+-      gfp_t gfp_flags = low_order_gfp_flags;
+ 
+       for (i = 0; i < NUM_ORDERS; i++) {
+               struct ion_page_pool *pool;
++              gfp_t gfp_flags = low_order_gfp_flags;
+ 
+               if (orders[i] > 4)
+                       gfp_flags = high_order_gfp_flags;
+diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
+index e70a461e723f8..405573e927cfc 100644
+--- a/drivers/staging/comedi/drivers/ni_660x.c
++++ b/drivers/staging/comedi/drivers/ni_660x.c
+@@ -656,6 +656,7 @@ static int ni_660x_set_pfi_routing(struct comedi_device *dev,
+       case NI_660X_PFI_OUTPUT_DIO:
+               if (chan > 31)
+                       return -EINVAL;
++              break;
+       default:
+               return -EINVAL;
+       }
+diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
+index d7fbf5f4600f3..f99954dbfdb58 100644
+--- a/drivers/staging/erofs/inode.c
++++ b/drivers/staging/erofs/inode.c
+@@ -185,16 +185,16 @@ static int fill_inode(struct inode *inode, int isdir)
+               /* setup the new inode */
+               if (S_ISREG(inode->i_mode)) {
+ #ifdef CONFIG_EROFS_FS_XATTR
+-                      if (vi->xattr_isize)
+-                              inode->i_op = &erofs_generic_xattr_iops;
++                      inode->i_op = &erofs_generic_xattr_iops;
+ #endif
+                       inode->i_fop = &generic_ro_fops;
+               } else if (S_ISDIR(inode->i_mode)) {
+                       inode->i_op =
+ #ifdef CONFIG_EROFS_FS_XATTR
+-                              vi->xattr_isize ? &erofs_dir_xattr_iops :
+-#endif
++                              &erofs_dir_xattr_iops;
++#else
+                               &erofs_dir_iops;
++#endif
+                       inode->i_fop = &erofs_dir_fops;
+               } else if (S_ISLNK(inode->i_mode)) {
+                       /* by default, page_get_link is used for symlink */
+diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
+index 8929443558676..2aaa7d3a1d62c 100644
+--- a/drivers/staging/erofs/internal.h
++++ b/drivers/staging/erofs/internal.h
+@@ -352,12 +352,17 @@ static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
+       return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
+ }
+ 
+-#define inode_set_inited_xattr(inode)   (EROFS_V(inode)->flags |= 1)
+-#define inode_has_inited_xattr(inode)   (EROFS_V(inode)->flags & 1)
++/* atomic flag definitions */
++#define EROFS_V_EA_INITED_BIT 0
++
++/* bitlock definitions (arranged in reverse order) */
++#define EROFS_V_BL_XATTR_BIT  (BITS_PER_LONG - 1)
+ 
+ struct erofs_vnode {
+       erofs_nid_t nid;
+-      unsigned int flags;
++
++      /* atomic flags (including bitlocks) */
++      unsigned long flags;
+ 
+       unsigned char data_mapping_mode;
+       /* inline size in bytes */
+diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
+index 1c4b3e0343f58..6348dbfbf77b5 100644
+--- a/drivers/staging/erofs/unzip_vle.c
++++ b/drivers/staging/erofs/unzip_vle.c
+@@ -59,15 +59,30 @@ enum z_erofs_vle_work_role {
+       Z_EROFS_VLE_WORK_SECONDARY,
+       Z_EROFS_VLE_WORK_PRIMARY,
+       /*
+-       * The current work has at least been linked with the following
+-       * processed chained works, which means if the processing page
+-       * is the tail partial page of the work, the current work can
+-       * safely use the whole page, as illustrated below:
+-       * +--------------+-------------------------------------------+
+-       * |  tail page   |      head page (of the previous work)     |
+-       * +--------------+-------------------------------------------+
+-       *   /\  which belongs to the current work
+-       * [  (*) this page can be used for the current work itself.  ]
++       * The current work was the tail of an exist chain, and the previous
++       * processed chained works are all decided to be hooked up to it.
++       * A new chain should be created for the remaining unprocessed works,
++       * therefore different from Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
++       * the next work cannot reuse the whole page in the following scenario:
++       *  ________________________________________________________________
++       * |      tail (partial) page     |       head (partial) page       |
++       * |  (belongs to the next work)  |  (belongs to the current work)  |
++       * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
++       */
++      Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
++      /*
++       * The current work has been linked with the processed chained works,
++       * and could be also linked with the potential remaining works, which
++       * means if the processing page is the tail partial page of the work,
++       * the current work can safely use the whole page (since the next work
++       * is under control) for in-place decompression, as illustrated below:
++       *  ________________________________________________________________
++       * |  tail (partial) page  |          head (partial) page           |
++       * | (of the current work) |         (of the previous work)         |
++       * |  PRIMARY_FOLLOWED or  |                                        |
++       * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
++       *
++       * [  (*) the above page can be used for the current work itself.  ]
+        */
+       Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
+       Z_EROFS_VLE_WORK_MAX
+@@ -236,10 +251,10 @@ static int z_erofs_vle_work_add_page(
+       return ret ? 0 : -EAGAIN;
+ }
+ 
+-static inline bool try_to_claim_workgroup(
+-      struct z_erofs_vle_workgroup *grp,
+-      z_erofs_vle_owned_workgrp_t *owned_head,
+-      bool *hosted)
++static enum z_erofs_vle_work_role
++try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
++                     z_erofs_vle_owned_workgrp_t *owned_head,
++                     bool *hosted)
+ {
+       DBG_BUGON(*hosted == true);
+ 
+@@ -253,6 +268,9 @@ retry:
+ 
+               *owned_head = grp;
+               *hosted = true;
++              /* lucky, I am the followee :) */
++              return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
++
+       } else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
+               /*
+                * type 2, link to the end of a existing open chain,
+@@ -262,12 +280,11 @@ retry:
+               if (Z_EROFS_VLE_WORKGRP_TAIL != cmpxchg(&grp->next,
+                       Z_EROFS_VLE_WORKGRP_TAIL, *owned_head))
+                       goto retry;
+-
+               *owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
+-      } else
+-              return false;   /* :( better luck next time */
++              return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
++      }
+ 
+-      return true;    /* lucky, I am the followee :) */
++      return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
+ }
+ 
+ struct z_erofs_vle_work_finder {
+@@ -345,12 +362,9 @@ z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
+       *f->hosted = false;
+       if (!primary)
+               *f->role = Z_EROFS_VLE_WORK_SECONDARY;
+-      /* claim the workgroup if possible */
+-      else if (try_to_claim_workgroup(grp, f->owned_head, f->hosted))
+-              *f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
+-      else
+-              *f->role = Z_EROFS_VLE_WORK_PRIMARY;
+-
++      else    /* claim the workgroup if possible */
++              *f->role = try_to_claim_workgroup(grp, f->owned_head,
++                                                f->hosted);
+       return work;
+ }
+ 
+@@ -410,6 +424,9 @@ z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
+       return work;
+ }
+ 
++#define builder_is_hooked(builder) \
++      ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
++
+ #define builder_is_followed(builder) \
+       ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
+ 
+@@ -584,7 +601,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
+       struct z_erofs_vle_work_builder *const builder = &fe->builder;
+       const loff_t offset = page_offset(page);
+ 
+-      bool tight = builder_is_followed(builder);
++      bool tight = builder_is_hooked(builder);
+       struct z_erofs_vle_work *work = builder->work;
+ 
+ #ifdef EROFS_FS_HAS_MANAGED_CACHE
+@@ -609,8 +626,12 @@ repeat:
+ 
+       /* lucky, within the range of the current map_blocks */
+       if (offset + cur >= map->m_la &&
+-              offset + cur < map->m_la + map->m_llen)
++              offset + cur < map->m_la + map->m_llen) {
++              /* didn't get a valid unzip work previously (very rare) */
++              if (!builder->work)
++                      goto restart_now;
+               goto hitted;
++      }
+ 
+       /* go ahead the next map_blocks */
+       debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
+@@ -624,6 +645,7 @@ repeat:
+       if (unlikely(err))
+               goto err_out;
+ 
++restart_now:
+       if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
+               goto hitted;
+ 
+@@ -649,7 +671,7 @@ repeat:
+               builder->role = Z_EROFS_VLE_WORK_PRIMARY;
+ #endif
+ 
+-      tight &= builder_is_followed(builder);
++      tight &= builder_is_hooked(builder);
+       work = builder->work;
+ hitted:
+       cur = end - min_t(unsigned int, offset + end - map->m_la, end);
+@@ -664,6 +686,9 @@ hitted:
+                       (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
+                               Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
+ 
++      if (cur)
++              tight &= builder_is_followed(builder);
++
+ retry:
+       err = z_erofs_vle_work_add_page(builder, page, page_type);
+       /* should allocate an additional staging page for pagevec */
+@@ -904,11 +929,10 @@ repeat:
+       if (llen > grp->llen)
+               llen = grp->llen;
+ 
+-      err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
+-              clusterpages, pages, llen, work->pageofs,
+-              z_erofs_onlinepage_endio);
++      err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
++                                          pages, llen, work->pageofs);
+       if (err != -ENOTSUPP)
+-              goto out_percpu;
++              goto out;
+ 
+       if (sparsemem_pages >= nr_pages)
+               goto skip_allocpage;
+@@ -929,8 +953,25 @@ skip_allocpage:
+       erofs_vunmap(vout, nr_pages);
+ 
+ out:
++      /* must handle all compressed pages before endding pages */
++      for (i = 0; i < clusterpages; ++i) {
++              page = compressed_pages[i];
++
++#ifdef EROFS_FS_HAS_MANAGED_CACHE
++              if (page->mapping == mngda)
++                      continue;
++#endif
++              /* recycle all individual staging pages */
++              (void)z_erofs_gather_if_stagingpage(page_pool, page);
++
++              WRITE_ONCE(compressed_pages[i], NULL);
++      }
++
+       for (i = 0; i < nr_pages; ++i) {
+               page = pages[i];
++              if (!page)
++                      continue;
++
+               DBG_BUGON(page->mapping == NULL);
+ 
+               /* recycle all individual staging pages */
+@@ -943,20 +984,6 @@ out:
+               z_erofs_onlinepage_endio(page);
+       }
+ 
+-out_percpu:
+-      for (i = 0; i < clusterpages; ++i) {
+-              page = compressed_pages[i];
+-
+-#ifdef EROFS_FS_HAS_MANAGED_CACHE
+-              if (page->mapping == mngda)
+-                      continue;
+-#endif
+-              /* recycle all individual staging pages */
+-              (void)z_erofs_gather_if_stagingpage(page_pool, page);
+-
+-              WRITE_ONCE(compressed_pages[i], NULL);
+-      }
+-
+       if (pages == z_pagemap_global)
+               mutex_unlock(&z_pagemap_global_lock);
+       else if (unlikely(pages != pages_onstack))
+diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h
+index 3316bc36965d4..684ff06fc7bf8 100644
+--- a/drivers/staging/erofs/unzip_vle.h
++++ b/drivers/staging/erofs/unzip_vle.h
+@@ -218,8 +218,7 @@ extern int z_erofs_vle_plain_copy(struct page **compressed_pages,
+ 
+ extern int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
+       unsigned clusterpages, struct page **pages,
+-      unsigned outlen, unsigned short pageofs,
+-      void (*endio)(struct page *));
++      unsigned int outlen, unsigned short pageofs);
+ 
+ extern int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
+       unsigned clusterpages, void *vaddr, unsigned llen,
+diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
+index 16ac335ee59f4..8fa8a71a5445b 100644
+--- a/drivers/staging/erofs/unzip_vle_lz4.c
++++ b/drivers/staging/erofs/unzip_vle_lz4.c
+@@ -105,8 +105,7 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
+                                 unsigned int clusterpages,
+                                 struct page **pages,
+                                 unsigned int outlen,
+-                                unsigned short pageofs,
+-                                void (*endio)(struct page *))
++                                unsigned short pageofs)
+ {
+       void *vin, *vout;
+       unsigned int nr_pages, i, j;
+@@ -128,19 +127,16 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
+       ret = z_erofs_unzip_lz4(vin, vout + pageofs,
+                               clusterpages * PAGE_SIZE, outlen);
+ 
+-      if (ret >= 0) {
+-              outlen = ret;
+-              ret = 0;
+-      }
++      if (ret < 0)
++              goto out;
++      ret = 0;
+ 
+       for (i = 0; i < nr_pages; ++i) {
+               j = min((unsigned int)PAGE_SIZE - pageofs, outlen);
+ 
+               if (pages[i]) {
+-                      if (ret < 0) {
+-                              SetPageError(pages[i]);
+-                      } else if (clusterpages == 1 &&
+-                                 pages[i] == compressed_pages[0]) {
++                      if (clusterpages == 1 &&
++                          pages[i] == compressed_pages[0]) {
+                               memcpy(vin + pageofs, vout + pageofs, j);
+                       } else {
+                               void *dst = kmap_atomic(pages[i]);
+@@ -148,12 +144,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
+                               memcpy(dst + pageofs, vout + pageofs, j);
+                               kunmap_atomic(dst);
+                       }
+-                      endio(pages[i]);
+               }
+               vout += PAGE_SIZE;
+               outlen -= j;
+               pageofs = 0;
+       }
++
++out:
+       preempt_enable();
+ 
+       if (clusterpages == 1)
+diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
+index 80dca6a4adbe2..6cb05ae312338 100644
+--- a/drivers/staging/erofs/xattr.c
++++ b/drivers/staging/erofs/xattr.c
+@@ -44,19 +44,48 @@ static inline void xattr_iter_end_final(struct xattr_iter *it)
+ 
+ static int init_inode_xattrs(struct inode *inode)
+ {
++      struct erofs_vnode *const vi = EROFS_V(inode);
+       struct xattr_iter it;
+       unsigned int i;
+       struct erofs_xattr_ibody_header *ih;
+       struct super_block *sb;
+       struct erofs_sb_info *sbi;
+-      struct erofs_vnode *vi;
+       bool atomic_map;
++      int ret = 0;
+ 
+-      if (likely(inode_has_inited_xattr(inode)))
++      /* the most case is that xattrs of this inode are initialized. */
++      if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
+               return 0;
+ 
+-      vi = EROFS_V(inode);
+-      BUG_ON(!vi->xattr_isize);
++      if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
++              return -ERESTARTSYS;
++
++      /* someone has initialized xattrs for us? */
++      if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
++              goto out_unlock;
++
++      /*
++       * bypass all xattr operations if ->xattr_isize is not greater than
++       * sizeof(struct erofs_xattr_ibody_header), in detail:
++       * 1) it is not enough to contain erofs_xattr_ibody_header then
++       *    ->xattr_isize should be 0 (it means no xattr);
++       * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk
++       *    undefined right now (maybe use later with some new sb feature).
++       */
++      if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
++              errln("xattr_isize %d of nid %llu is not supported yet",
++                    vi->xattr_isize, vi->nid);
++              ret = -ENOTSUPP;
++              goto out_unlock;
++      } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
++              if (unlikely(vi->xattr_isize)) {
++                      DBG_BUGON(1);
++                      ret = -EIO;
++                      goto out_unlock;        /* xattr ondisk layout error */
++              }
++              ret = -ENOATTR;
++              goto out_unlock;
++      }
+ 
+       sb = inode->i_sb;
+       sbi = EROFS_SB(sb);
+@@ -64,8 +93,10 @@ static int init_inode_xattrs(struct inode *inode)
+       it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
+ 
+       it.page = erofs_get_inline_page(inode, it.blkaddr);
+-      if (IS_ERR(it.page))
+-              return PTR_ERR(it.page);
++      if (IS_ERR(it.page)) {
++              ret = PTR_ERR(it.page);
++              goto out_unlock;
++      }
+ 
+       /* read in shared xattr array (non-atomic, see kmalloc below) */
+       it.kaddr = kmap(it.page);
+@@ -78,7 +109,8 @@ static int init_inode_xattrs(struct inode *inode)
+                                               sizeof(uint), GFP_KERNEL);
+       if (vi->xattr_shared_xattrs == NULL) {
+               xattr_iter_end(&it, atomic_map);
+-              return -ENOMEM;
++              ret = -ENOMEM;
++              goto out_unlock;
+       }
+ 
+       /* let's skip ibody header */
+@@ -92,8 +124,12 @@ static int init_inode_xattrs(struct inode *inode)
+ 
+                       it.page = erofs_get_meta_page(sb,
+                               ++it.blkaddr, S_ISDIR(inode->i_mode));
+-                      if (IS_ERR(it.page))
+-                              return PTR_ERR(it.page);
++                      if (IS_ERR(it.page)) {
++                              kfree(vi->xattr_shared_xattrs);
++                              vi->xattr_shared_xattrs = NULL;
++                              ret = PTR_ERR(it.page);
++                              goto out_unlock;
++                      }
+ 
+                       it.kaddr = kmap_atomic(it.page);
+                       atomic_map = true;
+@@ -105,8 +141,11 @@ static int init_inode_xattrs(struct inode *inode)
+       }
+       xattr_iter_end(&it, atomic_map);
+ 
+-      inode_set_inited_xattr(inode);
+-      return 0;
++      set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);
++
++out_unlock:
++      clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
++      return ret;
+ }
+ 
+ /*
+@@ -422,7 +461,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler,
+               struct dentry *unused, struct inode *inode,
+               const char *name, void *buffer, size_t size)
+ {
+-      struct erofs_vnode *const vi = EROFS_V(inode);
+       struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
+ 
+       switch (handler->flags) {
+@@ -440,9 +478,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler,
+               return -EINVAL;
+       }
+ 
+-      if (!vi->xattr_isize)
+-              return -ENOATTR;
+-
+       return erofs_getxattr(inode, handler->flags, name, buffer, size);
+ }
+ 
+diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
+index 76c901235e93e..693a471890750 100644
+--- a/drivers/staging/wilc1000/linux_wlan.c
++++ b/drivers/staging/wilc1000/linux_wlan.c
+@@ -1104,8 +1104,8 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
+               vif->wilc = *wilc;
+               vif->ndev = ndev;
+               wl->vif[i] = vif;
+-              wl->vif_num = i;
+-              vif->idx = wl->vif_num;
++              wl->vif_num = i + 1;
++              vif->idx = i;
+ 
+               ndev->netdev_ops = &wilc_netdev_ops;
+ 
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index a9ec7051f2864..c2fe218e051f0 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -194,6 +194,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+               xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
+       if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+           (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
++           pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+            pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
+               xhci->quirks |= XHCI_INTEL_USB_ROLE_SW;
+       if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
+index 938ff06c03495..efb0cad8710e3 100644
+--- a/drivers/usb/host/xhci-tegra.c
++++ b/drivers/usb/host/xhci-tegra.c
+@@ -941,9 +941,9 @@ static void tegra_xusb_powerdomain_remove(struct device *dev,
+               device_link_del(tegra->genpd_dl_ss);
+       if (tegra->genpd_dl_host)
+               device_link_del(tegra->genpd_dl_host);
+-      if (tegra->genpd_dev_ss)
++      if (!IS_ERR_OR_NULL(tegra->genpd_dev_ss))
+               dev_pm_domain_detach(tegra->genpd_dev_ss, true);
+-      if (tegra->genpd_dev_host)
++      if (!IS_ERR_OR_NULL(tegra->genpd_dev_host))
+               dev_pm_domain_detach(tegra->genpd_dev_host, true);
+ }
+ 
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index c0777a374a88f..4c66edf533fe9 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = {
+       { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
+       { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+       { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
++      { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
+       { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
+       { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
+       { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
+@@ -1353,8 +1354,13 @@ static int cp210x_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+       if (priv->partnum == CP210X_PARTNUM_CP2105)
+               req_type = REQTYPE_INTERFACE_TO_HOST;
+ 
++      result = usb_autopm_get_interface(serial->interface);
++      if (result)
++              return result;
++
+       result = cp210x_read_vendor_block(serial, req_type,
+                                         CP210X_READ_LATCH, &buf, sizeof(buf));
++      usb_autopm_put_interface(serial->interface);
+       if (result < 0)
+               return result;
+ 
+@@ -1375,6 +1381,10 @@ static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+ 
+       buf.mask = BIT(gpio);
+ 
++      result = usb_autopm_get_interface(serial->interface);
++      if (result)
++              goto out;
++
+       if (priv->partnum == CP210X_PARTNUM_CP2105) {
+               result = cp210x_write_vendor_block(serial,
+                                                  REQTYPE_HOST_TO_INTERFACE,
+@@ -1392,6 +1402,8 @@ static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+                                        NULL, 0, USB_CTRL_SET_TIMEOUT);
+       }
+ 
++      usb_autopm_put_interface(serial->interface);
++out:
+       if (result < 0) {
+               dev_err(&serial->interface->dev, "failed to set GPIO value: %d\n",
+                               result);
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index f459c1a18156e..9f3ba5c854d0a 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1025,6 +1025,8 @@ static const struct usb_device_id id_table_combined[] = {
+       { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
+       { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
+       { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
++      /* EZPrototypes devices */
++      { USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
+       { }                                     /* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 975d02666c5a0..b863bedb55a13 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1308,6 +1308,12 @@
+ #define IONICS_VID                    0x1c0c
+ #define IONICS_PLUGCOMPUTER_PID               0x0102
+ 
++/*
++ * EZPrototypes (PID reseller)
++ */
++#define EZPROTOTYPES_VID              0x1c40
++#define HJELMSLUND_USB485_ISO_PID     0x0477
++
+ /*
+  * Dresden Elektronik Sensor Terminal Board
+  */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index aef15497ff31f..11b21d9410f35 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1148,6 +1148,8 @@ static const struct usb_device_id option_ids[] = {
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+         .driver_info = NCTRL(0) | RSVD(3) },
++      { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff),    /* Telit ME910 (ECM) */
++        .driver_info = NCTRL(0) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
+diff --git a/fs/aio.c b/fs/aio.c
+index aac9659381d25..7f73f6928939b 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1661,6 +1661,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+       struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
+       struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
+       __poll_t mask = key_to_poll(key);
++      unsigned long flags;
+ 
+       req->woken = true;
+ 
+@@ -1669,10 +1670,15 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+               if (!(mask & req->events))
+                       return 0;
+ 
+-              /* try to complete the iocb inline if we can: */
+-              if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
++              /*
++               * Try to complete the iocb inline if we can. Use
++               * irqsave/irqrestore because not all filesystems (e.g. fuse)
++               * call this function with IRQs disabled and because IRQs
++               * have to be disabled before ctx_lock is obtained.
++               */
++              if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
+                       list_del(&iocb->ki_list);
+-                      spin_unlock(&iocb->ki_ctx->ctx_lock);
++                      spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
+ 
+                       list_del_init(&req->wait.entry);
+                       aio_poll_complete(iocb, mask);
+diff --git a/fs/exec.c b/fs/exec.c
+index fc281b738a982..20c33029a0628 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -929,7 +929,7 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size,
+               bytes = kernel_read(file, *buf + pos, i_size - pos, &pos);
+               if (bytes < 0) {
+                       ret = bytes;
+-                      goto out;
++                      goto out_free;
+               }
+ 
+               if (bytes == 0)
+diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
+index 882a9b9e34bc2..72f59e8321e79 100644
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -254,20 +254,12 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
+ static struct freq_attr _name =                       \
+ __ATTR(_name, 0200, NULL, store_##_name)
+ 
+-struct global_attr {
+-      struct attribute attr;
+-      ssize_t (*show)(struct kobject *kobj,
+-                      struct attribute *attr, char *buf);
+-      ssize_t (*store)(struct kobject *a, struct attribute *b,
+-                       const char *c, size_t count);
+-};
+-
+ #define define_one_global_ro(_name)           \
+-static struct global_attr _name =             \
++static struct kobj_attribute _name =          \
+ __ATTR(_name, 0444, show_##_name, NULL)
+ 
+ #define define_one_global_rw(_name)           \
+-static struct global_attr _name =             \
++static struct kobj_attribute _name =          \
+ __ATTR(_name, 0644, show_##_name, store_##_name)
+ 
+ 
+diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
+index ec9d6bc658559..fabee6db0abb7 100644
+--- a/include/net/bluetooth/bluetooth.h
++++ b/include/net/bluetooth/bluetooth.h
+@@ -276,7 +276,7 @@ int  bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+ int  bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
+ int  bt_sock_wait_ready(struct sock *sk, unsigned long flags);
+ 
+-void bt_accept_enqueue(struct sock *parent, struct sock *sk);
++void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh);
+ void bt_accept_unlink(struct sock *sk);
+ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
+ 
+diff --git a/include/net/icmp.h b/include/net/icmp.h
+index 3ef2743a8eecc..8665bf24e3b7a 100644
+--- a/include/net/icmp.h
++++ b/include/net/icmp.h
+@@ -22,6 +22,7 @@
+ 
+ #include <net/inet_sock.h>
+ #include <net/snmp.h>
++#include <net/ip.h>
+ 
+ struct icmp_err {
+   int         errno;
+@@ -39,7 +40,13 @@ struct net_proto_family;
+ struct sk_buff;
+ struct net;
+ 
+-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
++void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
++               const struct ip_options *opt);
++static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
++{
++      __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
++}
++
+ int icmp_rcv(struct sk_buff *skb);
+ void icmp_err(struct sk_buff *skb, u32 info);
+ int icmp_init(void);
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 72593e171d14c..b2b7cdb3f6fda 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -665,6 +665,8 @@ static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
+ }
+ 
+ void ip_options_fragment(struct sk_buff *skb);
++int __ip_options_compile(struct net *net, struct ip_options *opt,
++                       struct sk_buff *skb, __be32 *info);
+ int ip_options_compile(struct net *net, struct ip_options *opt,
+                      struct sk_buff *skb);
+ int ip_options_get(struct net *net, struct ip_options_rcu **optp,
+@@ -714,7 +716,7 @@ extern int sysctl_icmp_msgs_burst;
+ int ip_misc_proc_init(void);
+ #endif
+ 
+-int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
++int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
+                               struct netlink_ext_ack *extack);
+ 
+ #endif        /* _IP_H */
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 4d736427a4cb9..13327bd432312 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -48,7 +48,10 @@ struct qdisc_size_table {
+ struct qdisc_skb_head {
+       struct sk_buff  *head;
+       struct sk_buff  *tail;
+-      __u32           qlen;
++      union {
++              u32             qlen;
++              atomic_t        atomic_qlen;
++      };
+       spinlock_t      lock;
+ };
+ 
+@@ -405,27 +408,19 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
+       BUILD_BUG_ON(sizeof(qcb->data) < sz);
+ }
+ 
+-static inline int qdisc_qlen_cpu(const struct Qdisc *q)
+-{
+-      return this_cpu_ptr(q->cpu_qstats)->qlen;
+-}
+-
+ static inline int qdisc_qlen(const struct Qdisc *q)
+ {
+       return q->q.qlen;
+ }
+ 
+-static inline int qdisc_qlen_sum(const struct Qdisc *q)
++static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
+ {
+-      __u32 qlen = q->qstats.qlen;
+-      int i;
++      u32 qlen = q->qstats.qlen;
+ 
+-      if (q->flags & TCQ_F_NOLOCK) {
+-              for_each_possible_cpu(i)
+-                      qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
+-      } else {
++      if (q->flags & TCQ_F_NOLOCK)
++              qlen += atomic_read(&q->q.atomic_qlen);
++      else
+               qlen += q->q.qlen;
+-      }
+ 
+       return qlen;
+ }
+@@ -798,14 +793,14 @@ static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
+       this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
+ }
+ 
+-static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
++static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
+ {
+-      this_cpu_inc(sch->cpu_qstats->qlen);
++      atomic_inc(&sch->q.atomic_qlen);
+ }
+ 
+-static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
++static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
+ {
+-      this_cpu_dec(sch->cpu_qstats->qlen);
++      atomic_dec(&sch->q.atomic_qlen);
+ }
+ 
+ static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
+diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
+index b9ba520f7e4bb..2832134e53971 100644
+--- a/include/uapi/linux/android/binder.h
++++ b/include/uapi/linux/android/binder.h
+@@ -41,6 +41,14 @@ enum {
+ enum {
+       FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+       FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
++
++      /**
++       * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
++       *
++       * Only when set, causes senders to include their security
++       * context
++       */
++      FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
+ };
+ 
+ #ifdef BINDER_IPC_32BIT
+@@ -218,6 +226,7 @@ struct binder_node_info_for_ref {
+ #define BINDER_VERSION                        _IOWR('b', 9, struct binder_version)
+ #define BINDER_GET_NODE_DEBUG_INFO    _IOWR('b', 11, struct binder_node_debug_info)
+ #define BINDER_GET_NODE_INFO_FOR_REF  _IOWR('b', 12, struct binder_node_info_for_ref)
++#define BINDER_SET_CONTEXT_MGR_EXT    _IOW('b', 13, struct flat_binder_object)
+ 
+ /*
+  * NOTE: Two special error codes you should check for when calling
+@@ -276,6 +285,11 @@ struct binder_transaction_data {
+       } data;
+ };
+ 
++struct binder_transaction_data_secctx {
++      struct binder_transaction_data transaction_data;
++      binder_uintptr_t secctx;
++};
++
+ struct binder_transaction_data_sg {
+       struct binder_transaction_data transaction_data;
+       binder_size_t buffers_size;
+@@ -311,6 +325,11 @@ enum binder_driver_return_protocol {
+       BR_OK = _IO('r', 1),
+       /* No parameters! */
+ 
++      BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
++                                    struct binder_transaction_data_secctx),
++      /*
++       * binder_transaction_data_secctx: the received command.
++       */
+       BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+       BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+       /*
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index e4c8262291521..63a9253679f2a 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -6428,7 +6428,8 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
+                       u32 off_reg;
+ 
+                       aux = &env->insn_aux_data[i + delta];
+-                      if (!aux->alu_state)
++                      if (!aux->alu_state ||
++                          aux->alu_state == BPF_ALU_NON_POINTER)
+                               continue;
+ 
+                       isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 5574e862de8d5..5a1c64a26e819 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -1301,7 +1301,7 @@ static int parse_pred(const char *str, void *data,
+               /* go past the last quote */
+               i++;
+ 
+-      } else if (isdigit(str[i])) {
++      } else if (isdigit(str[i]) || str[i] == '-') {
+ 
+               /* Make sure the field is not a string */
+               if (is_string_field(field)) {
+@@ -1314,6 +1314,9 @@ static int parse_pred(const char *str, void *data,
+                       goto err_free;
+               }
+ 
++              if (str[i] == '-')
++                      i++;
++
+               /* We allow 0xDEADBEEF */
+               while (isalnum(str[i]))
+                       i++;
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index deacc52d7ff18..8d12198eaa949 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -154,15 +154,25 @@ void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
+ }
+ EXPORT_SYMBOL(bt_sock_unlink);
+ 
+-void bt_accept_enqueue(struct sock *parent, struct sock *sk)
++void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
+ {
+       BT_DBG("parent %p, sk %p", parent, sk);
+ 
+       sock_hold(sk);
+-      lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
++
++      if (bh)
++              bh_lock_sock_nested(sk);
++      else
++              lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
++
+       list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
+       bt_sk(sk)->parent = parent;
+-      release_sock(sk);
++
++      if (bh)
++              bh_unlock_sock(sk);
++      else
++              release_sock(sk);
++
+       parent->sk_ack_backlog++;
+ }
+ EXPORT_SYMBOL(bt_accept_enqueue);
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 686bdc6b35b03..a3a2cd55e23a9 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1252,7 +1252,7 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
+ 
+       l2cap_sock_init(sk, parent);
+ 
+-      bt_accept_enqueue(parent, sk);
++      bt_accept_enqueue(parent, sk, false);
+ 
+       release_sock(parent);
+ 
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index d606e92122916..c044ff2f73e6c 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -988,7 +988,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
+       rfcomm_pi(sk)->channel = channel;
+ 
+       sk->sk_state = BT_CONFIG;
+-      bt_accept_enqueue(parent, sk);
++      bt_accept_enqueue(parent, sk, true);
+ 
+       /* Accept connection and return socket DLC */
+       *d = rfcomm_pi(sk)->dlc;
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 8f0f9279eac9f..a4ca55df73908 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -193,7 +193,7 @@ static void __sco_chan_add(struct sco_conn *conn, struct sock *sk,
+       conn->sk = sk;
+ 
+       if (parent)
+-              bt_accept_enqueue(parent, sk);
++              bt_accept_enqueue(parent, sk, true);
+ }
+ 
+ static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
+diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
+index 9bf1b9ad17806..ac679f74ba475 100644
+--- a/net/core/gen_stats.c
++++ b/net/core/gen_stats.c
+@@ -291,7 +291,6 @@ __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
+       for_each_possible_cpu(i) {
+               const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
+ 
+-              qstats->qlen = 0;
+               qstats->backlog += qcpu->backlog;
+               qstats->drops += qcpu->drops;
+               qstats->requeues += qcpu->requeues;
+@@ -307,7 +306,6 @@ void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
+       if (cpu) {
+               __gnet_stats_copy_queue_cpu(qstats, cpu);
+       } else {
+-              qstats->qlen = q->qlen;
+               qstats->backlog = q->backlog;
+               qstats->drops = q->drops;
+               qstats->requeues = q->requeues;
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index bd67c4d0fcfdf..2aabb7eb08541 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -1547,6 +1547,9 @@ static int register_queue_kobjects(struct net_device *dev)
+ error:
+       netdev_queue_update_kobjects(dev, txq, 0);
+       net_rx_queue_update_kobjects(dev, rxq, 0);
++#ifdef CONFIG_SYSFS
++      kset_unregister(dev->queues_kset);
++#endif
+       return error;
+ }
+ 
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index 777fa3b7fb13d..f0165c5f376b3 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -667,7 +667,8 @@ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level)
+       case CIPSO_V4_MAP_PASS:
+               return 0;
+       case CIPSO_V4_MAP_TRANS:
+-              if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL)
++              if ((level < doi_def->map.std->lvl.cipso_size) &&
++                  (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL))
+                       return 0;
+               break;
+       }
+@@ -1735,13 +1736,26 @@ validate_return:
+  */
+ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
+ {
++      unsigned char optbuf[sizeof(struct ip_options) + 40];
++      struct ip_options *opt = (struct ip_options *)optbuf;
++
+       if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
+               return;
+ 
++      /*
++       * We might be called above the IP layer,
++       * so we can not use icmp_send and IPCB here.
++       */
++
++      memset(opt, 0, sizeof(struct ip_options));
++      opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
++      if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
++              return;
++
+       if (gateway)
+-              icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0);
++              __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt);
+       else
+-              icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0);
++              __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt);
+ }
+ 
+ /**
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index fe4f6a6242383..ed14ec2455847 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -710,6 +710,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
+               case RTA_GATEWAY:
+                       cfg->fc_gw = nla_get_be32(attr);
+                       break;
++              case RTA_VIA:
++                      NL_SET_ERR_MSG(extack, "IPv4 does not support RTA_VIA attribute");
++                      err = -EINVAL;
++                      goto errout;
+               case RTA_PRIORITY:
+                       cfg->fc_priority = nla_get_u32(attr);
+                       break;
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index d832beed6e3a3..c897fec9f903a 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -570,7 +570,8 @@ relookup_failed:
+  *                    MUST reply to only the first fragment.
+  */
+ 
+-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
++void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
++               const struct ip_options *opt)
+ {
+       struct iphdr *iph;
+       int room;
+@@ -691,7 +692,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+                                         iph->tos;
+       mark = IP4_REPLY_MARK(net, skb_in->mark);
+ 
+-      if (ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in))
++      if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in, opt))
+               goto out_unlock;
+ 
+ 
+@@ -742,7 +743,7 @@ out_bh_enable:
+       local_bh_enable();
+ out:;
+ }
+-EXPORT_SYMBOL(icmp_send);
++EXPORT_SYMBOL(__icmp_send);
+ 
+ 
+ static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
+diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
+index 3163428219cd5..a433b74a4f761 100644
+--- a/net/ipv4/ip_input.c
++++ b/net/ipv4/ip_input.c
+@@ -308,11 +308,10 @@ drop:
+ }
+ 
+ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
+-                            struct sk_buff *skb)
++                            struct sk_buff *skb, struct net_device *dev)
+ {
+       const struct iphdr *iph = ip_hdr(skb);
+       int (*edemux)(struct sk_buff *skb);
+-      struct net_device *dev = skb->dev;
+       struct rtable *rt;
+       int err;
+ 
+@@ -401,6 +400,7 @@ drop_error:
+ 
+ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
++      struct net_device *dev = skb->dev;
+       int ret;
+ 
+       /* if ingress device is enslaved to an L3 master device pass the
+@@ -410,7 +410,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+       if (!skb)
+               return NET_RX_SUCCESS;
+ 
+-      ret = ip_rcv_finish_core(net, sk, skb);
++      ret = ip_rcv_finish_core(net, sk, skb, dev);
+       if (ret != NET_RX_DROP)
+               ret = dst_input(skb);
+       return ret;
+@@ -546,6 +546,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
+ 
+       INIT_LIST_HEAD(&sublist);
+       list_for_each_entry_safe(skb, next, head, list) {
++              struct net_device *dev = skb->dev;
+               struct dst_entry *dst;
+ 
+               skb_list_del_init(skb);
+@@ -555,7 +556,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
+               skb = l3mdev_ip_rcv(skb);
+               if (!skb)
+                       continue;
+-              if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP)
++              if (ip_rcv_finish_core(net, sk, skb, dev) == NET_RX_DROP)
+                       continue;
+ 
+               dst = skb_dst(skb);
+diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
+index ed194d46c00e3..32a35043c9f59 100644
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -251,8 +251,9 @@ static void spec_dst_fill(__be32 *spec_dst, struct sk_buff *skb)
+  * If opt == NULL, then skb->data should point to IP header.
+  */
+ 
+-int ip_options_compile(struct net *net,
+-                     struct ip_options *opt, struct sk_buff *skb)
++int __ip_options_compile(struct net *net,
++                       struct ip_options *opt, struct sk_buff *skb,
++                       __be32 *info)
+ {
+       __be32 spec_dst = htonl(INADDR_ANY);
+       unsigned char *pp_ptr = NULL;
+@@ -468,11 +469,22 @@ eol:
+               return 0;
+ 
+ error:
+-      if (skb) {
+-              icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24));
+-      }
++      if (info)
++              *info = htonl((pp_ptr-iph)<<24);
+       return -EINVAL;
+ }
++
++int ip_options_compile(struct net *net,
++                     struct ip_options *opt, struct sk_buff *skb)
++{
++      int ret;
++      __be32 info;
++
++      ret = __ip_options_compile(net, opt, skb, &info);
++      if (ret != 0 && skb)
++              icmp_send(skb, ICMP_PARAMETERPROB, 0, info);
++      return ret;
++}
+ EXPORT_SYMBOL(ip_options_compile);
+ 
+ /*
+diff --git a/net/ipv4/netlink.c b/net/ipv4/netlink.c
+index f86bb4f066095..d8e3a1fb8e826 100644
+--- a/net/ipv4/netlink.c
++++ b/net/ipv4/netlink.c
+@@ -3,9 +3,10 @@
+ #include <linux/types.h>
+ #include <net/net_namespace.h>
+ #include <net/netlink.h>
++#include <linux/in6.h>
+ #include <net/ip.h>
+ 
+-int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
++int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
+                               struct netlink_ext_ack *extack)
+ {
+       *ip_proto = nla_get_u8(attr);
+@@ -13,11 +14,19 @@ int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
+       switch (*ip_proto) {
+       case IPPROTO_TCP:
+       case IPPROTO_UDP:
++              return 0;
+       case IPPROTO_ICMP:
++              if (family != AF_INET)
++                      break;
++              return 0;
++#if IS_ENABLED(CONFIG_IPV6)
++      case IPPROTO_ICMPV6:
++              if (family != AF_INET6)
++                      break;
+               return 0;
+-      default:
+-              NL_SET_ERR_MSG(extack, "Unsupported ip proto");
+-              return -EOPNOTSUPP;
++#endif
+       }
++      NL_SET_ERR_MSG(extack, "Unsupported ip proto");
++      return -EOPNOTSUPP;
+ }
+ EXPORT_SYMBOL_GPL(rtm_getroute_parse_ip_proto);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index d1ddf1d037215..efe45200db4f5 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2803,7 +2803,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ 
+       if (tb[RTA_IP_PROTO]) {
+               err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
+-                                                &ip_proto, extack);
++                                                &ip_proto, AF_INET, extack);
+               if (err)
+                       return err;
+       }
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index edda3f9daab9a..9ed08ac6066c8 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -1964,10 +1964,10 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+ 
+ static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+-      __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+-                      IPSTATS_MIB_OUTFORWDATAGRAMS);
+-      __IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
+-                      IPSTATS_MIB_OUTOCTETS, skb->len);
++      IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
++                    IPSTATS_MIB_OUTFORWDATAGRAMS);
++      IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
++                    IPSTATS_MIB_OUTOCTETS, skb->len);
+       return dst_output(net, sk, skb);
+ }
+ 
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 059f0531f7c1c..c87ce5732338c 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -4163,6 +4163,10 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
+               cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
+               cfg->fc_flags |= RTF_GATEWAY;
+       }
++      if (tb[RTA_VIA]) {
++              NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
++              goto errout;
++      }
+ 
+       if (tb[RTA_DST]) {
+               int plen = (rtm->rtm_dst_len + 7) >> 3;
+@@ -4880,7 +4884,8 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ 
+       if (tb[RTA_IP_PROTO]) {
+               err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
+-                                                &fl6.flowi6_proto, extack);
++                                                &fl6.flowi6_proto, AF_INET6,
++                                                extack);
+               if (err)
+                       goto errout;
+       }
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index e8a1dabef803e..09e440e8dfaec 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1873,6 +1873,7 @@ static int __net_init sit_init_net(struct net *net)
+ 
+ err_reg_dev:
+       ipip6_dev_free(sitn->fb_tunnel_dev);
++      free_netdev(sitn->fb_tunnel_dev);
+ err_alloc_dev:
+       return err;
+ }
+diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
+index 7d55d4c040887..fa763e2e50ec2 100644
+--- a/net/mpls/af_mpls.c
++++ b/net/mpls/af_mpls.c
+@@ -1838,6 +1838,9 @@ static int rtm_to_route_config(struct sk_buff *skb,
+                               goto errout;
+                       break;
+               }
++              case RTA_GATEWAY:
++                      NL_SET_ERR_MSG(extack, "MPLS does not support RTA_GATEWAY attribute");
++                      goto errout;
+               case RTA_VIA:
+               {
+                       if (nla_get_via(nla, &cfg->rc_via_alen,
+diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
+index ea7c67050792c..ee3e5b6471a69 100644
+--- a/net/netlabel/netlabel_kapi.c
++++ b/net/netlabel/netlabel_kapi.c
+@@ -903,7 +903,8 @@ int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len,
+                   (state == 0 && (byte & bitmask) == 0))
+                       return bit_spot;
+ 
+-              bit_spot++;
++              if (++bit_spot >= bitmap_len)
++                      return -1;
+               bitmask >>= 1;
+               if (bitmask == 0) {
+                       byte = bitmap[++byte_offset];
+diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
+index 6a196e438b6c0..d1fc019e932e0 100644
+--- a/net/nfc/llcp_commands.c
++++ b/net/nfc/llcp_commands.c
+@@ -419,6 +419,10 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
+                                                     sock->service_name,
+                                                     sock->service_name_len,
+                                                     &service_name_tlv_length);
++              if (!service_name_tlv) {
++                      err = -ENOMEM;
++                      goto error_tlv;
++              }
+               size += service_name_tlv_length;
+       }
+ 
+@@ -429,9 +433,17 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
+ 
+       miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
+                                     &miux_tlv_length);
++      if (!miux_tlv) {
++              err = -ENOMEM;
++              goto error_tlv;
++      }
+       size += miux_tlv_length;
+ 
+       rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
++      if (!rw_tlv) {
++              err = -ENOMEM;
++              goto error_tlv;
++      }
+       size += rw_tlv_length;
+ 
+       pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
+@@ -484,9 +496,17 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
+ 
+       miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
+                                     &miux_tlv_length);
++      if (!miux_tlv) {
++              err = -ENOMEM;
++              goto error_tlv;
++      }
+       size += miux_tlv_length;
+ 
+       rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
++      if (!rw_tlv) {
++              err = -ENOMEM;
++              goto error_tlv;
++      }
+       size += rw_tlv_length;
+ 
+       skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
+diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
+index ef4026a23e802..4fa015208aab1 100644
+--- a/net/nfc/llcp_core.c
++++ b/net/nfc/llcp_core.c
+@@ -532,10 +532,10 @@ static u8 nfc_llcp_reserve_sdp_ssap(struct nfc_llcp_local *local)
+ 
+ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
+ {
+-      u8 *gb_cur, *version_tlv, version, version_length;
+-      u8 *lto_tlv, lto_length;
+-      u8 *wks_tlv, wks_length;
+-      u8 *miux_tlv, miux_length;
++      u8 *gb_cur, version, version_length;
++      u8 lto_length, wks_length, miux_length;
++      u8 *version_tlv = NULL, *lto_tlv = NULL,
++         *wks_tlv = NULL, *miux_tlv = NULL;
+       __be16 wks = cpu_to_be16(local->local_wks);
+       u8 gb_len = 0;
+       int ret = 0;
+@@ -543,17 +543,33 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
+       version = LLCP_VERSION_11;
+       version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
+                                        1, &version_length);
++      if (!version_tlv) {
++              ret = -ENOMEM;
++              goto out;
++      }
+       gb_len += version_length;
+ 
+       lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &local->lto, 1, &lto_length);
++      if (!lto_tlv) {
++              ret = -ENOMEM;
++              goto out;
++      }
+       gb_len += lto_length;
+ 
+       pr_debug("Local wks 0x%lx\n", local->local_wks);
+       wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&wks, 2, &wks_length);
++      if (!wks_tlv) {
++              ret = -ENOMEM;
++              goto out;
++      }
+       gb_len += wks_length;
+ 
+       miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
+                                     &miux_length);
++      if (!miux_tlv) {
++              ret = -ENOMEM;
++              goto out;
++      }
+       gb_len += miux_length;
+ 
+       gb_len += ARRAY_SIZE(llcp_magic);
+diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
+index 8af6c11d2482a..faa1addf89b3b 100644
+--- a/net/sched/act_ipt.c
++++ b/net/sched/act_ipt.c
+@@ -199,8 +199,7 @@ err3:
+ err2:
+       kfree(tname);
+ err1:
+-      if (ret == ACT_P_CREATED)
+-              tcf_idr_release(*a, bind);
++      tcf_idr_release(*a, bind);
+       return err;
+ }
+ 
+diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
+index 64dba3708fce2..cfceed28c3331 100644
+--- a/net/sched/act_skbedit.c
++++ b/net/sched/act_skbedit.c
+@@ -189,8 +189,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
+ 
+       params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
+       if (unlikely(!params_new)) {
+-              if (ret == ACT_P_CREATED)
+-                      tcf_idr_release(*a, bind);
++              tcf_idr_release(*a, bind);
+               return -ENOMEM;
+       }
+ 
+diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
+index 904730b8ce8f2..6f16202d4b4ff 100644
+--- a/net/sched/act_tunnel_key.c
++++ b/net/sched/act_tunnel_key.c
+@@ -377,7 +377,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
+       return ret;
+ 
+ release_tun_meta:
+-      dst_release(&metadata->dst);
++      if (metadata)
++              dst_release(&metadata->dst);
+ 
+ err_out:
+       if (exists)
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index de1663f7d3ad6..52f429b1cdd53 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -68,7 +68,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
+                       skb = __skb_dequeue(&q->skb_bad_txq);
+                       if (qdisc_is_percpu_stats(q)) {
+                               qdisc_qstats_cpu_backlog_dec(q, skb);
+-                              qdisc_qstats_cpu_qlen_dec(q);
++                              qdisc_qstats_atomic_qlen_dec(q);
+                       } else {
+                               qdisc_qstats_backlog_dec(q, skb);
+                               q->q.qlen--;
+@@ -108,7 +108,7 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
+ 
+       if (qdisc_is_percpu_stats(q)) {
+               qdisc_qstats_cpu_backlog_inc(q, skb);
+-              qdisc_qstats_cpu_qlen_inc(q);
++              qdisc_qstats_atomic_qlen_inc(q);
+       } else {
+               qdisc_qstats_backlog_inc(q, skb);
+               q->q.qlen++;
+@@ -147,7 +147,7 @@ static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
+ 
+               qdisc_qstats_cpu_requeues_inc(q);
+               qdisc_qstats_cpu_backlog_inc(q, skb);
+-              qdisc_qstats_cpu_qlen_inc(q);
++              qdisc_qstats_atomic_qlen_inc(q);
+ 
+               skb = next;
+       }
+@@ -252,7 +252,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
+                       skb = __skb_dequeue(&q->gso_skb);
+                       if (qdisc_is_percpu_stats(q)) {
+                               qdisc_qstats_cpu_backlog_dec(q, skb);
+-                              qdisc_qstats_cpu_qlen_dec(q);
++                              qdisc_qstats_atomic_qlen_dec(q);
+                       } else {
+                               qdisc_qstats_backlog_dec(q, skb);
+                               q->q.qlen--;
+@@ -645,7 +645,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
+       if (unlikely(err))
+               return qdisc_drop_cpu(skb, qdisc, to_free);
+ 
+-      qdisc_qstats_cpu_qlen_inc(qdisc);
++      qdisc_qstats_atomic_qlen_inc(qdisc);
+       /* Note: skb can not be used after skb_array_produce(),
+        * so we better not use qdisc_qstats_cpu_backlog_inc()
+        */
+@@ -670,7 +670,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
+       if (likely(skb)) {
+               qdisc_qstats_cpu_backlog_dec(qdisc, skb);
+               qdisc_bstats_cpu_update(qdisc, skb);
+-              qdisc_qstats_cpu_qlen_dec(qdisc);
++              qdisc_qstats_atomic_qlen_dec(qdisc);
+       }
+ 
+       return skb;
+@@ -714,7 +714,6 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
+               struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
+ 
+               q->backlog = 0;
+-              q->qlen = 0;
+       }
+ }
+ 
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 22cd46a600576..ffb9e21ffc5c7 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -430,6 +430,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+       int nb = 0;
+       int count = 1;
+       int rc = NET_XMIT_SUCCESS;
++      int rc_drop = NET_XMIT_DROP;
+ 
+       /* Do not fool qdisc_drop_all() */
+       skb->prev = NULL;
+@@ -469,6 +470,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+               q->duplicate = 0;
+               rootq->enqueue(skb2, rootq, to_free);
+               q->duplicate = dupsave;
++              rc_drop = NET_XMIT_SUCCESS;
+       }
+ 
+       /*
+@@ -481,7 +483,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+               if (skb_is_gso(skb)) {
+                       segs = netem_segment(skb, sch, to_free);
+                       if (!segs)
+-                              return NET_XMIT_DROP;
++                              return rc_drop;
+               } else {
+                       segs = skb;
+               }
+@@ -504,8 +506,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+                       1<<(prandom_u32() % 8);
+       }
+ 
+-      if (unlikely(sch->q.qlen >= sch->limit))
+-              return qdisc_drop_all(skb, sch, to_free);
++      if (unlikely(sch->q.qlen >= sch->limit)) {
++              qdisc_drop_all(skb, sch, to_free);
++              return rc_drop;
++      }
+ 
+       qdisc_qstats_backlog_inc(sch, skb);
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index b7a534f6d7c20..1b5728e9c4974 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1866,6 +1866,7 @@ static int sctp_sendmsg_check_sflags(struct sctp_association *asoc,
+ 
+               pr_debug("%s: aborting association:%p\n", __func__, asoc);
+               sctp_primitive_ABORT(net, asoc, chunk);
++              iov_iter_revert(&msg->msg_iter, msg_len);
+ 
+               return 0;
+       }
+diff --git a/net/socket.c b/net/socket.c
+index 93a45f15ee40d..d4ac431b78b28 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -577,6 +577,7 @@ static void __sock_release(struct socket *sock, struct inode *inode)
+               if (inode)
+                       inode_lock(inode);
+               sock->ops->release(sock);
++              sock->sk = NULL;
+               if (inode)
+                       inode_unlock(inode);
+               sock->ops = NULL;
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 8f34db2a97857..f08d8e4923f23 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -376,11 +376,13 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)
+ 
+ #define tipc_wait_for_cond(sock_, timeo_, condition_)                        \
+ ({                                                                             \
++      DEFINE_WAIT_FUNC(wait_, woken_wake_function);                          \
+       struct sock *sk_;                                                      \
+       int rc_;                                                               \
+                                                                              \
+       while ((rc_ = !(condition_))) {                                        \
+-              DEFINE_WAIT_FUNC(wait_, woken_wake_function);                  \
++              /* coupled with smp_wmb() in tipc_sk_proto_rcv() */            \
++              smp_rmb();                                                     \
+               sk_ = (sock_)->sk;                                             \
+               rc_ = tipc_sk_sock_err((sock_), timeo_);                       \
+               if (rc_)                                                       \
+@@ -1320,7 +1322,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
+ 
+       if (unlikely(!dest)) {
+               dest = &tsk->peer;
+-              if (!syn || dest->family != AF_TIPC)
++              if (!syn && dest->family != AF_TIPC)
+                       return -EDESTADDRREQ;
+       }
+ 
+@@ -1968,6 +1970,8 @@ static void tipc_sk_proto_rcv(struct sock *sk,
+               return;
+       case SOCK_WAKEUP:
+               tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
++              /* coupled with smp_rmb() in tipc_wait_for_cond() */
++              smp_wmb();
+               tsk->cong_link_cnt--;
+               wakeup = true;
+               break;
+diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh
+index 6c5f1b2ffb745..1cbb12e284a68 100755
+--- a/tools/testing/selftests/firmware/fw_lib.sh
++++ b/tools/testing/selftests/firmware/fw_lib.sh
+@@ -91,7 +91,7 @@ verify_reqs()
+       if [ "$TEST_REQS_FW_SYSFS_FALLBACK" = "yes" ]; then
+               if [ ! "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
+                       echo "usermode helper disabled so ignoring test"
+-                      exit $ksft_skip
++                      exit 0
+               fi
+       fi
+ }
