commit:     160539b70010e941a701f6a04687797753ffb9f6
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 26 11:14:31 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug 26 11:14:31 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=160539b7

Linux patch 4.14.195

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1194_linux-4.14.195.patch | 1484 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1488 insertions(+)

diff --git a/0000_README b/0000_README
index ac7660d..7d4b532 100644
--- a/0000_README
+++ b/0000_README
@@ -819,6 +819,10 @@ Patch:  1193_linux-4.14.194.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.14.194
 
+Patch:  1194_linux-4.14.195.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.14.195
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1194_linux-4.14.195.patch b/1194_linux-4.14.195.patch
new file mode 100644
index 0000000..b7cb544
--- /dev/null
+++ b/1194_linux-4.14.195.patch
@@ -0,0 +1,1484 @@
+diff --git a/Makefile b/Makefile
+index 8e2a1418c5ae6..a5946969f4fcb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 194
++SUBLEVEL = 195
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
+index d123ff90f7a83..9995bed6e92e2 100644
+--- a/arch/alpha/include/asm/io.h
++++ b/arch/alpha/include/asm/io.h
+@@ -493,10 +493,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
+ }
+ #endif
+ 
+-#define ioread16be(p) be16_to_cpu(ioread16(p))
+-#define ioread32be(p) be32_to_cpu(ioread32(p))
+-#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
+-#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
++#define ioread16be(p) swab16(ioread16(p))
++#define ioread32be(p) swab32(ioread32(p))
++#define iowrite16be(v,p) iowrite16(swab16(v), (p))
++#define iowrite32be(v,p) iowrite32(swab32(v), (p))
+ 
+ #define inb_p         inb
+ #define inw_p         inw
+diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h
+index 9138a624c5c81..692f90e7fecc1 100644
+--- a/arch/m68k/include/asm/m53xxacr.h
++++ b/arch/m68k/include/asm/m53xxacr.h
+@@ -89,9 +89,9 @@
+  * coherency though in all cases. And for copyback caches we will need
+  * to push cached data as well.
+  */
+-#define CACHE_INIT      CACR_CINVA
+-#define CACHE_INVALIDATE  CACR_CINVA
+-#define CACHE_INVALIDATED CACR_CINVA
++#define CACHE_INIT        (CACHE_MODE + CACR_CINVA - CACR_EC)
++#define CACHE_INVALIDATE  (CACHE_MODE + CACR_CINVA)
++#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA)
+ 
+ #define ACR0_MODE     ((CONFIG_RAMBASE & 0xff000000) + \
+                        (0x000f0000) + \
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index 5fc8a010fdf07..ebe97e5500ee5 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -22,6 +22,7 @@
+ #include <linux/errno.h>
+ #include <linux/string.h>
+ #include <linux/types.h>
++#include <linux/pagemap.h>
+ #include <linux/ptrace.h>
+ #include <linux/mman.h>
+ #include <linux/mm.h>
+@@ -66,15 +67,11 @@ static inline bool notify_page_fault(struct pt_regs *regs)
+ }
+ 
+ /*
+- * Check whether the instruction at regs->nip is a store using
++ * Check whether the instruction inst is a store using
+  * an update addressing form which will update r1.
+  */
+-static bool store_updates_sp(struct pt_regs *regs)
++static bool store_updates_sp(unsigned int inst)
+ {
+-      unsigned int inst;
+-
+-      if (get_user(inst, (unsigned int __user *)regs->nip))
+-              return false;
+       /* check for 1 in the rA field */
+       if (((inst >> 16) & 0x1f) != 1)
+               return false;
+@@ -227,20 +224,24 @@ static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
+       return is_exec || (address >= TASK_SIZE);
+ }
+ 
++// This comes from 64-bit struct rt_sigframe + __SIGNAL_FRAMESIZE
++#define SIGFRAME_MAX_SIZE     (4096 + 128)
++
+ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
+-                              struct vm_area_struct *vma,
+-                              bool store_update_sp)
++                              struct vm_area_struct *vma, unsigned int flags,
++                              bool *must_retry)
+ {
+       /*
+        * N.B. The POWER/Open ABI allows programs to access up to
+        * 288 bytes below the stack pointer.
+-       * The kernel signal delivery code writes up to about 1.5kB
++       * The kernel signal delivery code writes a bit over 4KB
+        * below the stack pointer (r1) before decrementing it.
+        * The exec code can write slightly over 640kB to the stack
+        * before setting the user r1.  Thus we allow the stack to
+        * expand to 1MB without further checks.
+        */
+       if (address + 0x100000 < vma->vm_end) {
++              unsigned int __user *nip = (unsigned int __user *)regs->nip;
+               /* get user regs even if this fault is in kernel mode */
+               struct pt_regs *uregs = current->thread.regs;
+               if (uregs == NULL)
+@@ -258,8 +259,22 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
+                * between the last mapped region and the stack will
+                * expand the stack rather than segfaulting.
+                */
+-              if (address + 2048 < uregs->gpr[1] && !store_update_sp)
+-                      return true;
++              if (address + SIGFRAME_MAX_SIZE >= uregs->gpr[1])
++                      return false;
++
++              if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) &&
++                  access_ok(VERIFY_READ, nip, sizeof(*nip))) {
++                      unsigned int inst;
++                      int res;
++
++                      pagefault_disable();
++                      res = __get_user_inatomic(inst, nip);
++                      pagefault_enable();
++                      if (!res)
++                              return !store_updates_sp(inst);
++                      *must_retry = true;
++              }
++              return true;
+       }
+       return false;
+ }
+@@ -392,7 +407,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
+       int is_user = user_mode(regs);
+       int is_write = page_fault_is_write(error_code);
+       int fault, major = 0;
+-      bool store_update_sp = false;
++      bool must_retry = false;
+ 
+       if (notify_page_fault(regs))
+               return 0;
+@@ -439,9 +454,6 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
+        * can result in fault, which will cause a deadlock when called with
+        * mmap_sem held
+        */
+-      if (is_write && is_user)
+-              store_update_sp = store_updates_sp(regs);
+-
+       if (is_user)
+               flags |= FAULT_FLAG_USER;
+       if (is_write)
+@@ -488,8 +500,17 @@ retry:
+               return bad_area(regs, address);
+ 
+       /* The stack is being expanded, check if it's valid */
+-      if (unlikely(bad_stack_expansion(regs, address, vma, store_update_sp)))
+-              return bad_area(regs, address);
++      if (unlikely(bad_stack_expansion(regs, address, vma, flags,
++                                       &must_retry))) {
++              if (!must_retry)
++                      return bad_area(regs, address);
++
++              up_read(&mm->mmap_sem);
++              if (fault_in_pages_readable((const char __user *)regs->nip,
++                                          sizeof(unsigned int)))
++                      return bad_area_nosemaphore(regs, address);
++              goto retry;
++      }
+ 
+       /* Try to expand it */
+       if (unlikely(expand_stack(vma, address)))
+diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
+index 5ec935521204a..8d20d49b252a0 100644
+--- a/arch/powerpc/platforms/pseries/ras.c
++++ b/arch/powerpc/platforms/pseries/ras.c
+@@ -115,7 +115,6 @@ static void handle_system_shutdown(char event_modifier)
+       case EPOW_SHUTDOWN_ON_UPS:
+               pr_emerg("Loss of system power detected. System is running on"
+                        " UPS/battery. Check RTAS error log for details\n");
+-              orderly_poweroff(true);
+               break;
+ 
+       case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 44b6f23cc851d..4289c519af1be 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -39,6 +39,17 @@ static HLIST_HEAD(clk_root_list);
+ static HLIST_HEAD(clk_orphan_list);
+ static LIST_HEAD(clk_notifier_list);
+ 
++static struct hlist_head *all_lists[] = {
++      &clk_root_list,
++      &clk_orphan_list,
++      NULL,
++};
++
++static struct hlist_head *orphan_list[] = {
++      &clk_orphan_list,
++      NULL,
++};
++
+ /***    private data structures    ***/
+ 
+ struct clk_core {
+@@ -1993,17 +2004,6 @@ static int inited = 0;
+ static DEFINE_MUTEX(clk_debug_lock);
+ static HLIST_HEAD(clk_debug_list);
+ 
+-static struct hlist_head *all_lists[] = {
+-      &clk_root_list,
+-      &clk_orphan_list,
+-      NULL,
+-};
+-
+-static struct hlist_head *orphan_list[] = {
+-      &clk_orphan_list,
+-      NULL,
+-};
+-
+ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
+                                int level)
+ {
+@@ -2735,6 +2735,34 @@ static const struct clk_ops clk_nodrv_ops = {
+       .set_parent     = clk_nodrv_set_parent,
+ };
+ 
++static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
++                                              struct clk_core *target)
++{
++      int i;
++      struct clk_core *child;
++
++      for (i = 0; i < root->num_parents; i++)
++              if (root->parents[i] == target)
++                      root->parents[i] = NULL;
++
++      hlist_for_each_entry(child, &root->children, child_node)
++              clk_core_evict_parent_cache_subtree(child, target);
++}
++
++/* Remove this clk from all parent caches */
++static void clk_core_evict_parent_cache(struct clk_core *core)
++{
++      struct hlist_head **lists;
++      struct clk_core *root;
++
++      lockdep_assert_held(&prepare_lock);
++
++      for (lists = all_lists; *lists; lists++)
++              hlist_for_each_entry(root, *lists, child_node)
++                      clk_core_evict_parent_cache_subtree(root, core);
++
++}
++
+ /**
+  * clk_unregister - unregister a currently registered clock
+  * @clk: clock to unregister
+@@ -2773,6 +2801,8 @@ void clk_unregister(struct clk *clk)
+                       clk_core_set_parent(child, NULL);
+       }
+ 
++      clk_core_evict_parent_cache(clk->core);
++
+       hlist_del_init(&clk->core->child_node);
+ 
+       if (clk->core->prepare_count)
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 1aa0b05c8cbdf..5c41dc9aaa46d 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1378,6 +1378,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+ 
+               intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+               cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
++              cpu->pstate.turbo_pstate = phy_max;
+       } else {
+               cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+       }
+diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
+index aa592277d5108..67037eb9a80ee 100644
+--- a/drivers/gpu/drm/vgem/vgem_drv.c
++++ b/drivers/gpu/drm/vgem/vgem_drv.c
+@@ -220,32 +220,6 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+       return 0;
+ }
+ 
+-static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
+-                           uint32_t handle, uint64_t *offset)
+-{
+-      struct drm_gem_object *obj;
+-      int ret;
+-
+-      obj = drm_gem_object_lookup(file, handle);
+-      if (!obj)
+-              return -ENOENT;
+-
+-      if (!obj->filp) {
+-              ret = -EINVAL;
+-              goto unref;
+-      }
+-
+-      ret = drm_gem_create_mmap_offset(obj);
+-      if (ret)
+-              goto unref;
+-
+-      *offset = drm_vma_node_offset_addr(&obj->vma_node);
+-unref:
+-      drm_gem_object_put_unlocked(obj);
+-
+-      return ret;
+-}
+-
+ static struct drm_ioctl_desc vgem_ioctls[] = {
+       DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+@@ -439,7 +413,6 @@ static struct drm_driver vgem_driver = {
+       .fops                           = &vgem_driver_fops,
+ 
+       .dumb_create                    = vgem_gem_dumb_create,
+-      .dumb_map_offset                = vgem_gem_dumb_map,
+ 
+       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
+index 8ac9e03c05b45..ca8f726dab2e7 100644
+--- a/drivers/input/mouse/psmouse-base.c
++++ b/drivers/input/mouse/psmouse-base.c
+@@ -2012,7 +2012,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
+ {
+       int type = *((unsigned int *)kp->arg);
+ 
+-      return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
++      return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name);
+ }
+ 
+ static int __init psmouse_init(void)
+diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
+index 97499b2af7144..20524376b83be 100644
+--- a/drivers/media/pci/ttpci/budget-core.c
++++ b/drivers/media/pci/ttpci/budget-core.c
+@@ -383,20 +383,25 @@ static int budget_register(struct budget *budget)
+       ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend);
+ 
+       if (ret < 0)
+-              return ret;
++              goto err_release_dmx;
+ 
+       budget->mem_frontend.source = DMX_MEMORY_FE;
+       ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend);
+       if (ret < 0)
+-              return ret;
++              goto err_release_dmx;
+ 
+       ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend);
+       if (ret < 0)
+-              return ret;
++              goto err_release_dmx;
+ 
+       dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx);
+ 
+       return 0;
++
++err_release_dmx:
++      dvb_dmxdev_release(&budget->dmxdev);
++      dvb_dmx_release(&budget->demux);
++      return ret;
+ }
+ 
+ static void budget_unregister(struct budget *budget)
+diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
+index 2ee4cd9e6d80f..d984f45c03149 100644
+--- a/drivers/media/platform/davinci/vpss.c
++++ b/drivers/media/platform/davinci/vpss.c
+@@ -514,19 +514,31 @@ static void vpss_exit(void)
+ 
+ static int __init vpss_init(void)
+ {
++      int ret;
++
+       if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control"))
+               return -EBUSY;
+ 
+       oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
+       if (unlikely(!oper_cfg.vpss_regs_base2)) {
+-              release_mem_region(VPSS_CLK_CTRL, 4);
+-              return -ENOMEM;
++              ret = -ENOMEM;
++              goto err_ioremap;
+       }
+ 
+       writel(VPSS_CLK_CTRL_VENCCLKEN |
+-                   VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
++             VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
++
++      ret = platform_driver_register(&vpss_driver);
++      if (ret)
++              goto err_pd_register;
++
++      return 0;
+ 
+-      return platform_driver_register(&vpss_driver);
++err_pd_register:
++      iounmap(oper_cfg.vpss_regs_base2);
++err_ioremap:
++      release_mem_region(VPSS_CLK_CTRL, 4);
++      return ret;
+ }
+ subsys_initcall(vpss_init);
+ module_exit(vpss_exit);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 1f867e275408e..861d2c0a521a4 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2010,7 +2010,8 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
+       int ret;
+ 
+       ret = __bond_release_one(bond_dev, slave_dev, false, true);
+-      if (ret == 0 && !bond_has_slaves(bond)) {
++      if (ret == 0 && !bond_has_slaves(bond) &&
++          bond_dev->reg_state != NETREG_UNREGISTERING) {
+               bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
+               netdev_info(bond_dev, "Destroying bond %s\n",
+                           bond_dev->name);
+@@ -2752,6 +2753,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
+                       if (bond_time_in_interval(bond, last_rx, 1)) {
+                               bond_propose_link_state(slave, BOND_LINK_UP);
+                               commit++;
++                      } else if (slave->link == BOND_LINK_BACK) {
++                              bond_propose_link_state(slave, BOND_LINK_FAIL);
++                              commit++;
+                       }
+                       continue;
+               }
+@@ -2862,6 +2866,19 @@ static void bond_ab_arp_commit(struct bonding *bond)
+ 
+                       continue;
+ 
++              case BOND_LINK_FAIL:
++                      bond_set_slave_link_state(slave, BOND_LINK_FAIL,
++                                                BOND_SLAVE_NOTIFY_NOW);
++                      bond_set_slave_inactive_flags(slave,
++                                                    BOND_SLAVE_NOTIFY_NOW);
++
++                      /* A slave has just been enslaved and has become
++                       * the current active slave.
++                       */
++                      if (rtnl_dereference(bond->curr_active_slave))
++                              RCU_INIT_POINTER(bond->current_arp_slave, NULL);
++                      continue;
++
+               default:
+                       netdev_err(bond->dev, "impossible: new_link %d on slave %s\n",
+                                  slave->link_new_state, slave->dev->name);
+@@ -2911,8 +2928,6 @@ static bool bond_ab_arp_probe(struct bonding *bond)
+                       return should_notify_rtnl;
+       }
+ 
+-      bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
+-
+       bond_for_each_slave_rcu(bond, slave, iter) {
+               if (!found && !before && bond_slave_is_up(slave))
+                       before = slave;
+@@ -4156,13 +4171,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
+       return ret;
+ }
+ 
++static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
++{
++      if (speed == 0 || speed == SPEED_UNKNOWN)
++              speed = slave->speed;
++      else
++              speed = min(speed, slave->speed);
++
++      return speed;
++}
++
+ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
+                                          struct ethtool_link_ksettings *cmd)
+ {
+       struct bonding *bond = netdev_priv(bond_dev);
+-      unsigned long speed = 0;
+       struct list_head *iter;
+       struct slave *slave;
++      u32 speed = 0;
+ 
+       cmd->base.duplex = DUPLEX_UNKNOWN;
+       cmd->base.port = PORT_OTHER;
+@@ -4174,8 +4199,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
+        */
+       bond_for_each_slave(bond, slave, iter) {
+               if (bond_slave_can_tx(slave)) {
+-                      if (slave->speed != SPEED_UNKNOWN)
+-                              speed += slave->speed;
++                      if (slave->speed != SPEED_UNKNOWN) {
++                              if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
++                                      speed = bond_mode_bcast_speed(slave,
++                                                                    speed);
++                              else
++                                      speed += slave->speed;
++                      }
+                       if (cmd->base.duplex == DUPLEX_UNKNOWN &&
+                           slave->duplex != DUPLEX_UNKNOWN)
+                               cmd->base.duplex = slave->duplex;
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 274d369151107..5c3fa0be8844e 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1160,6 +1160,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
+               return ret;
+ 
+       switch (ret) {
++      case -ETIMEDOUT:
++              return ret;
+       case -ENOSPC:
+               dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
+                       addr, vid);
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 8ba915cc4c2e4..22f964ef859e5 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3536,11 +3536,11 @@ failed_mii_init:
+ failed_irq:
+ failed_init:
+       fec_ptp_stop(pdev);
+-      if (fep->reg_phy)
+-              regulator_disable(fep->reg_phy);
+ failed_reset:
+       pm_runtime_put_noidle(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
++      if (fep->reg_phy)
++              regulator_disable(fep->reg_phy);
+ failed_regulator:
+       clk_disable_unprepare(fep->clk_ahb);
+ failed_clk_ahb:
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+index 5d5f422cbae55..f82da2b47d9a5 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+@@ -1175,7 +1175,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
+ #define I40E_AQC_SET_VSI_PROMISC_BROADCAST    0x04
+ #define I40E_AQC_SET_VSI_DEFAULT              0x08
+ #define I40E_AQC_SET_VSI_PROMISC_VLAN         0x10
+-#define I40E_AQC_SET_VSI_PROMISC_TX           0x8000
++#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY      0x8000
+       __le16  seid;
+ #define I40E_AQC_VSI_PROM_CMD_SEID_MASK               0x3FF
+       __le16  vlan_tag;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
+index 111426ba5fbce..3fd2dfaf2bd53 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
+@@ -1914,6 +1914,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+       return status;
+ }
+ 
++/**
++ * i40e_is_aq_api_ver_ge
++ * @aq: pointer to AdminQ info containing HW API version to compare
++ * @maj: API major value
++ * @min: API minor value
++ *
++ * Assert whether current HW API version is greater/equal than provided.
++ **/
++static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
++                                u16 min)
++{
++      return (aq->api_maj_ver > maj ||
++              (aq->api_maj_ver == maj && aq->api_min_ver >= min));
++}
++
+ /**
+  * i40e_aq_add_vsi
+  * @hw: pointer to the hw struct
+@@ -2039,18 +2054,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ 
+       if (set) {
+               flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+-              if (rx_only_promisc &&
+-                  (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+-                   (hw->aq.api_maj_ver > 1)))
+-                      flags |= I40E_AQC_SET_VSI_PROMISC_TX;
++              if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
++                      flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
+       }
+ 
+       cmd->promiscuous_flags = cpu_to_le16(flags);
+ 
+       cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+-      if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
+-          (hw->aq.api_maj_ver > 1))
+-              cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
++      if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
++              cmd->valid_flags |=
++                      cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
+ 
+       cmd->seid = cpu_to_le16(seid);
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+@@ -2147,11 +2160,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_set_vsi_promiscuous_modes);
+ 
+-      if (enable)
++      if (enable) {
+               flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
++              if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
++                      flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
++      }
+ 
+       cmd->promiscuous_flags = cpu_to_le16(flags);
+       cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
++      if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
++              cmd->valid_flags |=
++                      cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
+       cmd->seid = cpu_to_le16(seid);
+       cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index aa2b446d6ad0f..f4475cbf8ce86 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -11822,6 +11822,9 @@ static void i40e_remove(struct pci_dev *pdev)
+       i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
+       i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
+ 
++      while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
++              usleep_range(1000, 2000);
++
+       /* no more scheduling of any task */
+       set_bit(__I40E_SUSPENDED, pf->state);
+       set_bit(__I40E_DOWN, pf->state);
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 10c3480c2da89..dbc6c9ed1c8f8 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -500,7 +500,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
+       int rc;
+ 
+       skb->dev = vf_netdev;
+-      skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
++      skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
+ 
+       rc = dev_queue_xmit(skb);
+       if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
+diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
+index a1c44d0c85578..30cbe22c57a8e 100644
+--- a/drivers/rtc/rtc-goldfish.c
++++ b/drivers/rtc/rtc-goldfish.c
+@@ -87,6 +87,7 @@ static int goldfish_rtc_set_alarm(struct device *dev,
+               rtc_alarm64 = rtc_alarm * NSEC_PER_SEC;
+               writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH);
+               writel(rtc_alarm64, base + TIMER_ALARM_LOW);
++              writel(1, base + TIMER_IRQ_ENABLED);
+       } else {
+               /*
+                * if this function was called with enabled=0
+diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
+index 28b50ab2fbb01..62f83cc151b22 100644
+--- a/drivers/scsi/libfc/fc_disc.c
++++ b/drivers/scsi/libfc/fc_disc.c
+@@ -605,8 +605,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+ 
+       if (PTR_ERR(fp) == -FC_EX_CLOSED)
+               goto out;
+-      if (IS_ERR(fp))
+-              goto redisc;
++      if (IS_ERR(fp)) {
++              mutex_lock(&disc->disc_mutex);
++              fc_disc_restart(disc);
++              mutex_unlock(&disc->disc_mutex);
++              goto out;
++      }
+ 
+       cp = fc_frame_payload_get(fp, sizeof(*cp));
+       if (!cp)
+@@ -633,7 +637,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+                               new_rdata->disc_id = disc->disc_id;
+                               fc_rport_login(new_rdata);
+                       }
+-                      goto out;
++                      goto free_fp;
+               }
+               rdata->disc_id = disc->disc_id;
+               mutex_unlock(&rdata->rp_mutex);
+@@ -650,6 +654,8 @@ redisc:
+               fc_disc_restart(disc);
+               mutex_unlock(&disc->disc_mutex);
+       }
++free_fp:
++      fc_frame_free(fp);
+ out:
+       kref_put(&rdata->kref, fc_rport_destroy);
+       if (!IS_ERR(fp))
+diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
+index 71f73d1d1ad1f..6c944fbefd40a 100644
+--- a/drivers/scsi/ufs/ufs_quirks.h
++++ b/drivers/scsi/ufs/ufs_quirks.h
+@@ -21,6 +21,7 @@
+ #define UFS_ANY_VENDOR 0xFFFF
+ #define UFS_ANY_MODEL  "ANY_MODEL"
+ 
++#define UFS_VENDOR_MICRON      0x12C
+ #define UFS_VENDOR_TOSHIBA     0x198
+ #define UFS_VENDOR_SAMSUNG     0x1CE
+ #define UFS_VENDOR_SKHYNIX     0x1AD
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 1e2a97a10033b..11e917b44a0f1 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -189,6 +189,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
+ 
+ static struct ufs_dev_fix ufs_fixups[] = {
+       /* UFS cards deviations table */
++      UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
++              UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+               UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index a75f2a2cf7805..4b6a1629969f3 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -827,4 +827,7 @@ config SPI_SLAVE_SYSTEM_CONTROL
+ 
+ endif # SPI_SLAVE
+ 
++config SPI_DYNAMIC
++      def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
++
+ endif # SPI
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 49eee894f51d4..ab6a4f85bcde7 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -428,6 +428,12 @@ static LIST_HEAD(spi_controller_list);
+  */
+ static DEFINE_MUTEX(board_lock);
+ 
++/*
++ * Prevents addition of devices with same chip select and
++ * addition of devices below an unregistering controller.
++ */
++static DEFINE_MUTEX(spi_add_lock);
++
+ /**
+  * spi_alloc_device - Allocate a new SPI device
+  * @ctlr: Controller to which device is connected
+@@ -506,7 +512,6 @@ static int spi_dev_check(struct device *dev, void *data)
+  */
+ int spi_add_device(struct spi_device *spi)
+ {
+-      static DEFINE_MUTEX(spi_add_lock);
+       struct spi_controller *ctlr = spi->controller;
+       struct device *dev = ctlr->dev.parent;
+       int status;
+@@ -534,6 +539,13 @@ int spi_add_device(struct spi_device *spi)
+               goto done;
+       }
+ 
++      /* Controller may unregister concurrently */
++      if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
++          !device_is_registered(&ctlr->dev)) {
++              status = -ENODEV;
++              goto done;
++      }
++
+       if (ctlr->cs_gpios)
+               spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
+ 
+@@ -2265,6 +2277,10 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+       struct spi_controller *found;
+       int id = ctlr->bus_num;
+ 
++      /* Prevent addition of new devices, unregister existing ones */
++      if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
++              mutex_lock(&spi_add_lock);
++
+       device_for_each_child(&ctlr->dev, NULL, __unregister);
+ 
+       /* First make sure that this controller was ever added */
+@@ -2285,6 +2301,9 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+       if (found == ctlr)
+               idr_remove(&spi_master_idr, id);
+       mutex_unlock(&board_lock);
++
++      if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
++              mutex_unlock(&spi_add_lock);
+ }
+ EXPORT_SYMBOL_GPL(spi_unregister_controller);
+ 
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 35a3750a6ddd3..f22425501bc16 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -1086,13 +1086,16 @@ static int vfio_bus_type(struct device *dev, void *data)
+ static int vfio_iommu_replay(struct vfio_iommu *iommu,
+                            struct vfio_domain *domain)
+ {
+-      struct vfio_domain *d;
++      struct vfio_domain *d = NULL;
+       struct rb_node *n;
+       unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+       int ret;
+ 
+       /* Arbitrarily pick the first domain in the list for lookups */
+-      d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
++      if (!list_empty(&iommu->domain_list))
++              d = list_first_entry(&iommu->domain_list,
++                                   struct vfio_domain, next);
++
+       n = rb_first(&iommu->dma_list);
+ 
+       for (; n; n = rb_next(n)) {
+@@ -1110,6 +1113,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
+                               phys_addr_t p;
+                               dma_addr_t i;
+ 
++                              if (WARN_ON(!d)) { /* mapped w/o a domain?! */
++                                      ret = -EINVAL;
++                                      goto unwind;
++                              }
++
+                               phys = iommu_iova_to_phys(d->domain, iova);
+ 
+                               if (WARN_ON(!phys)) {
+@@ -1139,7 +1147,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
+                               if (npage <= 0) {
+                                       WARN_ON(!npage);
+                                       ret = (int)npage;
+-                                      return ret;
++                                      goto unwind;
+                               }
+ 
+                               phys = pfn << PAGE_SHIFT;
+@@ -1148,14 +1156,67 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
+ 
+                       ret = iommu_map(domain->domain, iova, phys,
+                                       size, dma->prot | domain->prot);
+-                      if (ret)
+-                              return ret;
++                      if (ret) {
++                              if (!dma->iommu_mapped)
++                                      vfio_unpin_pages_remote(dma, iova,
++                                                      phys >> PAGE_SHIFT,
++                                                      size >> PAGE_SHIFT,
++                                                      true);
++                              goto unwind;
++                      }
+ 
+                       iova += size;
+               }
++      }
++
++      /* All dmas are now mapped, defer to second tree walk for unwind */
++      for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
++              struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
++
+               dma->iommu_mapped = true;
+       }
++
+       return 0;
++
++unwind:
++      for (; n; n = rb_prev(n)) {
++              struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
++              dma_addr_t iova;
++
++              if (dma->iommu_mapped) {
++                      iommu_unmap(domain->domain, dma->iova, dma->size);
++                      continue;
++              }
++
++              iova = dma->iova;
++              while (iova < dma->iova + dma->size) {
++                      phys_addr_t phys, p;
++                      size_t size;
++                      dma_addr_t i;
++
++                      phys = iommu_iova_to_phys(domain->domain, iova);
++                      if (!phys) {
++                              iova += PAGE_SIZE;
++                              continue;
++                      }
++
++                      size = PAGE_SIZE;
++                      p = phys + size;
++                      i = iova + size;
++                      while (i < dma->iova + dma->size &&
++                             p == iommu_iova_to_phys(domain->domain, i)) {
++                              size += PAGE_SIZE;
++                              p += PAGE_SIZE;
++                              i += PAGE_SIZE;
++                      }
++
++                      iommu_unmap(domain->domain, iova, size);
++                      vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
++                                              size >> PAGE_SHIFT, true);
++              }
++      }
++
++      return ret;
+ }
+ 
+ /*
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index b82bb0b081615..51278f8bd3ab3 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -829,6 +829,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+ {
+       struct vring_virtqueue *vq = to_vvq(_vq);
+ 
++      if (unlikely(vq->broken))
++              return false;
++
+       virtio_mb(vq->weak_barriers);
+       return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
+ }
+diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
+index 5f6b77ea34fb5..128375ff80b8c 100644
+--- a/drivers/xen/preempt.c
++++ b/drivers/xen/preempt.c
+@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
+ asmlinkage __visible void xen_maybe_preempt_hcall(void)
+ {
+       if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
+-                   && need_resched())) {
++                   && need_resched() && !preempt_count())) {
+               /*
+                * Clear flag as we may be rescheduled on a different
+                * cpu.
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 5412b12491cb8..de951987fd23d 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -3262,6 +3262,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
+ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+                       unsigned long new_flags);
+ int btrfs_sync_fs(struct super_block *sb, int wait);
++char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
++                                        u64 subvol_objectid);
+ 
+ static inline __printf(2, 3)
+ void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
+diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
+index 3aeb5770f8965..b6ce765aa7f33 100644
+--- a/fs/btrfs/export.c
++++ b/fs/btrfs/export.c
+@@ -56,9 +56,9 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+       return type;
+ }
+ 
+-static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
+-                                     u64 root_objectid, u32 generation,
+-                                     int check_generation)
++struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
++                              u64 root_objectid, u32 generation,
++                              int check_generation)
+ {
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+       struct btrfs_root *root;
+@@ -151,7 +151,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
+       return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1);
+ }
+ 
+-static struct dentry *btrfs_get_parent(struct dentry *child)
++struct dentry *btrfs_get_parent(struct dentry *child)
+ {
+       struct inode *dir = d_inode(child);
+       struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
+diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h
+index 91b3908e7c549..15db024621414 100644
+--- a/fs/btrfs/export.h
++++ b/fs/btrfs/export.h
+@@ -17,4 +17,9 @@ struct btrfs_fid {
+       u64 parent_root_objectid;
+ } __attribute__ ((packed));
+ 
++struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
++                              u64 root_objectid, u32 generation,
++                              int check_generation);
++struct dentry *btrfs_get_parent(struct dentry *child);
++
+ #endif
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 57908ee964a20..17856e92b93d1 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -629,7 +629,21 @@ cont:
+                               btrfs_free_reserved_data_space_noquota(inode,
+                                                              start,
+                                                              end - start + 1);
+-                      goto free_pages_out;
++
++                      /*
++                       * Ensure we only free the compressed pages if we have
++                       * them allocated, as we can still reach here with
++                       * inode_need_compress() == false.
++                       */
++                      if (pages) {
++                              for (i = 0; i < nr_pages; i++) {
++                                      WARN_ON(pages[i]->mapping);
++                                      put_page(pages[i]);
++                              }
++                              kfree(pages);
++                      }
++
++                      return;
+               }
+       }
+ 
+@@ -708,13 +722,6 @@ cleanup_and_bail_uncompressed:
+       *num_added += 1;
+ 
+       return;
+-
+-free_pages_out:
+-      for (i = 0; i < nr_pages; i++) {
+-              WARN_ON(pages[i]->mapping);
+-              put_page(pages[i]);
+-      }
+-      kfree(pages);
+ }
+ 
+ static void free_async_extent_pages(struct async_extent *async_extent)
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 17a8463ef35c1..eb64d4b159e07 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -939,8 +939,8 @@ out:
+       return error;
+ }
+ 
+-static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
+-                                         u64 subvol_objectid)
++char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
++                                        u64 subvol_objectid)
+ {
+       struct btrfs_root *root = fs_info->tree_root;
+       struct btrfs_root *fs_root;
+@@ -1221,6 +1221,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
+ {
+       struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
+       char *compress_type;
++      const char *subvol_name;
+ 
+       if (btrfs_test_opt(info, DEGRADED))
+               seq_puts(seq, ",degraded");
+@@ -1307,8 +1308,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
+ #endif
+       seq_printf(seq, ",subvolid=%llu",
+                 BTRFS_I(d_inode(dentry))->root->root_key.objectid);
+-      seq_puts(seq, ",subvol=");
+-      seq_dentry(seq, dentry, " \t\n\\");
++      subvol_name = btrfs_get_subvol_name_from_objectid(info,
++                      BTRFS_I(d_inode(dentry))->root->root_key.objectid);
++      if (!IS_ERR(subvol_name)) {
++              seq_puts(seq, ",subvol=");
++              seq_escape(seq, subvol_name, " \t\n\\");
++              kfree(subvol_name);
++      }
+       return 0;
+ }
+ 
+@@ -1427,8 +1433,8 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
+                               goto out;
+                       }
+               }
+-              subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb),
+-                                                          subvol_objectid);
++              subvol_name = btrfs_get_subvol_name_from_objectid(
++                                      btrfs_sb(mnt->mnt_sb), subvol_objectid);
+               if (IS_ERR(subvol_name)) {
+                       root = ERR_CAST(subvol_name);
+                       subvol_name = NULL;
+diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
+index f05341bda1d14..383546ff62f04 100644
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -25,6 +25,7 @@
+ #include <linux/bug.h>
+ #include <linux/genhd.h>
+ #include <linux/debugfs.h>
++#include <linux/sched/mm.h>
+ 
+ #include "ctree.h"
+ #include "disk-io.h"
+@@ -749,7 +750,9 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
+ {
+       int error = 0;
+       struct btrfs_device *dev;
++      unsigned int nofs_flag;
+ 
++      nofs_flag = memalloc_nofs_save();
+       list_for_each_entry(dev, &fs_devices->devices, dev_list) {
+               struct hd_struct *disk;
+               struct kobject *disk_kobj;
+@@ -768,6 +771,7 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
+               if (error)
+                       break;
+       }
++      memalloc_nofs_restore(nofs_flag);
+ 
+       return error;
+ }
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index c291bf61afb9c..00f0902e27e88 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1900,9 +1900,11 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
+                        * not already there, and calling reverse_path_check()
+                        * during ep_insert().
+                        */
+-                      if (list_empty(&epi->ffd.file->f_tfile_llink))
++                      if (list_empty(&epi->ffd.file->f_tfile_llink)) {
++                              get_file(epi->ffd.file);
+                               list_add(&epi->ffd.file->f_tfile_llink,
+                                        &tfile_check_list);
++                      }
+               }
+       }
+       mutex_unlock(&ep->mtx);
+@@ -1946,6 +1948,7 @@ static void clear_tfile_check_list(void)
+               file = list_first_entry(&tfile_check_list, struct file,
+                                       f_tfile_llink);
+               list_del_init(&file->f_tfile_llink);
++              fput(file);
+       }
+       INIT_LIST_HEAD(&tfile_check_list);
+ }
+@@ -2096,13 +2099,13 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+                       mutex_lock(&epmutex);
+                       if (is_file_epoll(tf.file)) {
+                               error = -ELOOP;
+-                              if (ep_loop_check(ep, tf.file) != 0) {
+-                                      clear_tfile_check_list();
++                              if (ep_loop_check(ep, tf.file) != 0)
+                                       goto error_tgt_fput;
+-                              }
+-                      } else
++                      } else {
++                              get_file(tf.file);
+                               list_add(&tf.file->f_tfile_llink,
+                                                       &tfile_check_list);
++                      }
+                       mutex_lock_nested(&ep->mtx, 0);
+                       if (is_file_epoll(tf.file)) {
+                               tep = tf.file->private_data;
+@@ -2126,8 +2129,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+                       error = ep_insert(ep, &epds, tf.file, fd, full_check);
+               } else
+                       error = -EEXIST;
+-              if (full_check)
+-                      clear_tfile_check_list();
+               break;
+       case EPOLL_CTL_DEL:
+               if (epi)
+@@ -2150,8 +2151,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+       mutex_unlock(&ep->mtx);
+ 
+ error_tgt_fput:
+-      if (full_check)
++      if (full_check) {
++              clear_tfile_check_list();
+               mutex_unlock(&epmutex);
++      }
+ 
+       fdput(tf);
+ error_fput:
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 161099f39ab9c..3f999053457b6 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1308,8 +1308,8 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
+                   ext4_match(fname, de)) {
+                       /* found a match - just to be sure, do
+                        * a full check */
+-                      if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
+-                                               bh->b_size, offset))
++                      if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
++                                               buf_size, offset))
+                               return -1;
+                       *res_dir = de;
+                       return 1;
+@@ -1741,7 +1741,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+                            blocksize, hinfo, map);
+       map -= count;
+       dx_sort_map(map, count);
+-      /* Split the existing block in the middle, size-wise */
++      /* Ensure that neither split block is over half full */
+       size = 0;
+       move = 0;
+       for (i = count-1; i >= 0; i--) {
+@@ -1751,8 +1751,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+               size += map[i].size;
+               move++;
+       }
+-      /* map index at which we will split */
+-      split = count - move;
++      /*
++       * map index at which we will split
++       *
++       * If the sum of active entries didn't exceed half the block size, just
++       * split it in half by count; each resulting block will have at least
++       * half the space free.
++       */
++      if (i > 0)
++              split = count - move;
++      else
++              split = count/2;
++
+       hash2 = map[split].hash;
+       continued = hash2 == map[split - 1].hash;
+       dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
+@@ -2353,7 +2363,7 @@ int ext4_generic_delete_entry(handle_t *handle,
+       de = (struct ext4_dir_entry_2 *)entry_buf;
+       while (i < buf_size - csum_size) {
+               if (ext4_check_dir_entry(dir, NULL, de, bh,
+-                                       bh->b_data, bh->b_size, i))
++                                       entry_buf, buf_size, i))
+                       return -EFSCORRUPTED;
+               if (de == de_del)  {
+                       if (pde)
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 6e054b368b5fe..93a466cf58ba7 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1356,8 +1356,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
+       int ret;
+ 
+       /* Buffer got discarded which means block device got invalidated */
+-      if (!buffer_mapped(bh))
++      if (!buffer_mapped(bh)) {
++              unlock_buffer(bh);
+               return -EIO;
++      }
+ 
+       trace_jbd2_write_superblock(journal, write_flags);
+       if (!(journal->j_flags & JBD2_BARRIER))
+diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
+index e5a6deb38e1e1..f4a5ec92f5dc7 100644
+--- a/fs/jffs2/dir.c
++++ b/fs/jffs2/dir.c
+@@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
+       int ret;
+       uint32_t now = get_seconds();
+ 
++      mutex_lock(&f->sem);
+       for (fd = f->dents ; fd; fd = fd->next) {
+-              if (fd->ino)
++              if (fd->ino) {
++                      mutex_unlock(&f->sem);
+                       return -ENOTEMPTY;
++              }
+       }
++      mutex_unlock(&f->sem);
+ 
+       ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
+                             dentry->d_name.len, f, now);
+diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c
+index f86f51f99aceb..1dcadd22b440d 100644
+--- a/fs/romfs/storage.c
++++ b/fs/romfs/storage.c
+@@ -221,10 +221,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos,
+       size_t limit;
+ 
+       limit = romfs_maxsize(sb);
+-      if (pos >= limit)
++      if (pos >= limit || buflen > limit - pos)
+               return -EIO;
+-      if (buflen > limit - pos)
+-              buflen = limit - pos;
+ 
+ #ifdef CONFIG_ROMFS_ON_MTD
+       if (sb->s_mtd)
+diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h
+index d04637181ef21..980c9429abec5 100644
+--- a/fs/xfs/xfs_sysfs.h
++++ b/fs/xfs/xfs_sysfs.h
+@@ -44,9 +44,11 @@ xfs_sysfs_init(
+       struct xfs_kobj         *parent_kobj,
+       const char              *name)
+ {
++      struct kobject          *parent;
++
++      parent = parent_kobj ? &parent_kobj->kobject : NULL;
+       init_completion(&kobj->complete);
+-      return kobject_init_and_add(&kobj->kobject, ktype,
+-                                  &parent_kobj->kobject, "%s", name);
++      return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name);
+ }
+ 
+ static inline void
+diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
+index c3d547211d160..9c42e50a5cb7e 100644
+--- a/fs/xfs/xfs_trans_dquot.c
++++ b/fs/xfs/xfs_trans_dquot.c
+@@ -669,7 +669,7 @@ xfs_trans_dqresv(
+                       }
+               }
+               if (ninos > 0) {
+-                      total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
++                      total_count = dqp->q_res_icount + ninos;
+                       timer = be32_to_cpu(dqp->q_core.d_itimer);
+                       warns = be16_to_cpu(dqp->q_core.d_iwarns);
+                       warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
+diff --git a/kernel/relay.c b/kernel/relay.c
+index b141ce697679f..53c2a1a4b057f 100644
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -196,6 +196,7 @@ free_buf:
+ static void relay_destroy_channel(struct kref *kref)
+ {
+       struct rchan *chan = container_of(kref, struct rchan, kref);
++      free_percpu(chan->buf);
+       kfree(chan);
+ }
+ 
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index d6464045d3b97..194125cf2d2b9 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4575,25 +4575,21 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end)
+ {
+-      unsigned long check_addr = *start;
++      unsigned long a_start, a_end;
+ 
+       if (!(vma->vm_flags & VM_MAYSHARE))
+               return;
+ 
+-      for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
+-              unsigned long a_start = check_addr & PUD_MASK;
+-              unsigned long a_end = a_start + PUD_SIZE;
++      /* Extend the range to be PUD aligned for a worst case scenario */
++      a_start = ALIGN_DOWN(*start, PUD_SIZE);
++      a_end = ALIGN(*end, PUD_SIZE);
+ 
+-              /*
+-               * If sharing is possible, adjust start/end if necessary.
+-               */
+-              if (range_in_vma(vma, a_start, a_end)) {
+-                      if (a_start < *start)
+-                              *start = a_start;
+-                      if (a_end > *end)
+-                              *end = a_end;
+-              }
+-      }
++      /*
++       * Intersect the range with the vma range, since pmd sharing won't be
++       * across vma after all
++       */
++      *start = max(vma->vm_start, a_start);
++      *end = min(vma->vm_end, a_end);
+ }
+ 
+ /*
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index 04b4c38d0c184..9dfe364d4c0d1 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -394,7 +394,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
+ 
+ static inline int khugepaged_test_exit(struct mm_struct *mm)
+ {
+-      return atomic_read(&mm->mm_users) == 0;
++      return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
+ }
+ 
+ int __khugepaged_enter(struct mm_struct *mm)
+@@ -407,7 +407,7 @@ int __khugepaged_enter(struct mm_struct *mm)
+               return -ENOMEM;
+ 
+       /* __khugepaged_exit() must not run from under us */
+-      VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
++      VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
+       if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
+               free_mm_slot(mm_slot);
+               return 0;
+@@ -1006,9 +1006,6 @@ static void collapse_huge_page(struct mm_struct *mm,
+        * handled by the anon_vma lock + PG_lock.
+        */
+       down_write(&mm->mmap_sem);
+-      result = SCAN_ANY_PROCESS;
+-      if (!mmget_still_valid(mm))
+-              goto out;
+       result = hugepage_vma_revalidate(mm, address, &vma);
+       if (result)
+               goto out;
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index e992afe3a58e9..a3958b4fec6cb 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1114,6 +1114,11 @@ static void free_pcppages_bulk(struct zone *zone, int count,
+       spin_lock(&zone->lock);
+       isolated_pageblocks = has_isolate_pageblock(zone);
+ 
++      /*
++       * Ensure proper count is passed which otherwise would stuck in the
++       * below while (list_empty(list)) loop.
++       */
++      count = min(pcp->count, count);
+       while (count) {
+               struct page *page;
+               struct list_head *list;
+@@ -7018,7 +7023,7 @@ int __meminit init_per_zone_wmark_min(void)
+ 
+       return 0;
+ }
+-core_initcall(init_per_zone_wmark_min)
++postcore_initcall(init_per_zone_wmark_min)
+ 
+ /*
+  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
+diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
+index 3633eb30dd135..4f949ad50d6a7 100644
+--- a/sound/soc/codecs/msm8916-wcd-analog.c
++++ b/sound/soc/codecs/msm8916-wcd-analog.c
+@@ -16,8 +16,8 @@
+ 
+ #define CDC_D_REVISION1                       (0xf000)
+ #define CDC_D_PERPH_SUBTYPE           (0xf005)
+-#define CDC_D_INT_EN_SET              (0x015)
+-#define CDC_D_INT_EN_CLR              (0x016)
++#define CDC_D_INT_EN_SET              (0xf015)
++#define CDC_D_INT_EN_CLR              (0xf016)
+ #define MBHC_SWITCH_INT                       BIT(7)
+ #define MBHC_MIC_ELECTRICAL_INS_REM_DET       BIT(6)
+ #define MBHC_BUTTON_PRESS_DET         BIT(5)
+diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+index 4558c8b930363..3a645fc425cd4 100644
+--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+@@ -339,7 +339,7 @@ static int sst_media_open(struct snd_pcm_substream *substream,
+ 
+       ret_val = power_up_sst(stream);
+       if (ret_val < 0)
+-              return ret_val;
++              goto out_power_up;
+ 
+       /* Make sure, that the period size is always even */
+       snd_pcm_hw_constraint_step(substream->runtime, 0,
+@@ -348,8 +348,9 @@ static int sst_media_open(struct snd_pcm_substream *substream,
+       return snd_pcm_hw_constraint_integer(runtime,
+                        SNDRV_PCM_HW_PARAM_PERIODS);
+ out_ops:
+-      kfree(stream);
+       mutex_unlock(&sst_lock);
++out_power_up:
++      kfree(stream);
+       return ret_val;
+ }
+ 
+diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
+index 8f7f9d05f38c0..bfa6d9d215569 100644
+--- a/tools/perf/util/probe-finder.c
++++ b/tools/perf/util/probe-finder.c
+@@ -1354,7 +1354,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
+       tf.ntevs = 0;
+ 
+       ret = debuginfo__find_probes(dbg, &tf.pf);
+-      if (ret < 0) {
++      if (ret < 0 || tf.ntevs == 0) {
+               for (i = 0; i < tf.ntevs; i++)
+                       clear_probe_trace_event(&tf.tevs[i]);
+               zfree(tevs);
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index 3814cdad643a5..7fe673248e984 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -307,12 +307,6 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+               next = stage2_pgd_addr_end(addr, end);
+               if (!stage2_pgd_none(*pgd))
+                       unmap_stage2_puds(kvm, pgd, addr, next);
+-              /*
+-               * If the range is too large, release the kvm->mmu_lock
+-               * to prevent starvation and lockup detector warnings.
+-               */
+-              if (next != end)
+-                      cond_resched_lock(&kvm->mmu_lock);
+       } while (pgd++, addr = next, addr != end);
+ }
+ 
