commit:     c12def91a2d1b232600418a34f79774f6cb40a2c
Author:     Mike Pagano <mpagano@gentoo.org>
AuthorDate: Wed Aug 10 12:57:38 2016 +0000
Commit:     Mike Pagano <mpagano@gentoo.org>
CommitDate: Wed Aug 10 12:57:38 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c12def91

Linux patch 4.6.6

 0000_README            |    4 +
 1005_linux-4.6.6.patch | 3625 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3629 insertions(+)

diff --git a/0000_README b/0000_README
index 9e42d11..1bb262e 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-4.6.5.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.6.5
 
+Patch:  1005_linux-4.6.6.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.6.6
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-4.6.6.patch b/1005_linux-4.6.6.patch
new file mode 100644
index 0000000..4fe50ff
--- /dev/null
+++ b/1005_linux-4.6.6.patch
@@ -0,0 +1,3625 @@
+diff --git a/Makefile b/Makefile
+index 7d693a825fc7..bee1a1692fed 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 6
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Charred Weasel
+ 
+diff --git a/arch/arc/Makefile b/arch/arc/Makefile
+index def69e347b2d..f5add9784b2d 100644
+--- a/arch/arc/Makefile
++++ b/arch/arc/Makefile
+@@ -66,8 +66,6 @@ endif
+ 
+ endif
+ 
+-cflags-$(CONFIG_ARC_DW2_UNWIND)               += -fasynchronous-unwind-tables
+-
+ # By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
+ ifeq ($(atleast_gcc48),y)
+ cflags-$(CONFIG_ARC_DW2_UNWIND)               += -gdwarf-2
+diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
+index e0efff15a5ae..b9192a653b7e 100644
+--- a/arch/arc/kernel/stacktrace.c
++++ b/arch/arc/kernel/stacktrace.c
+@@ -142,7 +142,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
+        * prelogue is setup (callee regs saved and then fp set and not other
+        * way around
+        */
+-      pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
++      pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+       return 0;
+ 
+ #endif
+diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
+index d4635391c36a..ec0510cc5c3c 100644
+--- a/arch/mips/include/asm/mips-cm.h
++++ b/arch/mips/include/asm/mips-cm.h
+@@ -461,7 +461,10 @@ static inline unsigned int mips_cm_max_vp_width(void)
+       if (mips_cm_revision() >= CM_REV_CM3)
+               return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK;
+ 
+-      return smp_num_siblings;
++      if (config_enabled(CONFIG_SMP))
++              return smp_num_siblings;
++
++      return 1;
+ }
+ 
+ /**
+diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
+index fdcc04020636..538ae944855e 100644
+--- a/arch/x86/include/asm/pvclock.h
++++ b/arch/x86/include/asm/pvclock.h
+@@ -85,6 +85,8 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
+       u8 ret_flags;
+ 
+       version = src->version;
++      /* Make the latest version visible */
++      smp_rmb();
+ 
+       offset = pvclock_get_nsec_offset(src);
+       ret = src->system_time + offset;
+diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
+index bca14c899137..57b71373bae3 100644
+--- a/arch/x86/kernel/early-quirks.c
++++ b/arch/x86/kernel/early-quirks.c
+@@ -11,7 +11,11 @@
+ 
+ #include <linux/pci.h>
+ #include <linux/acpi.h>
++#include <linux/delay.h>
++#include <linux/dmi.h>
+ #include <linux/pci_ids.h>
++#include <linux/bcma/bcma.h>
++#include <linux/bcma/bcma_regs.h>
+ #include <drm/i915_drm.h>
+ #include <asm/pci-direct.h>
+ #include <asm/dma.h>
+@@ -21,6 +25,9 @@
+ #include <asm/iommu.h>
+ #include <asm/gart.h>
+ #include <asm/irq_remapping.h>
++#include <asm/early_ioremap.h>
++
++#define dev_err(msg)  pr_err("pci 0000:%02x:%02x.%d: %s", bus, slot, func, msg)
+ 
+ static void __init fix_hypertransport_config(int num, int slot, int func)
+ {
+@@ -76,6 +83,13 @@ static void __init nvidia_bugs(int num, int slot, int func)
+ #ifdef CONFIG_ACPI
+ #ifdef CONFIG_X86_IO_APIC
+       /*
++       * Only applies to Nvidia root ports (bus 0) and not to
++       * Nvidia graphics cards with PCI ports on secondary buses.
++       */
++      if (num)
++              return;
++
++      /*
+        * All timer overrides on Nvidia are
+        * wrong unless HPET is enabled.
+        * Unfortunately that's not true on many Asus boards.
+@@ -590,6 +604,61 @@ static void __init force_disable_hpet(int num, int slot, int func)
+ #endif
+ }
+ 
++#define BCM4331_MMIO_SIZE     16384
++#define BCM4331_PM_CAP                0x40
++#define bcma_aread32(reg)     ioread32(mmio + 1 * BCMA_CORE_SIZE + reg)
++#define bcma_awrite32(reg, val)       iowrite32(val, mmio + 1 * BCMA_CORE_SIZE + reg)
++
++static void __init apple_airport_reset(int bus, int slot, int func)
++{
++      void __iomem *mmio;
++      u16 pmcsr;
++      u64 addr;
++      int i;
++
++      if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc."))
++              return;
++
++      /* Card may have been put into PCI_D3hot by grub quirk */
++      pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
++
++      if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
++              pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
++              write_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL, pmcsr);
++              mdelay(10);
++
++              pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
++              if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
++                      dev_err("Cannot power up Apple AirPort card\n");
++                      return;
++              }
++      }
++
++      addr  =      read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
++      addr |= (u64)read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_1) << 32;
++      addr &= PCI_BASE_ADDRESS_MEM_MASK;
++
++      mmio = early_ioremap(addr, BCM4331_MMIO_SIZE);
++      if (!mmio) {
++              dev_err("Cannot iomap Apple AirPort card\n");
++              return;
++      }
++
++      pr_info("Resetting Apple AirPort card (left enabled by EFI)\n");
++
++      for (i = 0; bcma_aread32(BCMA_RESET_ST) && i < 30; i++)
++              udelay(10);
++
++      bcma_awrite32(BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
++      bcma_aread32(BCMA_RESET_CTL);
++      udelay(1);
++
++      bcma_awrite32(BCMA_RESET_CTL, 0);
++      bcma_aread32(BCMA_RESET_CTL);
++      udelay(10);
++
++      early_iounmap(mmio, BCM4331_MMIO_SIZE);
++}
+ 
+ #define QFLAG_APPLY_ONCE      0x1
+ #define QFLAG_APPLIED         0x2
+@@ -603,12 +672,6 @@ struct chipset {
+       void (*f)(int num, int slot, int func);
+ };
+ 
+-/*
+- * Only works for devices on the root bus. If you add any devices
+- * not on bus 0 readd another loop level in early_quirks(). But
+- * be careful because at least the Nvidia quirk here relies on
+- * only matching on bus 0.
+- */
+ static struct chipset early_qrk[] __initdata = {
+       { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+         PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
+@@ -638,9 +701,13 @@ static struct chipset early_qrk[] __initdata = {
+        */
+       { PCI_VENDOR_ID_INTEL, 0x0f00,
+               PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
++      { PCI_VENDOR_ID_BROADCOM, 0x4331,
++        PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
+       {}
+ };
+ 
++static void __init early_pci_scan_bus(int bus);
++
+ /**
+  * check_dev_quirk - apply early quirks to a given PCI device
+  * @num: bus number
+@@ -649,7 +716,7 @@ static struct chipset early_qrk[] __initdata = {
+  *
+  * Check the vendor & device ID against the early quirks table.
+  *
+- * If the device is single function, let early_quirks() know so we don't
++ * If the device is single function, let early_pci_scan_bus() know so we don't
+  * poke at this device again.
+  */
+ static int __init check_dev_quirk(int num, int slot, int func)
+@@ -658,6 +725,7 @@ static int __init check_dev_quirk(int num, int slot, int func)
+       u16 vendor;
+       u16 device;
+       u8 type;
++      u8 sec;
+       int i;
+ 
+       class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);
+@@ -685,25 +753,36 @@ static int __init check_dev_quirk(int num, int slot, int func)
+ 
+       type = read_pci_config_byte(num, slot, func,
+                                   PCI_HEADER_TYPE);
++
++      if ((type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) {
++              sec = read_pci_config_byte(num, slot, func, PCI_SECONDARY_BUS);
++              if (sec > num)
++                      early_pci_scan_bus(sec);
++      }
++
+       if (!(type & 0x80))
+               return -1;
+ 
+       return 0;
+ }
+ 
+-void __init early_quirks(void)
++static void __init early_pci_scan_bus(int bus)
+ {
+       int slot, func;
+ 
+-      if (!early_pci_allowed())
+-              return;
+-
+       /* Poor man's PCI discovery */
+-      /* Only scan the root bus */
+       for (slot = 0; slot < 32; slot++)
+               for (func = 0; func < 8; func++) {
+                       /* Only probe function 0 on single fn devices */
+-                      if (check_dev_quirk(0, slot, func))
++                      if (check_dev_quirk(bus, slot, func))
+                               break;
+               }
+ }
++
++void __init early_quirks(void)
++{
++      if (!early_pci_allowed())
++              return;
++
++      early_pci_scan_bus(0);
++}
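The restructuring above is what lets the new Broadcom quirk fire at all: check_dev_quirk() now follows PCI-to-PCI bridges by reading PCI_SECONDARY_BUS and recursing, since Apple's BCM4331 AirPort card sits behind a bridge rather than on the root bus. A rough user-space sketch of the same descend-on-bridge walk (read_conf_byte() is a hypothetical stub standing in for the kernel's read_pci_config_byte()):

#include <stdint.h>
#include <stdio.h>

#define PCI_HEADER_TYPE         0x0e
#define PCI_SECONDARY_BUS       0x19
#define PCI_HEADER_TYPE_BRIDGE  0x01

/* Stub config-space read; real early-quirk code pokes ports 0xCF8/0xCFC.
 * Here 00:1c.0 pretends to be a bridge whose secondary bus is 1. */
static uint8_t read_conf_byte(int bus, int slot, int func, int off)
{
	if (bus == 0 && slot == 0x1c && func == 0) {
		if (off == PCI_HEADER_TYPE)
			return PCI_HEADER_TYPE_BRIDGE;
		if (off == PCI_SECONDARY_BUS)
			return 1;
	}
	return 0;
}

static void scan_bus(int bus)
{
	printf("scanning bus %d\n", bus);
	for (int slot = 0; slot < 32; slot++)
		for (int func = 0; func < 8; func++) {
			uint8_t type = read_conf_byte(bus, slot, func, PCI_HEADER_TYPE);

			/* Descend into bridges, but only toward higher bus
			 * numbers so a mis-programmed bridge cannot loop us. */
			if ((type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) {
				uint8_t sec = read_conf_byte(bus, slot, func, PCI_SECONDARY_BUS);
				if (sec > bus)
					scan_bus(sec);
			}

			/* Multi-function bit clear: skip functions 1..7. */
			if (!(type & 0x80))
				break;
		}
}

int main(void)
{
	scan_bus(0);
	return 0;
}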
+diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
+index 99bfc025111d..7f82fe0a6807 100644
+--- a/arch/x86/kernel/pvclock.c
++++ b/arch/x86/kernel/pvclock.c
+@@ -66,6 +66,8 @@ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
+ 
+       do {
+               version = __pvclock_read_cycles(src, &ret, &flags);
++              /* Make sure that the version double-check is last. */
++              smp_rmb();
+       } while ((src->version & 1) || version != src->version);
+ 
+       return flags & valid_flags;
+@@ -80,6 +82,8 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+ 
+       do {
+               version = __pvclock_read_cycles(src, &ret, &flags);
++              /* Make sure that the version double-check is last. */
++              smp_rmb();
+       } while ((src->version & 1) || version != src->version);
+ 
+       if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
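Both pvclock hunks follow the classic seqcount read protocol: load the version, read the payload, then re-check the version last, with read barriers so the CPU cannot pull the payload reads ahead of (or past) the version reads. A rough user-space sketch of the pattern with C11 atomics (illustrative only; the kernel uses smp_rmb(), and a production seqlock would also make the payload reads atomic):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct time_info {
	atomic_uint version;    /* odd while the producer is mid-update */
	uint64_t system_time;
};

/* Reader: retry until an even version is observed unchanged across the
 * payload read; the fences mirror the smp_rmb() calls the patch adds. */
static uint64_t read_time(struct time_info *src)
{
	unsigned ver;
	uint64_t t;

	do {
		ver = atomic_load_explicit(&src->version, memory_order_acquire);
		t = src->system_time;                      /* payload */
		atomic_thread_fence(memory_order_acquire); /* double-check is last */
	} while ((ver & 1) ||
		 ver != atomic_load_explicit(&src->version, memory_order_relaxed));

	return t;
}

int main(void)
{
	struct time_info ti = { 2, 123456789 };

	printf("%llu\n", (unsigned long long)read_time(&ti));
	return 0;
}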
+diff --git a/block/ioprio.c b/block/ioprio.c
+index cc7800e9eb44..01b8116298a1 100644
+--- a/block/ioprio.c
++++ b/block/ioprio.c
+@@ -150,8 +150,10 @@ static int get_task_ioprio(struct task_struct *p)
+       if (ret)
+               goto out;
+       ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
++      task_lock(p);
+       if (p->io_context)
+               ret = p->io_context->ioprio;
++      task_unlock(p);
+ out:
+       return ret;
+ }
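The ioprio fix closes a use-after-free: p->io_context can be torn down while the task exits, so the NULL check and the dereference have to be atomic with respect to that teardown, which is exactly what the task_lock()/task_unlock() pair provides. A minimal pthreads sketch of the same check-then-use-under-a-lock idea (hypothetical types, not the kernel's):

#include <pthread.h>
#include <stdio.h>

struct io_context { int ioprio; };

struct task {
	pthread_mutex_t lock;      /* stands in for task_lock() */
	struct io_context *ioc;    /* may be freed by the exiting task */
};

/* Without the lock, 'ioc' could be freed between the NULL check and the
 * dereference; holding it makes check-then-use one indivisible step. */
static int task_ioprio(struct task *t, int fallback)
{
	int ret = fallback;

	pthread_mutex_lock(&t->lock);
	if (t->ioc)
		ret = t->ioc->ioprio;
	pthread_mutex_unlock(&t->lock);
	return ret;
}

int main(void)
{
	struct io_context ioc = { 4 };
	struct task t = { PTHREAD_MUTEX_INITIALIZER, &ioc };

	printf("%d\n", task_ioprio(&t, 0));
	return 0;
}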
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 55e257c268dd..c40deed8c440 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4141,6 +4141,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+        */
+       { "ST380013AS",         "3.20",         ATA_HORKAGE_MAX_SEC_1024 },
+ 
++      /*
++       * Device times out with higher max sects.
++       * https://bugzilla.kernel.org/show_bug.cgi?id=121671
++       */
++      { "LITEON CX1-JB256-HP", NULL,          ATA_HORKAGE_MAX_SEC_1024 },
++
+       /* Devices we expect to fail diagnostics */
+ 
+       /* Devices where NCQ should be avoided */
+diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
+index eda09090cb52..f642c4264c27 100644
+--- a/drivers/bcma/bcma_private.h
++++ b/drivers/bcma/bcma_private.h
+@@ -8,8 +8,6 @@
+ #include <linux/bcma/bcma.h>
+ #include <linux/delay.h>
+ 
+-#define BCMA_CORE_SIZE                0x1000
+-
+ #define bcma_err(bus, fmt, ...) \
+       pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+ #define bcma_warn(bus, fmt, ...) \
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 6405b6557792..d6ea31d30bf9 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -207,6 +207,9 @@ struct blkfront_info
+       struct blk_mq_tag_set tag_set;
+       struct blkfront_ring_info *rinfo;
+       unsigned int nr_rings;
++      /* Save uncomplete reqs and bios for migration. */
++      struct list_head requests;
++      struct bio_list bio_list;
+ };
+ 
+ static unsigned int nr_minors;
+@@ -874,8 +877,12 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
+                         const struct blk_mq_queue_data *qd)
+ {
+       unsigned long flags;
+-      struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
++      int qid = hctx->queue_num;
++      struct blkfront_info *info = hctx->queue->queuedata;
++      struct blkfront_ring_info *rinfo = NULL;
+ 
++      BUG_ON(info->nr_rings <= qid);
++      rinfo = &info->rinfo[qid];
+       blk_mq_start_request(qd->rq);
+       spin_lock_irqsave(&rinfo->ring_lock, flags);
+       if (RING_FULL(&rinfo->ring))
+@@ -901,20 +908,9 @@ out_busy:
+       return BLK_MQ_RQ_QUEUE_BUSY;
+ }
+ 
+-static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+-                          unsigned int index)
+-{
+-      struct blkfront_info *info = (struct blkfront_info *)data;
+-
+-      BUG_ON(info->nr_rings <= index);
+-      hctx->driver_data = &info->rinfo[index];
+-      return 0;
+-}
+-
+ static struct blk_mq_ops blkfront_mq_ops = {
+       .queue_rq = blkif_queue_rq,
+       .map_queue = blk_mq_map_queue,
+-      .init_hctx = blk_mq_init_hctx,
+ };
+ 
+ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
+@@ -950,6 +946,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
+               return PTR_ERR(rq);
+       }
+ 
++      rq->queuedata = info;
+       queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+ 
+       if (info->feature_discard) {
+@@ -2007,69 +2004,22 @@ static int blkif_recover(struct blkfront_info *info)
+ {
+       unsigned int i, r_index;
+       struct request *req, *n;
+-      struct blk_shadow *copy;
+       int rc;
+       struct bio *bio, *cloned_bio;
+-      struct bio_list bio_list, merge_bio;
+       unsigned int segs, offset;
+       int pending, size;
+       struct split_bio *split_bio;
+-      struct list_head requests;
+ 
+       blkfront_gather_backend_features(info);
+       segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
+       blk_queue_max_segments(info->rq, segs);
+-      bio_list_init(&bio_list);
+-      INIT_LIST_HEAD(&requests);
+ 
+       for (r_index = 0; r_index < info->nr_rings; r_index++) {
+-              struct blkfront_ring_info *rinfo;
+-
+-              rinfo = &info->rinfo[r_index];
+-              /* Stage 1: Make a safe copy of the shadow state. */
+-              copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
+-                             GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
+-              if (!copy)
+-                      return -ENOMEM;
+-
+-              /* Stage 2: Set up free list. */
+-              memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
+-              for (i = 0; i < BLK_RING_SIZE(info); i++)
+-                      rinfo->shadow[i].req.u.rw.id = i+1;
+-              rinfo->shadow_free = rinfo->ring.req_prod_pvt;
+-              rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
++              struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
+ 
+               rc = blkfront_setup_indirect(rinfo);
+-              if (rc) {
+-                      kfree(copy);
++              if (rc)
+                       return rc;
+-              }
+-
+-              for (i = 0; i < BLK_RING_SIZE(info); i++) {
+-                      /* Not in use? */
+-                      if (!copy[i].request)
+-                              continue;
+-
+-                      /*
+-                       * Get the bios in the request so we can re-queue them.
+-                       */
+-                      if (copy[i].request->cmd_flags &
+-                          (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
+-                              /*
+-                               * Flush operations don't contain bios, so
+-                               * we need to requeue the whole request
+-                               */
+-                              list_add(&copy[i].request->queuelist, &requests);
+-                              continue;
+-                      }
+-                      merge_bio.head = copy[i].request->bio;
+-                      merge_bio.tail = copy[i].request->biotail;
+-                      bio_list_merge(&bio_list, &merge_bio);
+-                      copy[i].request->bio = NULL;
+-                      blk_end_request_all(copy[i].request, 0);
+-              }
+-
+-              kfree(copy);
+       }
+       xenbus_switch_state(info->xbdev, XenbusStateConnected);
+ 
+@@ -2084,7 +2034,7 @@ static int blkif_recover(struct blkfront_info *info)
+               kick_pending_request_queues(rinfo);
+       }
+ 
+-      list_for_each_entry_safe(req, n, &requests, queuelist) {
++      list_for_each_entry_safe(req, n, &info->requests, queuelist) {
+               /* Requeue pending requests (flush or discard) */
+               list_del_init(&req->queuelist);
+               BUG_ON(req->nr_phys_segments > segs);
+@@ -2092,7 +2042,7 @@ static int blkif_recover(struct blkfront_info *info)
+       }
+       blk_mq_kick_requeue_list(info->rq);
+ 
+-      while ((bio = bio_list_pop(&bio_list)) != NULL) {
++      while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
+               /* Traverse the list of pending bios and re-queue them */
+               if (bio_segments(bio) > segs) {
+                       /*
+@@ -2138,9 +2088,42 @@ static int blkfront_resume(struct xenbus_device *dev)
+ {
+       struct blkfront_info *info = dev_get_drvdata(&dev->dev);
+       int err = 0;
++      unsigned int i, j;
+ 
+       dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
+ 
++      bio_list_init(&info->bio_list);
++      INIT_LIST_HEAD(&info->requests);
++      for (i = 0; i < info->nr_rings; i++) {
++              struct blkfront_ring_info *rinfo = &info->rinfo[i];
++              struct bio_list merge_bio;
++              struct blk_shadow *shadow = rinfo->shadow;
++
++              for (j = 0; j < BLK_RING_SIZE(info); j++) {
++                      /* Not in use? */
++                      if (!shadow[j].request)
++                              continue;
++
++                      /*
++                       * Get the bios in the request so we can re-queue them.
++                       */
++                      if (shadow[j].request->cmd_flags &
++                                      (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
++                              /*
++                               * Flush operations don't contain bios, so
++                               * we need to requeue the whole request
++                               */
++                              list_add(&shadow[j].request->queuelist, &info->requests);
++                              continue;
++                      }
++                      merge_bio.head = shadow[j].request->bio;
++                      merge_bio.tail = shadow[j].request->biotail;
++                      bio_list_merge(&info->bio_list, &merge_bio);
++                      shadow[j].request->bio = NULL;
++                      blk_mq_end_request(shadow[j].request, 0);
++              }
++      }
++
+       blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
+ 
+       err = negotiate_mq(info);
+@@ -2148,6 +2131,8 @@ static int blkfront_resume(struct xenbus_device *dev)
+               return err;
+ 
+       err = talk_to_blkback(dev, info);
++      if (!err)
++              blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
+ 
+       /*
+        * We have to wait for the backend to switch to
+@@ -2484,10 +2469,23 @@ static void blkback_changed(struct xenbus_device *dev,
+               break;
+ 
+       case XenbusStateConnected:
+-              if (dev->state != XenbusStateInitialised) {
++              /*
++               * talk_to_blkback sets state to XenbusStateInitialised
++               * and blkfront_connect sets it to XenbusStateConnected
++               * (if connection went OK).
++               *
++               * If the backend (or toolstack) decides to poke at backend
++               * state (and re-trigger the watch by setting the state repeatedly
++               * to XenbusStateConnected (4)) we need to deal with this.
++               * This is allowed as this is used to communicate to the guest
++               * that the size of disk has changed!
++               */
++              if ((dev->state != XenbusStateInitialised) &&
++                  (dev->state != XenbusStateConnected)) {
+                       if (talk_to_blkback(dev, info))
+                               break;
+               }
++
+               blkfront_connect(info);
+               break;
+ 
+diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
+index 10f846cc8db1..25d5906640c3 100644
+--- a/drivers/clk/at91/clk-programmable.c
++++ b/drivers/clk/at91/clk-programmable.c
+@@ -99,7 +99,7 @@ static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
+       struct clk_programmable *prog = to_clk_programmable(hw);
+       const struct clk_programmable_layout *layout = prog->layout;
+       unsigned int mask = layout->css_mask;
+-      unsigned int pckr = 0;
++      unsigned int pckr = index;
+ 
+       if (layout->have_slck_mck)
+               mask |= AT91_PMC_CSSMCK_MCK;
+diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
+index e0dc7e83403a..102399f3c550 100644
+--- a/drivers/clk/rockchip/clk-mmc-phase.c
++++ b/drivers/clk/rockchip/clk-mmc-phase.c
+@@ -153,6 +153,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
+               return ERR_PTR(-ENOMEM);
+ 
+       init.name = name;
++      init.flags = 0;
+       init.num_parents = num_parents;
+       init.parent_names = parent_names;
+       init.ops = &rockchip_mmc_clk_ops;
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index c4acfc5273b3..e380457792ae 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -2169,6 +2169,10 @@ int cpufreq_update_policy(unsigned int cpu)
+        * -> ask driver for current freq and notify governors about a change
+        */
+       if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
++              if (cpufreq_suspended) {
++                      ret = -EAGAIN;
++                      goto unlock;
++              }
+               new_policy.cur = cpufreq_update_current_freq(policy);
+               if (WARN_ON(!new_policy.cur)) {
+                       ret = -EIO;
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index 8e304b1befc5..75bd6621dc5d 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -242,7 +242,7 @@ struct at_xdmac_lld {
+       u32             mbr_dus;        /* Destination Microblock Stride Register */
+ };
+ 
+-
++/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
+ struct at_xdmac_desc {
+       struct at_xdmac_lld             lld;
+       enum dma_transfer_direction     direction;
+@@ -253,7 +253,7 @@ struct at_xdmac_desc {
+       unsigned int                    xfer_size;
+       struct list_head                descs_list;
+       struct list_head                xfer_node;
+-};
++} __aligned(sizeof(u64));
+ 
+ static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
+ {
+@@ -1400,6 +1400,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+       u32                     cur_nda, check_nda, cur_ubc, mask, value;
+       u8                      dwidth = 0;
+       unsigned long           flags;
++      bool                    initd;
+ 
+       ret = dma_cookie_status(chan, cookie, txstate);
+       if (ret == DMA_COMPLETE)
+@@ -1424,7 +1425,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+       residue = desc->xfer_size;
+       /*
+        * Flush FIFO: only relevant when the transfer is source peripheral
+-       * synchronized.
++       * synchronized. Flush is needed before reading CUBC because data in
++       * the FIFO are not reported by CUBC. Reporting a residue of the
++       * transfer length while we have data in FIFO can cause issue.
++       * Usecase: atmel USART has a timeout which means I have received
++       * characters but there is no more character received for a while. On
++       * timeout, it requests the residue. If the data are in the DMA FIFO,
++       * we will return a residue of the transfer length. It means no data
++       * received. If an application is waiting for these data, it will hang
++       * since we won't have another USART timeout without receiving new
++       * data.
+        */
+       mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
+       value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
+@@ -1435,34 +1445,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+       }
+ 
+       /*
+-       * When processing the residue, we need to read two registers but we
+-       * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
+-       * we stand in the descriptor list and AT_XDMAC_CUBC is used
+-       * to know how many data are remaining for the current descriptor.
+-       * Since the dma channel is not paused to not loose data, between the
+-       * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
+-       * descriptor.
+-       * For that reason, after reading AT_XDMAC_CUBC, we check if we are
+-       * still using the same descriptor by reading a second time
+-       * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
+-       * read again AT_XDMAC_CUBC.
++       * The easiest way to compute the residue should be to pause the DMA
++       * but doing this can lead to miss some data as some devices don't
++       * have FIFO.
++       * We need to read several registers because:
++       * - DMA is running therefore a descriptor change is possible while
++       * reading these registers
++       * - When the block transfer is done, the value of the CUBC register
++       * is set to its initial value until the fetch of the next descriptor.
++       * This value will corrupt the residue calculation so we have to skip
++       * it.
++       *
++       * INITD --------                    ------------
++       *              |____________________|
++       *       _______________________  _______________
++       * NDA       @desc2             \/   @desc3
++       *       _______________________/\_______________
++       *       __________  ___________  _______________
++       * CUBC       0    \/ MAX desc1 \/  MAX desc2
++       *       __________/\___________/\_______________
++       *
++       * Since descriptors are aligned on 64 bits, we can assume that
++       * the update of NDA and CUBC is atomic.
+        * Memory barriers are used to ensure the read order of the registers.
+-       * A max number of retries is set because unlikely it can never ends if
+-       * we are transferring a lot of data with small buffers.
++       * A max number of retries is set because unlikely it could never ends.
+        */
+-      cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+-      rmb();
+-      cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+       for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
+-              rmb();
+-              check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+-
+-              if (likely(cur_nda == check_nda))
+-                      break;
+-
+-              cur_nda = check_nda;
++              rmb();
++              initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
+               rmb();
+               cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
++              rmb();
++              cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
++              rmb();
++
++              if ((check_nda == cur_nda) && initd)
++                      break;
+       }
+ 
+       if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
+@@ -1471,6 +1490,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+       }
+ 
+       /*
++       * Flush FIFO: only relevant when the transfer is source peripheral
++       * synchronized. Another flush is needed here because CUBC is updated
++       * when the controller sends the data write command. It can lead to
++       * report data that are not written in the memory or the device. The
++       * FIFO flush ensures that data are really written.
++       */
++      if ((desc->lld.mbr_cfg & mask) == value) {
++              at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
++              while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
++                      cpu_relax();
++      }
++
++      /*
+        * Remove size of all microblocks already transferred and the current
+        * one. Then add the remaining size to transfer of the current
+        * microblock.
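The comment block above carries the core of this fix: rather than pausing the channel, the driver reads NDA, the INITD flag and CUBC with barriers in between, then re-reads NDA, and only trusts the snapshot when NDA is unchanged and INITD is set. A self-contained sketch of that retry idiom (reg_read(), read_barrier() and the register offsets are stand-ins, not Atmel's API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REG_NDA 0           /* next descriptor address */
#define REG_CC  1           /* channel config; bit 0 models INITD */
#define REG_UBC 2           /* remaining microblock count */
#define MAX_RETRIES 1000

/* Stub register file; real code reads device MMIO. */
static volatile uint32_t regs[3] = { 0x1000, 0x1, 42 };

static uint32_t reg_read(int off) { return regs[off]; }
static void read_barrier(void)   { __atomic_thread_fence(__ATOMIC_ACQUIRE); }

/* Snapshot (nda, ubc) coherently while the DMA keeps running: the pair is
 * trusted only if NDA is identical before and after the UBC read and the
 * next-descriptor fetch has completed (INITD set). */
static bool residue_snapshot(uint32_t *nda, uint32_t *ubc)
{
	for (int retry = 0; retry < MAX_RETRIES; retry++) {
		uint32_t check_nda = reg_read(REG_NDA) & ~0x3u;
		read_barrier();
		bool initd = reg_read(REG_CC) & 0x1;
		read_barrier();
		*ubc = reg_read(REG_UBC);
		read_barrier();
		*nda = reg_read(REG_NDA) & ~0x3u;
		read_barrier();

		if (check_nda == *nda && initd)
			return true;    /* consistent snapshot */
	}
	return false;                   /* channel too busy; report an error */
}

int main(void)
{
	uint32_t nda, ubc;

	if (residue_snapshot(&nda, &ubc))
		printf("nda=%#x ubc=%u\n", nda, ubc);
	return 0;
}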
+diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
+index 4272f2ce5f6e..dca16540e7fd 100644
+--- a/drivers/hwtracing/intel_th/core.c
++++ b/drivers/hwtracing/intel_th/core.c
+@@ -440,6 +440,38 @@ static struct intel_th_subdevice {
+       },
+ };
+ 
++#ifdef CONFIG_MODULES
++static void __intel_th_request_hub_module(struct work_struct *work)
++{
++      struct intel_th *th = container_of(work, struct intel_th,
++                                         request_module_work);
++
++      request_module("intel_th_%s", th->hub->name);
++}
++
++static int intel_th_request_hub_module(struct intel_th *th)
++{
++      INIT_WORK(&th->request_module_work, __intel_th_request_hub_module);
++      schedule_work(&th->request_module_work);
++
++      return 0;
++}
++
++static void intel_th_request_hub_module_flush(struct intel_th *th)
++{
++      flush_work(&th->request_module_work);
++}
++#else
++static inline int intel_th_request_hub_module(struct intel_th *th)
++{
++      return -EINVAL;
++}
++
++static inline void intel_th_request_hub_module_flush(struct intel_th *th)
++{
++}
++#endif /* CONFIG_MODULES */
++
+ static int intel_th_populate(struct intel_th *th, struct resource *devres,
+                            unsigned int ndevres, int irq)
+ {
+@@ -510,7 +542,7 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
+               /* need switch driver to be loaded to enumerate the rest */
+               if (subdev->type == INTEL_TH_SWITCH && !req) {
+                       th->hub = thdev;
+-                      err = request_module("intel_th_%s", subdev->name);
++                      err = intel_th_request_hub_module(th);
+                       if (!err)
+                               req++;
+               }
+@@ -627,6 +659,7 @@ void intel_th_free(struct intel_th *th)
+ {
+       int i;
+ 
++      intel_th_request_hub_module_flush(th);
+       for (i = 0; i < TH_SUBDEVICE_MAX; i++)
+               if (th->thdev[i] != th->hub)
+                       intel_th_device_remove(th->thdev[i]);
+diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
+index eedd09332db6..72cd3c6018e1 100644
+--- a/drivers/hwtracing/intel_th/intel_th.h
++++ b/drivers/hwtracing/intel_th/intel_th.h
+@@ -199,6 +199,9 @@ struct intel_th {
+ 
+       int                     id;
+       int                     major;
++#ifdef CONFIG_MODULES
++      struct work_struct      request_module_work;
++#endif /* CONFIG_MODULES */
+ #ifdef CONFIG_INTEL_TH_DEBUG
+       struct dentry           *dbg;
+ #endif
+diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
+index bca7a2ac00d6..72c9189fac8a 100644
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -75,6 +75,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0a80),
+               .driver_data = (kernel_ulong_t)0,
+       },
++      {
++              /* Kaby Lake PCH-H */
++              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
++              .driver_data = (kernel_ulong_t)0,
++      },
+       { 0 },
+ };
+ 
+diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
+index 23eaabb19f96..a5eb09c5539f 100644
+--- a/drivers/i2c/busses/i2c-qup.c
++++ b/drivers/i2c/busses/i2c-qup.c
+@@ -1268,6 +1268,8 @@ static int qup_i2c_xfer_v2(struct i2c_adapter *adap,
+               }
+       }
+ 
++      idx = 0;
++
+       do {
+               if (msgs[idx].len == 0) {
+                       ret = -EINVAL;
+diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
+index 5fbd5bd0878f..49fc2c7e560a 100644
+--- a/drivers/i2c/muxes/i2c-mux-reg.c
++++ b/drivers/i2c/muxes/i2c-mux-reg.c
+@@ -150,7 +150,7 @@ static int i2c_mux_reg_probe_dt(struct regmux *mux,
+               mux->data.idle_in_use = true;
+ 
+       /* map address from "reg" if exists */
+-      if (of_address_to_resource(np, 0, &res)) {
++      if (of_address_to_resource(np, 0, &res) == 0) {
+               mux->data.reg_size = resource_size(&res);
+               mux->data.reg = devm_ioremap_resource(&pdev->dev, &res);
+               if (IS_ERR(mux->data.reg))
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index ca62a6e11846..4a6104beb60d 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -1421,22 +1421,15 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
+       int ep_irq_in_idx;
+       int i, error;
+ 
++      if (intf->cur_altsetting->desc.bNumEndpoints != 2)
++              return -ENODEV;
++
+       for (i = 0; xpad_device[i].idVendor; i++) {
+              if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
+                  (le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))
+                       break;
+       }
+ 
+-      if (xpad_device[i].xtype == XTYPE_XBOXONE &&
+-          intf->cur_altsetting->desc.bInterfaceNumber != 0) {
+-              /*
+-               * The Xbox One controller lists three interfaces all with the
+-               * same interface class, subclass and protocol. Differentiate by
+-               * interface number.
+-               */
+-              return -ENODEV;
+-      }
+-
+       xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL);
+       if (!xpad)
+               return -ENOMEM;
+@@ -1468,6 +1461,8 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
+              if (intf->cur_altsetting->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
+                      if (intf->cur_altsetting->desc.bInterfaceProtocol == 129)
+                               xpad->xtype = XTYPE_XBOX360W;
++                      else if (intf->cur_altsetting->desc.bInterfaceProtocol == 208)
++                              xpad->xtype = XTYPE_XBOXONE;
+                       else
+                               xpad->xtype = XTYPE_XBOX360;
+               } else {
+@@ -1482,6 +1477,17 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
+                       xpad->mapping |= MAP_STICKS_TO_NULL;
+       }
+ 
++      if (xpad->xtype == XTYPE_XBOXONE &&
++          intf->cur_altsetting->desc.bInterfaceNumber != 0) {
++              /*
++               * The Xbox One controller lists three interfaces all with the
++               * same interface class, subclass and protocol. Differentiate by
++               * interface number.
++               */
++              error = -ENODEV;
++              goto err_free_in_urb;
++      }
++
+       error = xpad_init_output(intf, xpad);
+       if (error)
+               goto err_free_in_urb;
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 78f93cf68840..be5b399da5d3 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1568,13 +1568,7 @@ static int elantech_set_properties(struct elantech_data *etd)
+               case 5:
+                       etd->hw_version = 3;
+                       break;
+-              case 6:
+-              case 7:
+-              case 8:
+-              case 9:
+-              case 10:
+-              case 13:
+-              case 14:
++              case 6 ... 14:
+                       etd->hw_version = 4;
+                       break;
+               default:
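The "case 6 ... 14:" replacing the seven labels above is a GCC/Clang case-range extension, not ISO C; the kernel uses it routinely. A tiny standalone illustration (the version-to-hardware mapping here is illustrative, not elantech's full table):

#include <stdio.h>

static int hw_version(int fw)
{
	switch (fw) {
	case 5:
		return 3;
	case 6 ... 14:      /* one range label instead of seven case labels */
		return 4;
	default:
		return -1;
	}
}

int main(void)
{
	printf("%d %d\n", hw_version(10), hw_version(5)); /* prints "4 3" */
	return 0;
}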
+diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
+index a3f0f5a47490..0f586780ceb4 100644
+--- a/drivers/input/mouse/vmmouse.c
++++ b/drivers/input/mouse/vmmouse.c
+@@ -355,18 +355,11 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
+               return -ENXIO;
+       }
+ 
+-      if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
+-              psmouse_dbg(psmouse, "VMMouse port in use.\n");
+-              return -EBUSY;
+-      }
+-
+       /* Check if the device is present */
+       response = ~VMMOUSE_PROTO_MAGIC;
+       VMMOUSE_CMD(GETVERSION, 0, version, response, dummy1, dummy2);
+-      if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU) {
+-              release_region(VMMOUSE_PROTO_PORT, 4);
++      if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU)
+               return -ENXIO;
+-      }
+ 
+       if (set_properties) {
+               psmouse->vendor = VMMOUSE_VENDOR;
+@@ -374,8 +367,6 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
+               psmouse->model = version;
+       }
+ 
+-      release_region(VMMOUSE_PROTO_PORT, 4);
+-
+       return 0;
+ }
+ 
+@@ -394,7 +385,6 @@ static void vmmouse_disconnect(struct psmouse *psmouse)
+       psmouse_reset(psmouse);
+       input_unregister_device(priv->abs_dev);
+       kfree(priv);
+-      release_region(VMMOUSE_PROTO_PORT, 4);
+ }
+ 
+ /**
+@@ -438,15 +428,10 @@ int vmmouse_init(struct psmouse *psmouse)
+       struct input_dev *rel_dev = psmouse->dev, *abs_dev;
+       int error;
+ 
+-      if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
+-              psmouse_dbg(psmouse, "VMMouse port in use.\n");
+-              return -EBUSY;
+-      }
+-
+       psmouse_reset(psmouse);
+       error = vmmouse_enable(psmouse);
+       if (error)
+-              goto release_region;
++              return error;
+ 
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+       abs_dev = input_allocate_device();
+@@ -502,8 +487,5 @@ init_fail:
+       kfree(priv);
+       psmouse->private = NULL;
+ 
+-release_region:
+-      release_region(VMMOUSE_PROTO_PORT, 4);
+-
+       return error;
+ }
+diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
+index 8dd3fb5e1f94..88e91559c84e 100644
+--- a/drivers/input/rmi4/rmi_f12.c
++++ b/drivers/input/rmi4/rmi_f12.c
+@@ -66,7 +66,7 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
+       struct rmi_device *rmi_dev = fn->rmi_dev;
+       int ret;
+       int offset;
+-      u8 buf[14];
++      u8 buf[15];
+       int pitch_x = 0;
+       int pitch_y = 0;
+       int clip_x_low = 0;
+@@ -86,9 +86,10 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
+ 
+       offset = rmi_register_desc_calc_reg_offset(&f12->control_reg_desc, 8);
+ 
+-      if (item->reg_size > 14) {
+-              dev_err(&fn->dev, "F12 control8 should be 14 bytes, not: %ld\n",
+-                      item->reg_size);
++      if (item->reg_size > sizeof(buf)) {
++              dev_err(&fn->dev,
++                      "F12 control8 should be no bigger than %zd bytes, not: 
%ld\n",
++                      sizeof(buf), item->reg_size);
+               return -ENODEV;
+       }
+ 
+diff --git a/drivers/input/touchscreen/tsc2004.c b/drivers/input/touchscreen/tsc2004.c
+index 7295c198aa08..6fe55d598fac 100644
+--- a/drivers/input/touchscreen/tsc2004.c
++++ b/drivers/input/touchscreen/tsc2004.c
+@@ -22,6 +22,11 @@
+ #include <linux/regmap.h>
+ #include "tsc200x-core.h"
+ 
++static const struct input_id tsc2004_input_id = {
++      .bustype = BUS_I2C,
++      .product = 2004,
++};
++
+ static int tsc2004_cmd(struct device *dev, u8 cmd)
+ {
+       u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
+@@ -42,7 +47,7 @@ static int tsc2004_probe(struct i2c_client *i2c,
+                        const struct i2c_device_id *id)
+ 
+ {
+-      return tsc200x_probe(&i2c->dev, i2c->irq, BUS_I2C,
++      return tsc200x_probe(&i2c->dev, i2c->irq, &tsc2004_input_id,
+                            devm_regmap_init_i2c(i2c, &tsc200x_regmap_config),
+                            tsc2004_cmd);
+ }
+diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
+index b9f593dfd2ef..f2c5f0e47f77 100644
+--- a/drivers/input/touchscreen/tsc2005.c
++++ b/drivers/input/touchscreen/tsc2005.c
+@@ -24,6 +24,11 @@
+ #include <linux/regmap.h>
+ #include "tsc200x-core.h"
+ 
++static const struct input_id tsc2005_input_id = {
++      .bustype = BUS_SPI,
++      .product = 2005,
++};
++
+ static int tsc2005_cmd(struct device *dev, u8 cmd)
+ {
+       u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
+@@ -62,7 +67,7 @@ static int tsc2005_probe(struct spi_device *spi)
+       if (error)
+               return error;
+ 
+-      return tsc200x_probe(&spi->dev, spi->irq, BUS_SPI,
++      return tsc200x_probe(&spi->dev, spi->irq, &tsc2005_input_id,
+                            devm_regmap_init_spi(spi, &tsc200x_regmap_config),
+                            tsc2005_cmd);
+ }
+diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
+index 15240c1ee850..dfa7f1c4f545 100644
+--- a/drivers/input/touchscreen/tsc200x-core.c
++++ b/drivers/input/touchscreen/tsc200x-core.c
+@@ -450,7 +450,7 @@ static void tsc200x_close(struct input_dev *input)
+       mutex_unlock(&ts->mutex);
+ }
+ 
+-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
++int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
+                 struct regmap *regmap,
+                 int (*tsc200x_cmd)(struct device *dev, u8 cmd))
+ {
+@@ -547,9 +547,18 @@ int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+       snprintf(ts->phys, sizeof(ts->phys),
+                "%s/input-ts", dev_name(dev));
+ 
+-      input_dev->name = "TSC200X touchscreen";
++      if (tsc_id->product == 2004) {
++              input_dev->name = "TSC200X touchscreen";
++      } else {
++              input_dev->name = devm_kasprintf(dev, GFP_KERNEL,
++                                               "TSC%04d touchscreen",
++                                               tsc_id->product);
++              if (!input_dev->name)
++                      return -ENOMEM;
++      }
++
+       input_dev->phys = ts->phys;
+-      input_dev->id.bustype = bustype;
++      input_dev->id = *tsc_id;
+       input_dev->dev.parent = dev;
+       input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
+       input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+diff --git a/drivers/input/touchscreen/tsc200x-core.h b/drivers/input/touchscreen/tsc200x-core.h
+index 7a482d102614..49a63a3c6840 100644
+--- a/drivers/input/touchscreen/tsc200x-core.h
++++ b/drivers/input/touchscreen/tsc200x-core.h
+@@ -70,7 +70,7 @@
+ extern const struct regmap_config tsc200x_regmap_config;
+ extern const struct dev_pm_ops tsc200x_pm_ops;
+ 
+-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
++int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
+                 struct regmap *regmap,
+                 int (*tsc200x_cmd)(struct device *dev, u8 cmd));
+ int tsc200x_remove(struct device *dev);
+diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
+index bab3c6acf6a2..b6fc4bde79de 100644
+--- a/drivers/input/touchscreen/wacom_w8001.c
++++ b/drivers/input/touchscreen/wacom_w8001.c
+@@ -27,7 +27,7 @@ MODULE_AUTHOR("Jaya Kumar <[email protected]>");
+ MODULE_DESCRIPTION(DRIVER_DESC);
+ MODULE_LICENSE("GPL");
+ 
+-#define W8001_MAX_LENGTH      11
++#define W8001_MAX_LENGTH      13
+ #define W8001_LEAD_MASK               0x80
+ #define W8001_LEAD_BYTE               0x80
+ #define W8001_TAB_MASK                0x40
+@@ -155,6 +155,7 @@ static void parse_multi_touch(struct w8001 *w8001)
+               bool touch = data[0] & (1 << i);
+ 
+               input_mt_slot(dev, i);
++              input_mt_report_slot_state(dev, MT_TOOL_FINGER, touch);
+               if (touch) {
+                       x = (data[6 * i + 1] << 7) | data[6 * i + 2];
+                       y = (data[6 * i + 3] << 7) | data[6 * i + 4];
+@@ -339,6 +340,15 @@ static irqreturn_t w8001_interrupt(struct serio *serio,
+               w8001->idx = 0;
+               parse_multi_touch(w8001);
+               break;
++
++      default:
++              /*
++               * ThinkPad X60 Tablet PC (pen only device) sometimes
++               * sends invalid data packets that are larger than
++               * W8001_PKTLEN_TPCPEN. Let's start over again.
++               */
++              if (!w8001->touch_dev && w8001->idx > W8001_PKTLEN_TPCPEN - 1)
++                      w8001->idx = 0;
+       }
+ 
+       return IRQ_HANDLED;
+@@ -513,6 +523,8 @@ static int w8001_setup_touch(struct w8001 *w8001, char *basename,
+                                       0, touch.x, 0, 0);
+               input_set_abs_params(dev, ABS_MT_POSITION_Y,
+                                       0, touch.y, 0, 0);
++              input_set_abs_params(dev, ABS_MT_TOOL_TYPE,
++                                      0, MT_TOOL_MAX, 0, 0);
+ 
+               strlcat(basename, " 2FG", basename_sz);
+               if (w8001->max_pen_x && w8001->max_pen_y)
+diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
+index 40fb1209d512..83f93404c446 100644
+--- a/drivers/irqchip/irq-mips-gic.c
++++ b/drivers/irqchip/irq-mips-gic.c
+@@ -706,7 +706,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ 
+       spin_lock_irqsave(&gic_lock, flags);
+       gic_map_to_pin(intr, gic_cpu_pin);
+-      gic_map_to_vpe(intr, vpe);
++      gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
+       for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
+               clear_bit(intr, pcpu_masks[i].pcpu_mask);
+       set_bit(intr, pcpu_masks[vpe].pcpu_mask);
+@@ -947,7 +947,7 @@ int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
+       switch (bus_token) {
+       case DOMAIN_BUS_IPI:
+               is_ipi = d->bus_token == bus_token;
+-              return to_of_node(d->fwnode) == node && is_ipi;
++              return (!node || to_of_node(d->fwnode) == node) && is_ipi;
+               break;
+       default:
+               return 0;
+diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
+index 41a1bfc5eaa7..3646aaf7188c 100644
+--- a/drivers/media/i2c/adv7604.c
++++ b/drivers/media/i2c/adv7604.c
+@@ -779,11 +779,31 @@ static const struct v4l2_dv_timings_cap adv76xx_timings_cap_digital = {
+                       V4L2_DV_BT_CAP_CUSTOM)
+ };
+ 
+-static inline const struct v4l2_dv_timings_cap *
+-adv76xx_get_dv_timings_cap(struct v4l2_subdev *sd)
++/*
++ * Return the DV timings capabilities for the requested sink pad. As a special
++ * case, pad value -1 returns the capabilities for the currently selected input.
++ */
++static const struct v4l2_dv_timings_cap *
++adv76xx_get_dv_timings_cap(struct v4l2_subdev *sd, int pad)
+ {
+-      return is_digital_input(sd) ? &adv76xx_timings_cap_digital :
+-                                    &adv7604_timings_cap_analog;
++      if (pad == -1) {
++              struct adv76xx_state *state = to_state(sd);
++
++              pad = state->selected_input;
++      }
++
++      switch (pad) {
++      case ADV76XX_PAD_HDMI_PORT_A:
++      case ADV7604_PAD_HDMI_PORT_B:
++      case ADV7604_PAD_HDMI_PORT_C:
++      case ADV7604_PAD_HDMI_PORT_D:
++              return &adv76xx_timings_cap_digital;
++
++      case ADV7604_PAD_VGA_RGB:
++      case ADV7604_PAD_VGA_COMP:
++      default:
++              return &adv7604_timings_cap_analog;
++      }
+ }
+ 
+ 
+@@ -1329,7 +1349,7 @@ static int stdi2dv_timings(struct v4l2_subdev *sd,
+               const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt;
+ 
+               if (!v4l2_valid_dv_timings(&v4l2_dv_timings_presets[i],
+-                                         adv76xx_get_dv_timings_cap(sd),
++                                         adv76xx_get_dv_timings_cap(sd, -1),
+                                          adv76xx_check_dv_timings, NULL))
+                       continue;
+               if (vtotal(bt) != stdi->lcf + 1)
+@@ -1430,18 +1450,22 @@ static int adv76xx_enum_dv_timings(struct v4l2_subdev *sd,
+               return -EINVAL;
+ 
+       return v4l2_enum_dv_timings_cap(timings,
+-              adv76xx_get_dv_timings_cap(sd), adv76xx_check_dv_timings, NULL);
++              adv76xx_get_dv_timings_cap(sd, timings->pad),
++              adv76xx_check_dv_timings, NULL);
+ }
+ 
+ static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd,
+                       struct v4l2_dv_timings_cap *cap)
+ {
+       struct adv76xx_state *state = to_state(sd);
++      unsigned int pad = cap->pad;
+ 
+       if (cap->pad >= state->source_pad)
+               return -EINVAL;
+ 
+-      *cap = *adv76xx_get_dv_timings_cap(sd);
++      *cap = *adv76xx_get_dv_timings_cap(sd, pad);
++      cap->pad = pad;
++
+       return 0;
+ }
+ 
+@@ -1450,9 +1474,9 @@ static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd,
+ static void adv76xx_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
+               struct v4l2_dv_timings *timings)
+ {
+-      v4l2_find_dv_timings_cap(timings, adv76xx_get_dv_timings_cap(sd),
+-                      is_digital_input(sd) ? 250000 : 1000000,
+-                      adv76xx_check_dv_timings, NULL);
++      v4l2_find_dv_timings_cap(timings, adv76xx_get_dv_timings_cap(sd, -1),
++                               is_digital_input(sd) ? 250000 : 1000000,
++                               adv76xx_check_dv_timings, NULL);
+ }
+ 
+ static unsigned int adv7604_read_hdmi_pixelclock(struct v4l2_subdev *sd)
+@@ -1620,7 +1644,7 @@ static int adv76xx_s_dv_timings(struct v4l2_subdev *sd,
+ 
+       bt = &timings->bt;
+ 
+-      if (!v4l2_valid_dv_timings(timings, adv76xx_get_dv_timings_cap(sd),
++      if (!v4l2_valid_dv_timings(timings, adv76xx_get_dv_timings_cap(sd, -1),
+                                  adv76xx_check_dv_timings, NULL))
+               return -ERANGE;
+ 
+diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
+index 87c12930416f..92d9d4214c3a 100644
+--- a/drivers/media/usb/airspy/airspy.c
++++ b/drivers/media/usb/airspy/airspy.c
+@@ -1072,7 +1072,7 @@ static int airspy_probe(struct usb_interface *intf,
+       if (ret) {
+               dev_err(s->dev, "Failed to register as video device (%d)\n",
+                               ret);
+-              goto err_unregister_v4l2_dev;
++              goto err_free_controls;
+       }
+       dev_info(s->dev, "Registered as %s\n",
+                       video_device_node_name(&s->vdev));
+@@ -1081,7 +1081,6 @@ static int airspy_probe(struct usb_interface *intf,
+ 
+ err_free_controls:
+       v4l2_ctrl_handler_free(&s->hdl);
+-err_unregister_v4l2_dev:
+       v4l2_device_unregister(&s->v4l2_dev);
+ err_free_mem:
+       kfree(s);
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index b0a27413cb13..185bd359ee6e 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -352,8 +352,10 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
+               goto idata_err;
+       }
+ 
+-      if (!idata->buf_bytes)
++      if (!idata->buf_bytes) {
++              idata->buf = NULL;
+               return idata;
++      }
+ 
+       idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
+       if (!idata->buf) {
+@@ -1760,8 +1762,8 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+ 
+       packed_cmd_hdr = packed->cmd_hdr;
+       memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
+-      packed_cmd_hdr[0] = (packed->nr_entries << 16) |
+-              (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
++      packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
++              (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
+       hdr_blocks = mmc_large_sector(card) ? 8 : 1;
+ 
+       /*
+@@ -1775,14 +1777,14 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+                       ((brq->data.blocks * brq->data.blksz) >=
+                        card->ext_csd.data_tag_unit_size);
+               /* Argument of CMD23 */
+-              packed_cmd_hdr[(i * 2)] =
++              packed_cmd_hdr[(i * 2)] = cpu_to_le32(
+                       (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
+                       (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
+-                      blk_rq_sectors(prq);
++                      blk_rq_sectors(prq));
+               /* Argument of CMD18 or CMD25 */
+-              packed_cmd_hdr[((i * 2)) + 1] =
++              packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
+                       mmc_card_blockaddr(card) ?
+-                      blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
++                      blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
+               packed->blocks += blk_rq_sectors(prq);
+               i++;
+       }
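The endianness hunks matter because the packed command header is defined as little-endian on the wire: cpu_to_le32() is a no-op on little-endian hosts, so the missing conversion only corrupted headers on big-endian machines. A portable sketch of the same serialization in plain C (illustrative field values, not mmc's exact constants):

#include <stdint.h>
#include <stdio.h>

/* Store a host-order u32 as little-endian bytes regardless of host byte
 * order -- what cpu_to_le32() plus a store achieves in the kernel. */
static void put_le32(uint8_t *dst, uint32_t v)
{
	dst[0] = v & 0xff;
	dst[1] = (v >> 8) & 0xff;
	dst[2] = (v >> 16) & 0xff;
	dst[3] = (v >> 24) & 0xff;
}

int main(void)
{
	/* nr_entries = 8, write opcode 0x02, header version 0x01,
	 * laid out like packed_cmd_hdr[0] above. */
	uint32_t word = (8u << 16) | (0x02u << 8) | 0x01u;
	uint8_t hdr[4];

	put_le32(hdr, word);
	printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
	return 0;   /* prints "01 02 08 00" on every host */
}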
+diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
+index 8b3275d7792a..8f5e93cb7975 100644
+--- a/drivers/net/can/at91_can.c
++++ b/drivers/net/can/at91_can.c
+@@ -712,9 +712,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
+ 
+       /* upper group completed, look again in lower */
+       if (priv->rx_next > get_mb_rx_low_last(priv) &&
+-          quota > 0 && mb > get_mb_rx_last(priv)) {
++          mb > get_mb_rx_last(priv)) {
+               priv->rx_next = get_mb_rx_first(priv);
+-              goto again;
++              if (quota > 0)
++                      goto again;
+       }
+ 
+       return received;
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index f91b094288da..e3dccd3200d5 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -332,9 +332,23 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
+ 
+       priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
+ 
+-      for (i = 0; i < frame->can_dlc; i += 2) {
+-              priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
+-                              frame->data[i] | (frame->data[i + 1] << 8));
++      if (priv->type == BOSCH_D_CAN) {
++              u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);
++
++              for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
++                      data = (u32)frame->data[i];
++                      data |= (u32)frame->data[i + 1] << 8;
++                      data |= (u32)frame->data[i + 2] << 16;
++                      data |= (u32)frame->data[i + 3] << 24;
++                      priv->write_reg32(priv, dreg, data);
++              }
++      } else {
++              for (i = 0; i < frame->can_dlc; i += 2) {
++                      priv->write_reg(priv,
++                                      C_CAN_IFACE(DATA1_REG, iface) + i / 2,
++                                      frame->data[i] |
++                                      (frame->data[i + 1] << 8));
++              }
+       }
+ }
+ 
+@@ -402,10 +416,20 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
+       } else {
+               int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
+ 
+-              for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
+-                      data = priv->read_reg(priv, dreg);
+-                      frame->data[i] = data;
+-                      frame->data[i + 1] = data >> 8;
++              if (priv->type == BOSCH_D_CAN) {
++                      for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
++                              data = priv->read_reg32(priv, dreg);
++                              frame->data[i] = data;
++                              frame->data[i + 1] = data >> 8;
++                              frame->data[i + 2] = data >> 16;
++                              frame->data[i + 3] = data >> 24;
++                      }
++              } else {
++                      for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
++                              data = priv->read_reg(priv, dreg);
++                              frame->data[i] = data;
++                              frame->data[i + 1] = data >> 8;
++                      }
+               }
+       }
+ 
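/*
 * [Annotation, not part of the upstream patch] The c_can hunks above switch
 * D_CAN controllers to 32-bit data-register accesses, packing four payload
 * bytes per register with byte 0 in the low bits. A standalone sketch of
 * that packing and unpacking (plain C, hypothetical helper names):
 */
#include <stdint.h>

static uint32_t can_pack4(const uint8_t *d)
{
        return (uint32_t)d[0] |
               (uint32_t)d[1] << 8 |
               (uint32_t)d[2] << 16 |
               (uint32_t)d[3] << 24;
}

static void can_unpack4(uint32_t v, uint8_t *d)
{
        d[0] = v;
        d[1] = v >> 8;
        d[2] = v >> 16;
        d[3] = v >> 24;
}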
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 910c12e2638e..ad535a854e5c 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -798,6 +798,9 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[])
+        * - control mode with CAN_CTRLMODE_FD set
+        */
+ 
++      if (!data)
++              return 0;
++
+       if (data[IFLA_CAN_CTRLMODE]) {
+               struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ 
+@@ -1008,6 +1011,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
+       return -EOPNOTSUPP;
+ }
+ 
++static void can_dellink(struct net_device *dev, struct list_head *head)
++{
++      return;
++}
++
+ static struct rtnl_link_ops can_link_ops __read_mostly = {
+       .kind           = "can",
+       .maxtype        = IFLA_CAN_MAX,
+@@ -1016,6 +1024,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
+       .validate       = can_validate,
+       .newlink        = can_newlink,
+       .changelink     = can_changelink,
++      .dellink        = can_dellink,
+       .get_size       = can_get_size,
+       .fill_info      = can_fill_info,
+       .get_xstats_size = can_get_xstats_size,
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index a6d26d351dfc..bfddcab82c29 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -244,7 +244,7 @@
+ /* Various constants */
+ 
+ /* Coalescing */
+-#define MVNETA_TXDONE_COAL_PKTS               1
++#define MVNETA_TXDONE_COAL_PKTS               0       /* interrupt per packet */
+ #define MVNETA_RX_COAL_PKTS           32
+ #define MVNETA_RX_COAL_USEC           100
+ 
+diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
+index 9cfa544072b5..cfde7bc551a1 100644
+--- a/drivers/pinctrl/freescale/pinctrl-imx.c
++++ b/drivers/pinctrl/freescale/pinctrl-imx.c
+@@ -209,9 +209,9 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
+               pin_reg = &info->pin_regs[pin_id];
+ 
+               if (pin_reg->mux_reg == -1) {
+-                      dev_err(ipctl->dev, "Pin(%s) does not support mux function\n",
++                      dev_dbg(ipctl->dev, "Pin(%s) does not support mux function\n",
+                               info->pins[pin_id].name);
+-                      return -EINVAL;
++                      continue;
+               }
+ 
+               if (info->flags & SHARE_MUX_CONF_REG) {
+diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
+index cf9bafa10acf..bfdf720db270 100644
+--- a/drivers/pinctrl/pinctrl-single.c
++++ b/drivers/pinctrl/pinctrl-single.c
+@@ -1580,6 +1580,9 @@ static inline void pcs_irq_set(struct pcs_soc_data *pcs_soc,
+               else
+                       mask &= ~soc_mask;
+               pcs->write(mask, pcswi->reg);
++
++              /* flush posted write */
++              mask = pcs->read(pcswi->reg);
+               raw_spin_unlock(&pcs->lock);
+       }
+ 
+diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
+index d45cd254ed1c..2b331d5b9e79 100644
+--- a/drivers/platform/chrome/cros_ec_dev.c
++++ b/drivers/platform/chrome/cros_ec_dev.c
+@@ -147,13 +147,19 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
+               goto exit;
+       }
+ 
++      if (u_cmd.outsize != s_cmd->outsize ||
++          u_cmd.insize != s_cmd->insize) {
++              ret = -EINVAL;
++              goto exit;
++      }
++
+       s_cmd->command += ec->cmd_offset;
+       ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
+       /* Only copy data to userland if data was received. */
+       if (ret < 0)
+               goto exit;
+ 
+-      if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize))
++      if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
+               ret = -EFAULT;
+ exit:
+       kfree(s_cmd);
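/*
 * [Annotation, not part of the upstream patch] The cros_ec hunk above closes
 * a double-fetch hole: the command header is copied from userspace once to
 * size the allocation, then the full command is copied again, so the sizes
 * in the second copy must match the first before being trusted -- and only
 * the kernel-held insize may drive the copy_to_user() length. A hypothetical
 * sketch of the consistency check:
 */
#include <stdbool.h>
#include <stdint.h>

struct cmd_sizes { uint32_t outsize; uint32_t insize; };

/* Reject the request if the re-fetched sizes differ from the first fetch. */
static bool cmd_sizes_consistent(const struct cmd_sizes *first,
                                 const struct cmd_sizes *second)
{
        return first->outsize == second->outsize &&
               first->insize == second->insize;
}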
+diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
+index 456987c88baa..b13cd074c52a 100644
+--- a/drivers/power/power_supply_core.c
++++ b/drivers/power/power_supply_core.c
+@@ -565,11 +565,12 @@ static int power_supply_read_temp(struct thermal_zone_device *tzd,
+ 
+       WARN_ON(tzd == NULL);
+       psy = tzd->devdata;
+-      ret = psy->desc->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
++      ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
++      if (ret)
++              return ret;
+ 
+       /* Convert tenths of degree Celsius to milli degree Celsius. */
+-      if (!ret)
+-              *temp = val.intval * 100;
++      *temp = val.intval * 100;
+ 
+       return ret;
+ }
+@@ -612,10 +613,12 @@ static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
+       int ret;
+ 
+       psy = tcd->devdata;
+-      ret = psy->desc->get_property(psy,
+-              POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
+-      if (!ret)
+-              *state = val.intval;
++      ret = power_supply_get_property(psy,
++                      POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
++      if (ret)
++              return ret;
++
++      *state = val.intval;
+ 
+       return ret;
+ }
+@@ -628,10 +631,12 @@ static int ps_get_cur_chrage_cntl_limit(struct thermal_cooling_device *tcd,
+       int ret;
+ 
+       psy = tcd->devdata;
+-      ret = psy->desc->get_property(psy,
+-              POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+-      if (!ret)
+-              *state = val.intval;
++      ret = power_supply_get_property(psy,
++                      POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
++      if (ret)
++              return ret;
++
++      *state = val.intval;
+ 
+       return ret;
+ }
+diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
+index 38a8bbe74810..83797d89c30f 100644
+--- a/drivers/pps/clients/pps_parport.c
++++ b/drivers/pps/clients/pps_parport.c
+@@ -195,7 +195,7 @@ static void parport_detach(struct parport *port)
+       struct pps_client_pp *device;
+ 
+       /* FIXME: oooh, this is ugly! */
+-      if (strcmp(pardev->name, KBUILD_MODNAME))
++      if (!pardev || strcmp(pardev->name, KBUILD_MODNAME))
+               /* not our port */
+               return;
+ 
+diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
+index 80b1979e8d95..df036b872b05 100644
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -1051,6 +1051,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
+               qeth_l2_set_offline(cgdev);
+ 
+       if (card->dev) {
++              netif_napi_del(&card->napi);
+               unregister_netdev(card->dev);
+               card->dev = NULL;
+       }
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index ac544330daeb..709b52339ff9 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -3226,6 +3226,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
+               qeth_l3_set_offline(cgdev);
+ 
+       if (card->dev) {
++              netif_napi_del(&card->napi);
+               unregister_netdev(card->dev);
+               card->dev = NULL;
+       }
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index d6a691e27d33..d6803a9e5ab8 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -10093,6 +10093,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
+               ioa_cfg->intr_flag = IPR_USE_MSI;
+       else {
+               ioa_cfg->intr_flag = IPR_USE_LSI;
++              ioa_cfg->clear_isr = 1;
+               ioa_cfg->nvectors = 1;
+               dev_info(&pdev->dev, "Cannot enable MSI.\n");
+       }
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index 5649c200d37c..a92a62dea793 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -2548,7 +2548,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
+       if (!vha->flags.online)
+               return;
+ 
+-      if (rsp->msix->cpuid != smp_processor_id()) {
++      if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
+               /* if kernel does not notify qla of IRQ's CPU change,
+                * then set it here.
+                */
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index ff41c310c900..eaccd651ccda 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -429,7 +429,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
+        * here, and we don't know what device it is
+        * trying to work with, leave it as-is.
+        */
+-      vmax = 8;       /* max length of vendor */
++      vmax = sizeof(devinfo->vendor);
+       vskip = vendor;
+       while (vmax > 0 && *vskip == ' ') {
+               vmax--;
+@@ -439,7 +439,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
+       while (vmax > 0 && vskip[vmax - 1] == ' ')
+               --vmax;
+ 
+-      mmax = 16;      /* max length of model */
++      mmax = sizeof(devinfo->model);
+       mskip = model;
+       while (mmax > 0 && *mskip == ' ') {
+               mmax--;
+@@ -455,10 +455,12 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
+                        * Behave like the older version of get_device_flags.
+                        */
+                       if (memcmp(devinfo->vendor, vskip, vmax) ||
+-                                      devinfo->vendor[vmax])
++                                      (vmax < sizeof(devinfo->vendor) &&
++                                              devinfo->vendor[vmax]))
+                               continue;
+                       if (memcmp(devinfo->model, mskip, mmax) ||
+-                                      devinfo->model[mmax])
++                                      (mmax < sizeof(devinfo->model) &&
++                                              devinfo->model[mmax]))
+                               continue;
+                       return devinfo;
+               } else {
+diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
+index 6c6c0013ec7a..8048c2fedf5b 100644
+--- a/drivers/spi/spi-rockchip.c
++++ b/drivers/spi/spi-rockchip.c
+@@ -578,7 +578,7 @@ static int rockchip_spi_transfer_one(
+               struct spi_device *spi,
+               struct spi_transfer *xfer)
+ {
+-      int ret = 1;
++      int ret = 0;
+       struct rockchip_spi *rs = spi_master_get_devdata(master);
+ 
+       WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
+@@ -627,6 +627,8 @@ static int rockchip_spi_transfer_one(
+                       spi_enable_chip(rs, 1);
+                       ret = rockchip_spi_prepare_dma(rs);
+               }
++              /* successful DMA prepare means the transfer is in progress */
++              ret = ret ? ret : 1;
+       } else {
+               spi_enable_chip(rs, 1);
+               ret = rockchip_spi_pio_transfer(rs);
+diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
+index 1ddd9e2309b6..cf007f3b83ec 100644
+--- a/drivers/spi/spi-sun4i.c
++++ b/drivers/spi/spi-sun4i.c
+@@ -173,13 +173,17 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
+ {
+       struct sun4i_spi *sspi = spi_master_get_devdata(master);
+       unsigned int mclk_rate, div, timeout;
++      unsigned int start, end, tx_time;
+       unsigned int tx_len = 0;
+       int ret = 0;
+       u32 reg;
+ 
+       /* We don't support transfer larger than the FIFO */
+       if (tfr->len > SUN4I_FIFO_DEPTH)
+-              return -EINVAL;
++              return -EMSGSIZE;
++
++      if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH)
++              return -EMSGSIZE;
+ 
+       reinit_completion(&sspi->done);
+       sspi->tx_buf = tfr->tx_buf;
+@@ -269,8 +273,12 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
+       sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
+       sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
+ 
+-      /* Fill the TX FIFO */
+-      sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
++      /*
++       * Fill the TX FIFO
++       * Filling the FIFO fully causes timeout for some reason
++       * at least on spi2 on A10s
++       */
++      sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
+ 
+       /* Enable the interrupts */
+       sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
+@@ -279,9 +287,16 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
+       reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
+       sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
+ 
++      tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
++      start = jiffies;
+       timeout = wait_for_completion_timeout(&sspi->done,
+-                                            msecs_to_jiffies(1000));
++                                            msecs_to_jiffies(tx_time));
++      end = jiffies;
+       if (!timeout) {
++              dev_warn(&master->dev,
++                       "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
++                       dev_name(&spi->dev), tfr->len, tfr->speed_hz,
++                       jiffies_to_msecs(end - start), tx_time);
+               ret = -ETIMEDOUT;
+               goto out;
+       }
+diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
+index 42e2c4bd690a..7fce79a60608 100644
+--- a/drivers/spi/spi-sun6i.c
++++ b/drivers/spi/spi-sun6i.c
+@@ -160,6 +160,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
+ {
+       struct sun6i_spi *sspi = spi_master_get_devdata(master);
+       unsigned int mclk_rate, div, timeout;
++      unsigned int start, end, tx_time;
+       unsigned int tx_len = 0;
+       int ret = 0;
+       u32 reg;
+@@ -269,9 +270,16 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
+       reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
+       sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
+ 
++      tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
++      start = jiffies;
+       timeout = wait_for_completion_timeout(&sspi->done,
+-                                            msecs_to_jiffies(1000));
++                                            msecs_to_jiffies(tx_time));
++      end = jiffies;
+       if (!timeout) {
++              dev_warn(&master->dev,
++                       "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
++                       dev_name(&spi->dev), tfr->len, tfr->speed_hz,
++                       jiffies_to_msecs(end - start), tx_time);
+               ret = -ETIMEDOUT;
+               goto out;
+       }
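/*
 * [Annotation, not part of the upstream patch] The sun4i and sun6i hunks
 * above replace the flat 1000 ms completion timeout with one derived from
 * the transfer itself: len * 8 bits, doubled for margin, divided by the bus
 * clock in kHz, floored at 100 ms. For example, 64 bytes at 100 kHz gives
 * 64 * 8 * 2 / 100 = 10 ms, which the floor raises to 100 ms. Standalone
 * sketch of the same arithmetic (like the driver, it assumes speed_hz is
 * at least 1 kHz):
 */
static unsigned int spi_xfer_timeout_ms(unsigned int len, unsigned int speed_hz)
{
        unsigned int ms = len * 8 * 2 / (speed_hz / 1000);

        return ms > 100 ? ms : 100;     /* never below 100 ms */
}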
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index e198996c5b83..9e45c5728d9b 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -667,8 +667,11 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
+               fsi = tty->driver_data;
+       else
+               fsi = tty->link->driver_data;
+-      devpts_kill_index(fsi, tty->index);
+-      devpts_put_ref(fsi);
++
++      if (fsi) {
++              devpts_kill_index(fsi, tty->index);
++              devpts_put_ref(fsi);
++      }
+ }
+ 
+ static const struct tty_operations ptm_unix98_ops = {
+diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
+index d029bbe9eb36..641fed609911 100644
+--- a/drivers/usb/host/ohci-q.c
++++ b/drivers/usb/host/ohci-q.c
+@@ -183,7 +183,6 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
+ {
+       int     branch;
+ 
+-      ed->state = ED_OPER;
+       ed->ed_prev = NULL;
+       ed->ed_next = NULL;
+       ed->hwNextED = 0;
+@@ -259,6 +258,8 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
+       /* the HC may not see the schedule updates yet, but if it does
+        * then they'll be properly ordered.
+        */
++
++      ed->state = ED_OPER;
+       return 0;
+ }
+ 
+diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
+index 8e67336f8ddd..6a25533da237 100644
+--- a/drivers/xen/xen-pciback/conf_space.c
++++ b/drivers/xen/xen-pciback/conf_space.c
+@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
+               field_start = OFFSET(cfg_entry);
+               field_end = OFFSET(cfg_entry) + field->size;
+ 
+-              if ((req_start >= field_start && req_start < field_end)
+-                  || (req_end > field_start && req_end <= field_end)) {
++               if (req_end > field_start && field_end > req_start) {
+                       err = conf_space_read(dev, cfg_entry, field_start,
+                                             &tmp_val);
+                       if (err)
+@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
+               field_start = OFFSET(cfg_entry);
+               field_end = OFFSET(cfg_entry) + field->size;
+ 
+-              if ((req_start >= field_start && req_start < field_end)
+-                  || (req_end > field_start && req_end <= field_end)) {
++               if (req_end > field_start && field_end > req_start) {
+                       tmp_val = 0;
+ 
+                       err = xen_pcibk_config_read(dev, field_start,
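/*
 * [Annotation, not part of the upstream patch] Both xen-pciback hunks above
 * replace a two-sided containment test -- which missed accesses that start
 * before a field and end after it -- with the canonical interval-overlap
 * check: two half-open ranges overlap iff each one starts before the other
 * ends. Standalone sketch:
 */
#include <stdbool.h>

static bool ranges_overlap(unsigned int a_start, unsigned int a_end,
                           unsigned int b_start, unsigned int b_end)
{
        return a_end > b_start && b_end > a_start;
}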
+diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
+index cacf30d14747..7487971f9f78 100644
+--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
++++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
+@@ -316,11 +316,18 @@ static int xenbus_write_transaction(unsigned msg_type,
+                       rc = -ENOMEM;
+                       goto out;
+               }
++      } else {
++              list_for_each_entry(trans, &u->transactions, list)
++                      if (trans->handle.id == u->u.msg.tx_id)
++                              break;
++              if (&trans->list == &u->transactions)
++                      return -ESRCH;
+       }
+ 
+       reply = xenbus_dev_request_and_reply(&u->u.msg);
+       if (IS_ERR(reply)) {
+-              kfree(trans);
++              if (msg_type == XS_TRANSACTION_START)
++                      kfree(trans);
+               rc = PTR_ERR(reply);
+               goto out;
+       }
+@@ -333,12 +340,7 @@ static int xenbus_write_transaction(unsigned msg_type,
+                       list_add(&trans->list, &u->transactions);
+               }
+       } else if (u->u.msg.type == XS_TRANSACTION_END) {
+-              list_for_each_entry(trans, &u->transactions, list)
+-                      if (trans->handle.id == u->u.msg.tx_id)
+-                              break;
+-              BUG_ON(&trans->list == &u->transactions);
+               list_del(&trans->list);
+-
+               kfree(trans);
+       }
+ 
+diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
+index 374b12af8812..0bd3d47ad24d 100644
+--- a/drivers/xen/xenbus/xenbus_xs.c
++++ b/drivers/xen/xenbus/xenbus_xs.c
+@@ -249,9 +249,6 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
+ 
+       mutex_unlock(&xs_state.request_mutex);
+ 
+-      if (IS_ERR(ret))
+-              return ret;
+-
+       if ((msg->type == XS_TRANSACTION_END) ||
+           ((req_msg.type == XS_TRANSACTION_START) &&
+            (msg->type == XS_ERROR)))
+diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
+index b84c291ba1eb..d7b78d531e63 100644
+--- a/fs/9p/vfs_file.c
++++ b/fs/9p/vfs_file.c
+@@ -74,7 +74,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
+                                       v9fs_proto_dotu(v9ses));
+       fid = file->private_data;
+       if (!fid) {
+-              fid = v9fs_fid_clone(file->f_path.dentry);
++              fid = v9fs_fid_clone(file_dentry(file));
+               if (IS_ERR(fid))
+                       return PTR_ERR(fid);
+ 
+@@ -100,7 +100,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
+                * because we want write after unlink usecase
+                * to work.
+                */
+-              fid = v9fs_writeback_fid(file->f_path.dentry);
++              fid = v9fs_writeback_fid(file_dentry(file));
+               if (IS_ERR(fid)) {
+                       err = PTR_ERR(fid);
+                       mutex_unlock(&v9inode->v_mutex);
+@@ -516,7 +516,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
+                * because we want write after unlink usecase
+                * to work.
+                */
+-              fid = v9fs_writeback_fid(filp->f_path.dentry);
++              fid = v9fs_writeback_fid(file_dentry(filp));
+               if (IS_ERR(fid)) {
+                       retval = PTR_ERR(fid);
+                       mutex_unlock(&v9inode->v_mutex);
+diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
+index f02404052b7b..27794b137b24 100644
+--- a/fs/ecryptfs/file.c
++++ b/fs/ecryptfs/file.c
+@@ -169,6 +169,19 @@ out:
+       return rc;
+ }
+ 
++static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
++{
++      struct file *lower_file = ecryptfs_file_to_lower(file);
++      /*
++       * Don't allow mmap on top of file systems that don't support it
++       * natively.  If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs
++       * allows recursive mounting, this will need to be extended.
++       */
++      if (!lower_file->f_op->mmap)
++              return -ENODEV;
++      return generic_file_mmap(file, vma);
++}
++
+ /**
+  * ecryptfs_open
+  * @inode: inode speciying file to open
+@@ -403,7 +416,7 @@ const struct file_operations ecryptfs_main_fops = {
+ #ifdef CONFIG_COMPAT
+       .compat_ioctl = ecryptfs_compat_ioctl,
+ #endif
+-      .mmap = generic_file_mmap,
++      .mmap = ecryptfs_mmap,
+       .open = ecryptfs_open,
+       .flush = ecryptfs_flush,
+       .release = ecryptfs_release,
+diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
+index e818f5ac7a26..866bb18efefe 100644
+--- a/fs/ecryptfs/kthread.c
++++ b/fs/ecryptfs/kthread.c
+@@ -25,7 +25,6 @@
+ #include <linux/slab.h>
+ #include <linux/wait.h>
+ #include <linux/mount.h>
+-#include <linux/file.h>
+ #include "ecryptfs_kernel.h"
+ 
+ struct ecryptfs_open_req {
+@@ -148,7 +147,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
+       flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
+       (*lower_file) = dentry_open(&req.path, flags, cred);
+       if (!IS_ERR(*lower_file))
+-              goto have_file;
++              goto out;
+       if ((flags & O_ACCMODE) == O_RDONLY) {
+               rc = PTR_ERR((*lower_file));
+               goto out;
+@@ -166,16 +165,8 @@ int ecryptfs_privileged_open(struct file **lower_file,
+       mutex_unlock(&ecryptfs_kthread_ctl.mux);
+       wake_up(&ecryptfs_kthread_ctl.wait);
+       wait_for_completion(&req.done);
+-      if (IS_ERR(*lower_file)) {
++      if (IS_ERR(*lower_file))
+               rc = PTR_ERR(*lower_file);
+-              goto out;
+-      }
+-have_file:
+-      if ((*lower_file)->f_op->mmap == NULL) {
+-              fput(*lower_file);
+-              *lower_file = NULL;
+-              rc = -EMEDIUMTYPE;
+-      }
+ out:
+       return rc;
+ }
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 95bf4679ac54..de692b91c166 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -469,6 +469,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
+               error_msg = "invalid extent entries";
+               goto corrupted;
+       }
++      if (unlikely(depth > 32)) {
++              error_msg = "too large eh_depth";
++              goto corrupted;
++      }
+       /* Verify checksum on non-root extent tree nodes */
+       if (ext_depth(inode) != depth &&
+           !ext4_extent_block_csum_verify(inode, eh)) {
+diff --git a/fs/inode.c b/fs/inode.c
+index 69b8b526c194..721fa18ead59 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -1739,8 +1739,8 @@ static int __remove_privs(struct dentry *dentry, int kill)
+  */
+ int file_remove_privs(struct file *file)
+ {
+-      struct dentry *dentry = file->f_path.dentry;
+-      struct inode *inode = d_inode(dentry);
++      struct dentry *dentry = file_dentry(file);
++      struct inode *inode = file_inode(file);
+       int kill;
+       int error = 0;
+ 
+@@ -1748,7 +1748,7 @@ int file_remove_privs(struct file *file)
+       if (IS_NOSEC(inode))
+               return 0;
+ 
+-      kill = file_needs_remove_privs(file);
++      kill = dentry_needs_remove_privs(dentry);
+       if (kill < 0)
+               return kill;
+       if (kill)
+diff --git a/fs/ioctl.c b/fs/ioctl.c
+index 116a333e9c77..0f56deb24ce6 100644
+--- a/fs/ioctl.c
++++ b/fs/ioctl.c
+@@ -590,6 +590,7 @@ static long ioctl_file_dedupe_range(struct file *file, void __user *arg)
+               goto out;
+       }
+ 
++      same->dest_count = count;
+       ret = vfs_dedupe_file_range(file, same);
+       if (ret)
+               goto out;
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 154a107cd376..fc4084ef4736 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -335,12 +335,17 @@ static struct notifier_block lockd_inet6addr_notifier = {
+ };
+ #endif
+ 
+-static void lockd_svc_exit_thread(void)
++static void lockd_unregister_notifiers(void)
+ {
+       unregister_inetaddr_notifier(&lockd_inetaddr_notifier);
+ #if IS_ENABLED(CONFIG_IPV6)
+       unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
+ #endif
++}
++
++static void lockd_svc_exit_thread(void)
++{
++      lockd_unregister_notifiers();
+       svc_exit_thread(nlmsvc_rqst);
+ }
+ 
+@@ -462,7 +467,7 @@ int lockd_up(struct net *net)
+        * Note: svc_serv structures have an initial use count of 1,
+        * so we exit through here on both success and failure.
+        */
+-err_net:
++err_put:
+       svc_destroy(serv);
+ err_create:
+       mutex_unlock(&nlmsvc_mutex);
+@@ -470,7 +475,9 @@ err_create:
+ 
+ err_start:
+       lockd_down_net(serv, net);
+-      goto err_net;
++err_net:
++      lockd_unregister_notifiers();
++      goto err_put;
+ }
+ EXPORT_SYMBOL_GPL(lockd_up);
+ 
+diff --git a/fs/locks.c b/fs/locks.c
+index 7c5f91be9b65..ee1b15f6fc13 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1628,7 +1628,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
+ {
+       struct file_lock *fl, *my_fl = NULL, *lease;
+       struct dentry *dentry = filp->f_path.dentry;
+-      struct inode *inode = dentry->d_inode;
++      struct inode *inode = file_inode(filp);
+       struct file_lock_context *ctx;
+       bool is_deleg = (*flp)->fl_flags & FL_DELEG;
+       int error;
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 783004af5707..419f746d851d 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1562,6 +1562,7 @@ void __detach_mounts(struct dentry *dentry)
+               goto out_unlock;
+ 
+       lock_mount_hash();
++      event++;
+       while (!hlist_empty(&mp->m_list)) {
+               mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
+               if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 69bd801afb53..37e49cb2ac4c 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -443,7 +443,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
+       if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
+               return 0;
+       bytes = le16_to_cpu(sbp->s_bytes);
+-      if (bytes > BLOCK_SIZE)
++      if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
+               return 0;
+       crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
+                      sumoff);
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 339125bb4d2c..2a9f10c8af1b 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -540,15 +540,19 @@
+ 
+ #define INIT_TEXT                                                     \
+       *(.init.text)                                                   \
++      *(.text.startup)                                                \
+       MEM_DISCARD(init.text)
+ 
+ #define EXIT_DATA                                                     \
+       *(.exit.data)                                                   \
++      *(.fini_array)                                                  \
++      *(.dtors)                                                       \
+       MEM_DISCARD(exit.data)                                          \
+       MEM_DISCARD(exit.rodata)
+ 
+ #define EXIT_TEXT                                                     \
+       *(.exit.text)                                                   \
++      *(.text.exit)                                                   \
+       MEM_DISCARD(exit.text)
+ 
+ #define EXIT_CALL                                                     \
+diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
+index 0367c63f5960..5c37b58445c6 100644
+--- a/include/linux/bcma/bcma.h
++++ b/include/linux/bcma/bcma.h
+@@ -158,6 +158,7 @@ struct bcma_host_ops {
+ #define BCMA_CORE_DEFAULT             0xFFF
+ 
+ #define BCMA_MAX_NR_CORES             16
++#define BCMA_CORE_SIZE                        0x1000
+ 
+ /* Chip IDs of PCIe devices */
+ #define BCMA_CHIP_ID_BCM4313  0x4313
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index 1191d79aa495..4d061681dbf1 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -97,6 +97,11 @@ enum mem_cgroup_events_target {
+ #define MEM_CGROUP_ID_SHIFT   16
+ #define MEM_CGROUP_ID_MAX     USHRT_MAX
+ 
++struct mem_cgroup_id {
++      int id;
++      atomic_t ref;
++};
++
+ struct mem_cgroup_stat_cpu {
+       long count[MEMCG_NR_STAT];
+       unsigned long events[MEMCG_NR_EVENTS];
+@@ -172,6 +177,9 @@ enum memcg_kmem_state {
+ struct mem_cgroup {
+       struct cgroup_subsys_state css;
+ 
++      /* Private memcg ID. Used to ID objects that outlive the cgroup */
++      struct mem_cgroup_id id;
++
+       /* Accounted resources */
+       struct page_counter memory;
+       struct page_counter swap;
+@@ -330,22 +338,9 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
+       if (mem_cgroup_disabled())
+               return 0;
+ 
+-      return memcg->css.id;
+-}
+-
+-/**
+- * mem_cgroup_from_id - look up a memcg from an id
+- * @id: the id to look up
+- *
+- * Caller must hold rcu_read_lock() and use css_tryget() as necessary.
+- */
+-static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+-{
+-      struct cgroup_subsys_state *css;
+-
+-      css = css_from_id(id, &memory_cgrp_subsys);
+-      return mem_cgroup_from_css(css);
++      return memcg->id.id;
+ }
++struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
+ 
+ /**
+  * parent_mem_cgroup - find the accounting parent of a memcg
+diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
+index 51a97ac8bfbf..495a09bfeb94 100644
+--- a/include/linux/radix-tree.h
++++ b/include/linux/radix-tree.h
+@@ -399,6 +399,7 @@ static inline __must_check
+ void **radix_tree_iter_retry(struct radix_tree_iter *iter)
+ {
+       iter->next_index = iter->index;
++      iter->tags = 0;
+       return NULL;
+ }
+ 
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index 49eb4f8ebac9..2b0fad83683f 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -158,7 +158,7 @@ struct anon_vma *page_get_anon_vma(struct page *page);
+ /*
+  * rmap interfaces called when adding or removing pte of page
+  */
+-void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
++void page_move_anon_rmap(struct page *, struct vm_area_struct *);
+ void page_add_anon_rmap(struct page *, struct vm_area_struct *,
+               unsigned long, bool);
+ void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
+diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
+index 813ffb2e22c9..1c589b62c244 100644
+--- a/include/uapi/linux/Kbuild
++++ b/include/uapi/linux/Kbuild
+@@ -244,6 +244,7 @@ endif
+ header-y += hw_breakpoint.h
+ header-y += l2tp.h
+ header-y += libc-compat.h
++header-y += lirc.h
+ header-y += limits.h
+ header-y += llc.h
+ header-y += loop.h
+diff --git a/init/Kconfig b/init/Kconfig
+index 0dfd09d54c65..d895c7a183c6 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1423,6 +1423,7 @@ config KALLSYMS_ALL
+ 
+ config KALLSYMS_ABSOLUTE_PERCPU
+       bool
++      depends on KALLSYMS
+       default X86_64 && SMP
+ 
+ config KALLSYMS_BASE_RELATIVE
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 86cb5c6e8932..75c0ff00aca6 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -837,6 +837,8 @@ static void put_css_set_locked(struct css_set *cset)
+ 
+ static void put_css_set(struct css_set *cset)
+ {
++      unsigned long flags;
++
+       /*
+        * Ensure that the refcount doesn't hit zero while any readers
+        * can see it. Similar to atomic_dec_and_lock(), but for an
+@@ -845,9 +847,9 @@ static void put_css_set(struct css_set *cset)
+       if (atomic_add_unless(&cset->refcount, -1, 1))
+               return;
+ 
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irqsave(&css_set_lock, flags);
+       put_css_set_locked(cset);
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irqrestore(&css_set_lock, flags);
+ }
+ 
+ /*
+@@ -1070,11 +1072,11 @@ static struct css_set *find_css_set(struct css_set *old_cset,
+ 
+       /* First see if we already have a cgroup group that matches
+        * the desired set */
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+       cset = find_existing_css_set(old_cset, cgrp, template);
+       if (cset)
+               get_css_set(cset);
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+ 
+       if (cset)
+               return cset;
+@@ -1102,7 +1104,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
+        * find_existing_css_set() */
+       memcpy(cset->subsys, template, sizeof(cset->subsys));
+ 
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+       /* Add reference counts and links from the new css_set. */
+       list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
+               struct cgroup *c = link->cgrp;
+@@ -1128,7 +1130,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
+               css_get(css);
+       }
+ 
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+ 
+       return cset;
+ }
+@@ -1192,7 +1194,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
+        * Release all the links from cset_links to this hierarchy's
+        * root cgroup
+        */
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+ 
+       list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
+               list_del(&link->cset_link);
+@@ -1200,7 +1202,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
+               kfree(link);
+       }
+ 
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+ 
+       if (!list_empty(&root->root_list)) {
+               list_del(&root->root_list);
+@@ -1600,11 +1602,11 @@ static int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
+               ss->root = dst_root;
+               css->cgroup = dcgrp;
+ 
+-              spin_lock_bh(&css_set_lock);
++              spin_lock_irq(&css_set_lock);
+               hash_for_each(css_set_table, i, cset, hlist)
+                       list_move_tail(&cset->e_cset_node[ss->id],
+                                      &dcgrp->e_csets[ss->id]);
+-              spin_unlock_bh(&css_set_lock);
++              spin_unlock_irq(&css_set_lock);
+ 
+               /* default hierarchy doesn't enable controllers by default */
+               dst_root->subsys_mask |= 1 << ssid;
+@@ -1640,10 +1642,10 @@ static int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
+       if (!buf)
+               return -ENOMEM;
+ 
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+       ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
+       len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+ 
+       if (len >= PATH_MAX)
+               len = -ERANGE;
+@@ -1897,7 +1899,7 @@ static void cgroup_enable_task_cg_lists(void)
+ {
+       struct task_struct *p, *g;
+ 
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+ 
+       if (use_task_css_set_links)
+               goto out_unlock;
+@@ -1922,8 +1924,12 @@ static void cgroup_enable_task_cg_lists(void)
+                * entry won't be deleted though the process has exited.
+                * Do it while holding siglock so that we don't end up
+                * racing against cgroup_exit().
++               *
++               * Interrupts were already disabled while acquiring
++               * the css_set_lock, so we do not need to disable it
++               * again when acquiring the sighand->siglock here.
+                */
+-              spin_lock_irq(&p->sighand->siglock);
++              spin_lock(&p->sighand->siglock);
+               if (!(p->flags & PF_EXITING)) {
+                       struct css_set *cset = task_css_set(p);
+ 
+@@ -1932,11 +1938,11 @@ static void cgroup_enable_task_cg_lists(void)
+                       list_add_tail(&p->cg_list, &cset->tasks);
+                       get_css_set(cset);
+               }
+-              spin_unlock_irq(&p->sighand->siglock);
++              spin_unlock(&p->sighand->siglock);
+       } while_each_thread(g, p);
+       read_unlock(&tasklist_lock);
+ out_unlock:
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+ }
+ 
+ static void init_cgroup_housekeeping(struct cgroup *cgrp)
+@@ -2043,13 +2049,13 @@ static int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
+        * Link the root cgroup in this hierarchy into all the css_set
+        * objects.
+        */
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+       hash_for_each(css_set_table, i, cset, hlist) {
+               link_css_set(&tmp_links, cset, root_cgrp);
+               if (css_set_populated(cset))
+                       cgroup_update_populated(root_cgrp, true);
+       }
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+ 
+       BUG_ON(!list_empty(&root_cgrp->self.children));
+       BUG_ON(atomic_read(&root->nr_cgrps) != 1);
+@@ -2256,11 +2262,11 @@ out_mount:
+               struct cgroup *cgrp;
+ 
+               mutex_lock(&cgroup_mutex);
+-              spin_lock_bh(&css_set_lock);
++              spin_lock_irq(&css_set_lock);
+ 
+               cgrp = cset_cgroup_from_root(ns->root_cset, root);
+ 
+-              spin_unlock_bh(&css_set_lock);
++              spin_unlock_irq(&css_set_lock);
+               mutex_unlock(&cgroup_mutex);
+ 
+               nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb);
+@@ -2337,11 +2343,11 @@ char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
+       char *ret;
+ 
+       mutex_lock(&cgroup_mutex);
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+ 
+       ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
+ 
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+       mutex_unlock(&cgroup_mutex);
+ 
+       return ret;
+@@ -2369,7 +2375,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
+       char *path = NULL;
+ 
+       mutex_lock(&cgroup_mutex);
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+ 
+       root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
+ 
+@@ -2382,7 +2388,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
+                       path = buf;
+       }
+ 
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+       mutex_unlock(&cgroup_mutex);
+       return path;
+ }
+@@ -2557,7 +2563,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
+        * the new cgroup.  There are no failure cases after here, so this
+        * is the commit point.
+        */
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+       list_for_each_entry(cset, &tset->src_csets, mg_node) {
+               list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
+                       struct css_set *from_cset = task_css_set(task);
+@@ -2568,7 +2574,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
+                       put_css_set_locked(from_cset);
+               }
+       }
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+ 
+       /*
+        * Migration is committed, all target tasks are now on dst_csets.
+@@ -2597,13 +2603,13 @@ out_cancel_attach:
+               }
+       } while_each_subsys_mask();
+ out_release_tset:
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+       list_splice_init(&tset->dst_csets, &tset->src_csets);
+       list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
+               list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
+               list_del_init(&cset->mg_node);
+       }
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+       return ret;
+ }
+ 
+@@ -2634,7 +2640,7 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
+ 
+       lockdep_assert_held(&cgroup_mutex);
+ 
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+       list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
+               cset->mg_src_cgrp = NULL;
+               cset->mg_dst_cgrp = NULL;
+@@ -2642,7 +2648,7 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
+               list_del_init(&cset->mg_preload_node);
+               put_css_set_locked(cset);
+       }
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+ }
+ 
+ /**
+@@ -2783,7 +2789,7 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
+        * already PF_EXITING could be freed from underneath us unless we
+        * take an rcu_read_lock.
+        */
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+       rcu_read_lock();
+       task = leader;
+       do {
+@@ -2792,7 +2798,7 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
+                       break;
+       } while_each_thread(leader, task);
+       rcu_read_unlock();
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+ 
+       return cgroup_taskset_migrate(&tset, root);
+ }
+@@ -2816,7 +2822,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
+               return -EBUSY;
+ 
+       /* look up all src csets */
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+       rcu_read_lock();
+       task = leader;
+       do {
+@@ -2826,7 +2832,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
+                       break;
+       } while_each_thread(leader, task);
+       rcu_read_unlock();
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+ 
+       /* prepare dst csets and commit */
+       ret = cgroup_migrate_prepare_dst(&preloaded_csets);
+@@ -2859,9 +2865,9 @@ static int cgroup_procs_write_permission(struct task_struct *task,
+               struct cgroup *cgrp;
+               struct inode *inode;
+ 
+-              spin_lock_bh(&css_set_lock);
++              spin_lock_irq(&css_set_lock);
+               cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
+-              spin_unlock_bh(&css_set_lock);
++              spin_unlock_irq(&css_set_lock);
+ 
+               while (!cgroup_is_descendant(dst_cgrp, cgrp))
+                       cgrp = cgroup_parent(cgrp);
+@@ -2962,9 +2968,9 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
+               if (root == &cgrp_dfl_root)
+                       continue;
+ 
+-              spin_lock_bh(&css_set_lock);
++              spin_lock_irq(&css_set_lock);
+               from_cgrp = task_cgroup_from_root(from, root);
+-              spin_unlock_bh(&css_set_lock);
++              spin_unlock_irq(&css_set_lock);
+ 
+               retval = cgroup_attach_task(from_cgrp, tsk, false);
+               if (retval)
+@@ -3080,7 +3086,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
+       percpu_down_write(&cgroup_threadgroup_rwsem);
+ 
+       /* look up all csses currently attached to @cgrp's subtree */
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+       cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
+               struct cgrp_cset_link *link;
+ 
+@@ -3088,14 +3094,14 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
+                       cgroup_migrate_add_src(link->cset, dsct,
+                                              &preloaded_csets);
+       }
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+ 
+       /* NULL dst indicates self on default hierarchy */
+       ret = cgroup_migrate_prepare_dst(&preloaded_csets);
+       if (ret)
+               goto out_finish;
+ 
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+       list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
+               struct task_struct *task, *ntask;
+ 
+@@ -3107,7 +3113,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
+               list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
+                       cgroup_taskset_add(task, &tset);
+       }
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+ 
+       ret = cgroup_taskset_migrate(&tset, cgrp->root);
+ out_finish:
+@@ -3908,10 +3914,10 @@ static int cgroup_task_count(const struct cgroup *cgrp)
+       int count = 0;
+       struct cgrp_cset_link *link;
+ 
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+       list_for_each_entry(link, &cgrp->cset_links, cset_link)
+               count += atomic_read(&link->cset->refcount);
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+       return count;
+ }
+ 
+@@ -4249,7 +4255,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
+ 
+       memset(it, 0, sizeof(*it));
+ 
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+ 
+       it->ss = css->ss;
+ 
+@@ -4262,7 +4268,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
+ 
+       css_task_iter_advance_css_set(it);
+ 
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+ }
+ 
+ /**
+@@ -4280,7 +4286,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
+               it->cur_task = NULL;
+       }
+ 
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+ 
+       if (it->task_pos) {
+               it->cur_task = list_entry(it->task_pos, struct task_struct,
+@@ -4289,7 +4295,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
+               css_task_iter_advance(it);
+       }
+ 
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+ 
+       return it->cur_task;
+ }
+@@ -4303,10 +4309,10 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
+ void css_task_iter_end(struct css_task_iter *it)
+ {
+       if (it->cur_cset) {
+-              spin_lock_bh(&css_set_lock);
++              spin_lock_irq(&css_set_lock);
+               list_del(&it->iters_node);
+               put_css_set_locked(it->cur_cset);
+-              spin_unlock_bh(&css_set_lock);
++              spin_unlock_irq(&css_set_lock);
+       }
+ 
+       if (it->cur_task)
+@@ -4338,10 +4344,10 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
+       mutex_lock(&cgroup_mutex);
+ 
+       /* all tasks in @from are being moved, all csets are source */
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+       list_for_each_entry(link, &from->cset_links, cset_link)
+               cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+ 
+       ret = cgroup_migrate_prepare_dst(&preloaded_csets);
+       if (ret)
+@@ -5063,6 +5069,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
+       memset(css, 0, sizeof(*css));
+       css->cgroup = cgrp;
+       css->ss = ss;
++      css->id = -1;
+       INIT_LIST_HEAD(&css->sibling);
+       INIT_LIST_HEAD(&css->children);
+       css->serial_nr = css_serial_nr_next++;
+@@ -5150,7 +5157,7 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
+ 
+       err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
+       if (err < 0)
+-              goto err_free_percpu_ref;
++              goto err_free_css;
+       css->id = err;
+ 
+       /* @css is ready to be brought online now, make it visible */
+@@ -5174,9 +5181,6 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
+ 
+ err_list_del:
+       list_del_rcu(&css->sibling);
+-      cgroup_idr_remove(&ss->css_idr, css->id);
+-err_free_percpu_ref:
+-      percpu_ref_exit(&css->refcnt);
+ err_free_css:
+       call_rcu(&css->rcu_head, css_free_rcu_fn);
+       return ERR_PTR(err);
+@@ -5451,10 +5455,10 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
+        */
+       cgrp->self.flags &= ~CSS_ONLINE;
+ 
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+       list_for_each_entry(link, &cgrp->cset_links, cset_link)
+               link->cset->dead = true;
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+ 
+       /* initiate massacre of all css's */
+       for_each_css(css, ssid, cgrp)
+@@ -5725,7 +5729,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
+               goto out;
+ 
+       mutex_lock(&cgroup_mutex);
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+ 
+       for_each_root(root) {
+               struct cgroup_subsys *ss;
+@@ -5778,7 +5782,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
+ 
+       retval = 0;
+ out_unlock:
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+       mutex_unlock(&cgroup_mutex);
+       kfree(buf);
+ out:
+@@ -5923,13 +5927,13 @@ void cgroup_post_fork(struct task_struct *child)
+       if (use_task_css_set_links) {
+               struct css_set *cset;
+ 
+-              spin_lock_bh(&css_set_lock);
++              spin_lock_irq(&css_set_lock);
+               cset = task_css_set(current);
+               if (list_empty(&child->cg_list)) {
+                       get_css_set(cset);
+                       css_set_move_task(child, NULL, cset, false);
+               }
+-              spin_unlock_bh(&css_set_lock);
++              spin_unlock_irq(&css_set_lock);
+       }
+ 
+       /*
+@@ -5974,9 +5978,9 @@ void cgroup_exit(struct task_struct *tsk)
+       cset = task_css_set(tsk);
+ 
+       if (!list_empty(&tsk->cg_list)) {
+-              spin_lock_bh(&css_set_lock);
++              spin_lock_irq(&css_set_lock);
+               css_set_move_task(tsk, cset, NULL, false);
+-              spin_unlock_bh(&css_set_lock);
++              spin_unlock_irq(&css_set_lock);
+       } else {
+               get_css_set(cset);
+       }
+@@ -6044,9 +6048,9 @@ static void cgroup_release_agent(struct work_struct *work)
+       if (!pathbuf || !agentbuf)
+               goto out;
+ 
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+       path = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+       if (!path)
+               goto out;
+ 
+@@ -6306,12 +6310,12 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
+               return ERR_PTR(-EPERM);
+ 
+       mutex_lock(&cgroup_mutex);
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+ 
+       cset = task_css_set(current);
+       get_css_set(cset);
+ 
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+       mutex_unlock(&cgroup_mutex);
+ 
+       new_ns = alloc_cgroup_ns();
+@@ -6435,7 +6439,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
+       if (!name_buf)
+               return -ENOMEM;
+ 
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+       rcu_read_lock();
+       cset = rcu_dereference(current->cgroups);
+       list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
+@@ -6446,7 +6450,7 @@ static int current_css_set_cg_links_read(struct seq_file 
*seq, void *v)
+                          c->root->hierarchy_id, name_buf);
+       }
+       rcu_read_unlock();
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+       kfree(name_buf);
+       return 0;
+ }
+@@ -6457,7 +6461,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
+       struct cgroup_subsys_state *css = seq_css(seq);
+       struct cgrp_cset_link *link;
+ 
+-      spin_lock_bh(&css_set_lock);
++      spin_lock_irq(&css_set_lock);
+       list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
+               struct css_set *cset = link->cset;
+               struct task_struct *task;
+@@ -6480,7 +6484,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
+       overflow:
+               seq_puts(seq, "  ...\n");
+       }
+-      spin_unlock_bh(&css_set_lock);
++      spin_unlock_irq(&css_set_lock);
+       return 0;
+ }
+ 
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 3e3f6e49eabb..e7cea29bb9e2 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1218,6 +1218,8 @@ static struct cpuhp_step cpuhp_bp_states[] = {
+               .teardown               = takedown_cpu,
+               .cant_stop              = true,
+       },
++#else
++      [CPUHP_BRINGUP_CPU] = { },
+ #endif
+ };
+ 
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 11546a6ed5df..65716cef21f4 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4993,14 +4993,16 @@ void show_state_filter(unsigned long state_filter)
+               /*
+                * reset the NMI-timeout, listing all files on a slow
+                * console might take a lot of time:
++               * Also, reset softlockup watchdogs on all CPUs, because
++               * another CPU might be blocked waiting for us to process
++               * an IPI.
+                */
+               touch_nmi_watchdog();
++              touch_all_softlockup_watchdogs();
+               if (!state_filter || (p->state & state_filter))
+                       sched_show_task(p);
+       }
+ 
+-      touch_all_softlockup_watchdogs();
+-
+ #ifdef CONFIG_SCHED_DEBUG
+       sysrq_sched_debug_show();
+ #endif
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index eeaf920f46b9..0858959cf747 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -687,8 +687,6 @@ void init_entity_runnable_average(struct sched_entity *se)
+       /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
+ }
+ 
+-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
+-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
+ #else
+ void init_entity_runnable_average(struct sched_entity *se)
+ {
+@@ -3194,7 +3192,7 @@ static inline void check_schedstat_required(void)
+                       trace_sched_stat_iowait_enabled()  ||
+                       trace_sched_stat_blocked_enabled() ||
+                       trace_sched_stat_runtime_enabled())  {
+-              pr_warn_once("Scheduler tracepoints stat_sleep, stat_iowait, "
++              printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
+                            "stat_blocked and stat_runtime require the "
+                            "kernel parameter schedstats=enabled or "
+                            "kernel.sched_schedstats=1\n");
+@@ -4822,19 +4820,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
+               return wl;
+ 
+       for_each_sched_entity(se) {
+-              long w, W;
++              struct cfs_rq *cfs_rq = se->my_q;
++              long W, w = cfs_rq_load_avg(cfs_rq);
+ 
+-              tg = se->my_q->tg;
++              tg = cfs_rq->tg;
+ 
+               /*
+                * W = @wg + \Sum rw_j
+                */
+-              W = wg + calc_tg_weight(tg, se->my_q);
++              W = wg + atomic_long_read(&tg->load_avg);
++
++              /* Ensure \Sum rw_j >= rw_i */
++              W -= cfs_rq->tg_load_avg_contrib;
++              W += w;
+ 
+               /*
+                * w = rw_i + @wl
+                */
+-              w = cfs_rq_load_avg(se->my_q) + wl;
++              w += wl;
+ 
+               /*
+                * wl = S * s'_i; see (2)
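The effective_load() rework above reads the group-wide tg->load_avg directly and then swaps this cfs_rq's possibly stale recorded contribution for its current load, presumably so the sum W can never come out smaller than the local term w, per the "\Sum rw_j >= rw_i" comment. A small runnable model of that correction (names are illustrative, not the kernel's):

#include <stdio.h>

/* total is a lazily updated sum of per-queue contributions; replace
 * the stale recorded contribution with the fresh local value so that
 * the corrected total always covers the local term. */
static long group_weight(long wg, long total, long stale_contrib,
			 long fresh_local)
{
	long W = wg + total;

	W -= stale_contrib;	/* drop what this queue last reported */
	W += fresh_local;	/* add what it carries right now */
	return W;
}

int main(void)
{
	/* The recorded sum (10) under-counts the local queue: it last
	 * reported 3 but now carries 8. The correction yields 15 >= 8. */
	printf("W = %ld\n", group_weight(0, 10, 3, 8));
	return 0;
}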
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index 1cafba860b08..39008d78927a 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -777,6 +777,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
+                       timer->it.cpu.expires = 0;
+                      sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
+                                          &itp->it_value);
++                      return;
+               } else {
+                       cpu_timer_sample_group(timer->it_clock, p, &now);
+                       unlock_task_sighand(p, &flags);
+diff --git a/mm/compaction.c b/mm/compaction.c
+index f8e925eb479b..c59a3c480f83 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -436,25 +436,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
+ 
+               /* Found a free page, break it into order-0 pages */
+               isolated = split_free_page(page);
++              if (!isolated)
++                      break;
++
+               total_isolated += isolated;
++              cc->nr_freepages += isolated;
+               for (i = 0; i < isolated; i++) {
+                       list_add(&page->lru, freelist);
+                       page++;
+               }
+-
+-              /* If a page was split, advance to the end of it */
+-              if (isolated) {
+-                      cc->nr_freepages += isolated;
+-                      if (!strict &&
+-                              cc->nr_migratepages <= cc->nr_freepages) {
+-                              blockpfn += isolated;
+-                              break;
+-                      }
+-
+-                      blockpfn += isolated - 1;
+-                      cursor += isolated - 1;
+-                      continue;
++              if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
++                      blockpfn += isolated;
++                      break;
+               }
++              /* Advance to the end of split page */
++              blockpfn += isolated - 1;
++              cursor += isolated - 1;
++              continue;
+ 
+ isolate_fail:
+               if (strict)
+@@ -464,6 +462,9 @@ isolate_fail:
+ 
+       }
+ 
++      if (locked)
++              spin_unlock_irqrestore(&cc->zone->lock, flags);
++
+       /*
+        * There is a tiny chance that we have read bogus compound_order(),
+        * so be careful to not go outside of the pageblock.
+@@ -485,9 +486,6 @@ isolate_fail:
+       if (strict && blockpfn < end_pfn)
+               total_isolated = 0;
+ 
+-      if (locked)
+-              spin_unlock_irqrestore(&cc->zone->lock, flags);
+-
+       /* Update the pageblock-skip if the whole pageblock was scanned */
+       if (blockpfn == end_pfn)
+               update_pageblock_skip(cc, valid_page, total_isolated, false);
+@@ -938,7 +936,6 @@ static void isolate_freepages(struct compact_control *cc)
+                               block_end_pfn = block_start_pfn,
+                               block_start_pfn -= pageblock_nr_pages,
+                               isolate_start_pfn = block_start_pfn) {
+-
+               /*
+                * This can iterate a massively long zone without finding any
+                * suitable migration targets, so periodically check if we need
+@@ -962,32 +959,30 @@ static void isolate_freepages(struct compact_control *cc)
+                       continue;
+ 
+               /* Found a block suitable for isolating free pages from. */
+-              isolate_freepages_block(cc, &isolate_start_pfn,
+-                                      block_end_pfn, freelist, false);
++              isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
++                                      freelist, false);
+ 
+               /*
+-               * If we isolated enough freepages, or aborted due to async
+-               * compaction being contended, terminate the loop.
+-               * Remember where the free scanner should restart next time,
+-               * which is where isolate_freepages_block() left off.
+-               * But if it scanned the whole pageblock, isolate_start_pfn
+-               * now points at block_end_pfn, which is the start of the next
+-               * pageblock.
+-               * In that case we will however want to restart at the start
+-               * of the previous pageblock.
++               * If we isolated enough freepages, or aborted due to lock
++               * contention, terminate.
+                */
+               if ((cc->nr_freepages >= cc->nr_migratepages)
+                                                       || cc->contended) {
+-                      if (isolate_start_pfn >= block_end_pfn)
++                      if (isolate_start_pfn >= block_end_pfn) {
++                              /*
++                               * Restart at previous pageblock if more
++                               * freepages can be isolated next time.
++                               */
+                               isolate_start_pfn =
+                                       block_start_pfn - pageblock_nr_pages;
++                      }
+                       break;
+-              } else {
++              } else if (isolate_start_pfn < block_end_pfn) {
+                       /*
+-                       * isolate_freepages_block() should not terminate
+-                       * prematurely unless contended, or isolated enough
++                       * If isolation failed early, do not continue
++                       * needlessly.
+                        */
+-                      VM_BUG_ON(isolate_start_pfn < block_end_pfn);
++                      break;
+               }
+       }
+ 
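Two things happen in the compaction hunks above (annotation only): a zero return from split_free_page() now terminates the scan instead of being skipped over, and the zone lock is dropped before the trailing pfn fix-up that does not need it. A hedged, runnable model of the first point; the split/take step is faked:

#include <stdio.h>

/* Pretend split step: succeeds three times, then fails. */
static unsigned long take_one(unsigned long pfn)
{
	return pfn < 3 ? 1 : 0;
}

int main(void)
{
	unsigned long pfn, total = 0;

	for (pfn = 0; pfn < 8; pfn++) {
		unsigned long got = take_one(pfn);

		if (!got)
			break;	/* failed split: stop, don't keep spinning */
		total += got;
	}
	printf("isolated %lu page(s)\n", total);
	return 0;
}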
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 19d0d08b396f..6b5058f9a1e3 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3328,7 +3328,7 @@ retry_avoidcopy:
+       /* If no-one else is actually using this page, avoid the copy
+        * and just make the page writable */
+       if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
+-              page_move_anon_rmap(old_page, vma, address);
++              page_move_anon_rmap(old_page, vma);
+               set_huge_ptep_writable(vma, address, ptep);
+               return 0;
+       }
+diff --git a/mm/internal.h b/mm/internal.h
+index b79abb6721cf..bb309ad2b5e3 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -24,7 +24,8 @@
+  */
+ #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
+                       __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
+-                      __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
++                      __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
++                      __GFP_ATOMIC)
+ 
+ /* The GFP flags allowed during early boot */
+ #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
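GFP_RECLAIM_MASK filters a caller's gfp flags down to the bits that matter for nested allocations; before this hunk the mask dropped __GFP_ATOMIC, so an atomic caller's constraint was silently lost on the way down. A runnable toy model of mask filtering; the flag names are invented for the example:

#include <stdio.h>

#define F_IO     0x1u
#define F_FS     0x2u
#define F_ATOMIC 0x4u

/* Before the fix the mask lacked F_ATOMIC, so "req & RELEVANT_MASK"
 * stripped the atomic hint and a nested allocation could sleep. */
#define RELEVANT_MASK (F_IO | F_FS | F_ATOMIC)

int main(void)
{
	unsigned int req = F_IO | F_ATOMIC;

	printf("flags passed through: %#x\n", req & RELEVANT_MASK);
	return 0;
}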
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index a2e79b83920f..bf860dbdd26e 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4038,6 +4038,60 @@ static struct cftype mem_cgroup_legacy_files[] = {
+       { },    /* terminate */
+ };
+ 
++/*
++ * Private memory cgroup IDR
++ *
++ * Swap-out records and page cache shadow entries need to store memcg
++ * references in constrained space, so we maintain an ID space that is
++ * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
++ * memory-controlled cgroups to 64k.
++ *
++ * However, there usually are many references to the offline CSS after
++ * the cgroup has been destroyed, such as page cache or reclaimable
++ * slab objects, that don't need to hang on to the ID. We want to keep
++ * those dead CSS from occupying IDs, or we might quickly exhaust the
++ * relatively small ID space and prevent the creation of new cgroups
++ * even when there are much fewer than 64k cgroups - possibly none.
++ *
++ * Maintain a private 16-bit ID space for memcg, and allow the ID to
++ * be freed and recycled when it's no longer needed, which is usually
++ * when the CSS is offlined.
++ *
++ * The only exception to that are records of swapped out tmpfs/shmem
++ * pages that need to be attributed to live ancestors on swapin. But
++ * those references are manageable from userspace.
++ */
++
++static DEFINE_IDR(mem_cgroup_idr);
++
++static void mem_cgroup_id_get(struct mem_cgroup *memcg)
++{
++      atomic_inc(&memcg->id.ref);
++}
++
++static void mem_cgroup_id_put(struct mem_cgroup *memcg)
++{
++      if (atomic_dec_and_test(&memcg->id.ref)) {
++              idr_remove(&mem_cgroup_idr, memcg->id.id);
++              memcg->id.id = 0;
++
++              /* Memcg ID pins CSS */
++              css_put(&memcg->css);
++      }
++}
++
++/**
++ * mem_cgroup_from_id - look up a memcg from a memcg id
++ * @id: the memcg id to look up
++ *
++ * Caller must hold rcu_read_lock().
++ */
++struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
++{
++      WARN_ON_ONCE(!rcu_read_lock_held());
++      return idr_find(&mem_cgroup_idr, id);
++}
++
+ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
+ {
+       struct mem_cgroup_per_node *pn;
+@@ -4097,6 +4151,12 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
+       if (!memcg)
+               return NULL;
+ 
++      memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
++                               1, MEM_CGROUP_ID_MAX,
++                               GFP_KERNEL);
++      if (memcg->id.id < 0)
++              goto fail;
++
+       memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
+       if (!memcg->stat)
+               goto fail;
+@@ -4123,8 +4183,11 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
+ #ifdef CONFIG_CGROUP_WRITEBACK
+       INIT_LIST_HEAD(&memcg->cgwb_list);
+ #endif
++      idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
+       return memcg;
+ fail:
++      if (memcg->id.id > 0)
++              idr_remove(&mem_cgroup_idr, memcg->id.id);
+       mem_cgroup_free(memcg);
+       return NULL;
+ }
+@@ -4184,15 +4247,14 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
+       return &memcg->css;
+ fail:
+       mem_cgroup_free(memcg);
+-      return NULL;
++      return ERR_PTR(-ENOMEM);
+ }
+ 
+-static int
+-mem_cgroup_css_online(struct cgroup_subsys_state *css)
++static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
+ {
+-      if (css->id > MEM_CGROUP_ID_MAX)
+-              return -ENOSPC;
+-
++      /* Online state pins memcg ID, memcg ID pins CSS */
++      mem_cgroup_id_get(mem_cgroup_from_css(css));
++      css_get(css);
+       return 0;
+ }
+ 
+@@ -4215,6 +4277,8 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
+ 
+       memcg_offline_kmem(memcg);
+       wb_memcg_offline(memcg);
++
++      mem_cgroup_id_put(memcg);
+ }
+ 
+ static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
+@@ -5524,6 +5588,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
+       struct mem_cgroup *memcg;
+       unsigned int nr_pages;
+       bool compound;
++      unsigned long flags;
+ 
+       VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
+       VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
+@@ -5554,10 +5619,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
+ 
+       commit_charge(newpage, memcg, false);
+ 
+-      local_irq_disable();
++      local_irq_save(flags);
+       mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
+       memcg_check_events(memcg, newpage);
+-      local_irq_enable();
++      local_irq_restore(flags);
+ }
+ 
+ DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
+@@ -5735,6 +5800,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+       if (!memcg)
+               return;
+ 
++      mem_cgroup_id_get(memcg);
+       oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
+       VM_BUG_ON_PAGE(oldid, page);
+       mem_cgroup_swap_statistics(memcg, true);
+@@ -5753,6 +5819,9 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+       VM_BUG_ON(!irqs_disabled());
+       mem_cgroup_charge_statistics(memcg, page, false, -1);
+       memcg_check_events(memcg, page);
++
++      if (!mem_cgroup_is_root(memcg))
++              css_put(&memcg->css);
+ }
+ 
+ /*
+@@ -5783,11 +5852,11 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
+           !page_counter_try_charge(&memcg->swap, 1, &counter))
+               return -ENOMEM;
+ 
++      mem_cgroup_id_get(memcg);
+       oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
+       VM_BUG_ON_PAGE(oldid, page);
+       mem_cgroup_swap_statistics(memcg, true);
+ 
+-      css_get(&memcg->css);
+       return 0;
+ }
+ 
+@@ -5816,7 +5885,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry)
+                               page_counter_uncharge(&memcg->memsw, 1);
+               }
+               mem_cgroup_swap_statistics(memcg, false);
+-              css_put(&memcg->css);
++              mem_cgroup_id_put(memcg);
+       }
+       rcu_read_unlock();
+ }
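The memcontrol hunks above introduce the private 16-bit memcg ID space. One detail worth highlighting (annotation only) is the two-step IDR publication in mem_cgroup_alloc(): the ID is reserved early with a NULL pointer, and the real pointer is installed only once setup has fully succeeded. A kernel-style sketch of that pattern, using a hypothetical struct foo rather than the patch's own code:

#include <linux/idr.h>
#include <linux/slab.h>

struct foo {
	int id;
	/* ... */
};

static DEFINE_IDR(example_idr);

/* Reserve the ID with NULL so lookups cannot see a half-built object,
 * then idr_replace() the finished object in to publish it. */
static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;

	f->id = idr_alloc(&example_idr, NULL, 1, 1 << 16, GFP_KERNEL);
	if (f->id < 0)
		goto fail;

	/* ... further setup; on failure, idr_remove() before freeing ... */

	idr_replace(&example_idr, f, f->id);	/* publish */
	return f;

fail:
	kfree(f);
	return NULL;
}

This keeps mem_cgroup_from_id() from ever returning a half-initialized memcg: idr_find() yields NULL until idr_replace() has run.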
+diff --git a/mm/memory.c b/mm/memory.c
+index 07493e34ab7e..338cc5406fe0 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2397,8 +2397,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
+                                * Protected against the rmap code by
+                                * the page lock.
+                                */
+-                              page_move_anon_rmap(compound_head(old_page),
+-                                                  vma, address);
++                              page_move_anon_rmap(old_page, vma);
+                       }
+                       unlock_page(old_page);
+                       return wp_page_reuse(mm, vma, address, page_table, ptl,
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 898fe3f909f9..ff50f5974d74 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -286,7 +286,9 @@ static inline void reset_deferred_meminit(pg_data_t *pgdat)
+ /* Returns true if the struct page for the pfn is uninitialised */
+ static inline bool __meminit early_page_uninitialised(unsigned long pfn)
+ {
+-      if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
++      int nid = early_pfn_to_nid(pfn);
++
++      if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
+               return true;
+ 
+       return false;
+@@ -1110,7 +1112,7 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
+       spin_lock(&early_pfn_lock);
+       nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
+       if (nid < 0)
+-              nid = 0;
++              nid = first_online_node;
+       spin_unlock(&early_pfn_lock);
+ 
+       return nid;
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 3ebf9c4c2f4d..08fc03a31aaa 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1084,23 +1084,20 @@ EXPORT_SYMBOL_GPL(page_mkclean);
+  * page_move_anon_rmap - move a page to our anon_vma
+  * @page:     the page to move to our anon_vma
+  * @vma:      the vma the page belongs to
+- * @address:  the user virtual address mapped
+  *
+  * When a page belongs exclusively to one process after a COW event,
+  * that page can be moved into the anon_vma that belongs to just that
+  * process, so the rmap code will not search the parent or sibling
+  * processes.
+  */
+-void page_move_anon_rmap(struct page *page,
+-      struct vm_area_struct *vma, unsigned long address)
++void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
+ {
+       struct anon_vma *anon_vma = vma->anon_vma;
+ 
++      page = compound_head(page);
++
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+       VM_BUG_ON_VMA(!anon_vma, vma);
+-      if (IS_ENABLED(CONFIG_DEBUG_VM) && PageTransHuge(page))
+-              address &= HPAGE_PMD_MASK;
+-      VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
+ 
+       anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+       /*
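The rmap hunk moves the compound_head() normalization into page_move_anon_rmap() itself, which is why the hugetlb.c and memory.c call sites earlier in this patch could drop their own compound_head() and address handling. A runnable toy model of normalize-in-the-callee; the struct and helper are stand-ins, not the kernel's:

#include <stdio.h>

struct page { struct page *head; };	/* head == NULL means "I am a head" */

static struct page *head_of(struct page *p)
{
	return p->head ? p->head : p;
}

/* Doing the normalization inside the callee means no call site can
 * forget it when passing a tail page. */
static void move_rmap(struct page *p)
{
	p = head_of(p);
	printf("operating on %p\n", (void *)p);
}

int main(void)
{
	struct page head = { NULL };
	struct page tail = { &head };

	move_rmap(&head);
	move_rmap(&tail);	/* prints the same pointer as above */
	return 0;
}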
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 3239bfd758e6..3ac9e661dc86 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -526,8 +526,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
+               goto out_unlock;
+ 
+       cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
+-      cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
+-                             css->id, memcg_name_buf);
++      cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
++                             css->serial_nr, memcg_name_buf);
+       if (!cache_name)
+               goto out_unlock;
+ 
+diff --git a/mm/swap.c b/mm/swap.c
+index 03aacbcb013f..374aa6e0c576 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -239,7 +239,7 @@ void rotate_reclaimable_page(struct page *page)
+               get_page(page);
+               local_irq_save(flags);
+               pvec = this_cpu_ptr(&lru_rotate_pvecs);
+-              if (!pagevec_add(pvec, page))
++              if (!pagevec_add(pvec, page) || PageCompound(page))
+                       pagevec_move_tail(pvec);
+               local_irq_restore(flags);
+       }
+@@ -295,7 +295,7 @@ void activate_page(struct page *page)
+               struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+ 
+               get_page(page);
+-              if (!pagevec_add(pvec, page))
++              if (!pagevec_add(pvec, page) || PageCompound(page))
+                       pagevec_lru_move_fn(pvec, __activate_page, NULL);
+               put_cpu_var(activate_page_pvecs);
+       }
+@@ -390,9 +390,8 @@ static void __lru_cache_add(struct page *page)
+       struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+ 
+       get_page(page);
+-      if (!pagevec_space(pvec))
++      if (!pagevec_add(pvec, page) || PageCompound(page))
+               __pagevec_lru_add(pvec);
+-      pagevec_add(pvec, page);
+       put_cpu_var(lru_add_pvec);
+ }
+ 
+@@ -627,7 +626,7 @@ void deactivate_file_page(struct page *page)
+       if (likely(get_page_unless_zero(page))) {
+               struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
+ 
+-              if (!pagevec_add(pvec, page))
++              if (!pagevec_add(pvec, page) || PageCompound(page))
+                       pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
+               put_cpu_var(lru_deactivate_file_pvecs);
+       }
+@@ -647,7 +646,7 @@ void deactivate_page(struct page *page)
+               struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+ 
+               get_page(page);
+-              if (!pagevec_add(pvec, page))
++              if (!pagevec_add(pvec, page) || PageCompound(page))
+                       pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+               put_cpu_var(lru_deactivate_pvecs);
+       }
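All the mm/swap.c hunks apply one pattern (annotation only): pagevec_add() returns the space left after adding, so "!pagevec_add(...) || PageCompound(page)" drains the per-cpu vector the moment it fills or a compound page lands in it, whereas the old __lru_cache_add() drained one call late. A runnable model with invented sizes and names:

#include <stdbool.h>
#include <stdio.h>

#define VEC_SIZE 14

struct vec { int n; int slot[VEC_SIZE]; };

/* Like pagevec_add(): add, then report the space remaining. */
static int vec_add(struct vec *v, int page)
{
	v->slot[v->n++] = page;
	return VEC_SIZE - v->n;
}

static void drain(struct vec *v)
{
	printf("draining %d page(s)\n", v->n);
	v->n = 0;
}

/* Drain exactly when the vector fills, or when the page is "huge",
 * instead of holding a full vector until the next call. */
static void cache_add(struct vec *v, int page, bool huge)
{
	if (!vec_add(v, page) || huge)
		drain(v);
}

int main(void)
{
	struct vec v = { 0 };
	int i;

	for (i = 0; i < 15; i++)
		cache_add(&v, i, false);
	cache_add(&v, 99, true);	/* huge page: drained immediately */
	return 0;
}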
+diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
+index 243574c8cf33..8a832fa80dde 100644
+--- a/net/ceph/osdmap.c
++++ b/net/ceph/osdmap.c
+@@ -1201,6 +1201,115 @@ struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
+ }
+ 
+ /*
++ * Encoding order is (new_up_client, new_state, new_weight).  Need to
++ * apply in the (new_weight, new_state, new_up_client) order, because
++ * an incremental map may look like e.g.
++ *
++ *     new_up_client: { osd=6, addr=... } # set osd_state and addr
++ *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
++ */
++static int decode_new_up_state_weight(void **p, void *end,
++                                    struct ceph_osdmap *map)
++{
++      void *new_up_client;
++      void *new_state;
++      void *new_weight_end;
++      u32 len;
++
++      new_up_client = *p;
++      ceph_decode_32_safe(p, end, len, e_inval);
++      len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
++      ceph_decode_need(p, end, len, e_inval);
++      *p += len;
++
++      new_state = *p;
++      ceph_decode_32_safe(p, end, len, e_inval);
++      len *= sizeof(u32) + sizeof(u8);
++      ceph_decode_need(p, end, len, e_inval);
++      *p += len;
++
++      /* new_weight */
++      ceph_decode_32_safe(p, end, len, e_inval);
++      while (len--) {
++              s32 osd;
++              u32 w;
++
++              ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
++              osd = ceph_decode_32(p);
++              w = ceph_decode_32(p);
++              BUG_ON(osd >= map->max_osd);
++              pr_info("osd%d weight 0x%x %s\n", osd, w,
++                   w == CEPH_OSD_IN ? "(in)" :
++                   (w == CEPH_OSD_OUT ? "(out)" : ""));
++              map->osd_weight[osd] = w;
++
++              /*
++               * If we are marking in, set the EXISTS, and clear the
++               * AUTOOUT and NEW bits.
++               */
++              if (w) {
++                      map->osd_state[osd] |= CEPH_OSD_EXISTS;
++                      map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
++                                               CEPH_OSD_NEW);
++              }
++      }
++      new_weight_end = *p;
++
++      /* new_state (up/down) */
++      *p = new_state;
++      len = ceph_decode_32(p);
++      while (len--) {
++              s32 osd;
++              u8 xorstate;
++              int ret;
++
++              osd = ceph_decode_32(p);
++              xorstate = ceph_decode_8(p);
++              if (xorstate == 0)
++                      xorstate = CEPH_OSD_UP;
++              BUG_ON(osd >= map->max_osd);
++              if ((map->osd_state[osd] & CEPH_OSD_UP) &&
++                  (xorstate & CEPH_OSD_UP))
++                      pr_info("osd%d down\n", osd);
++              if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
++                  (xorstate & CEPH_OSD_EXISTS)) {
++                      pr_info("osd%d does not exist\n", osd);
++                      map->osd_weight[osd] = CEPH_OSD_IN;
++                      ret = set_primary_affinity(map, osd,
++                                                 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
++                      if (ret)
++                              return ret;
++                      memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
++                      map->osd_state[osd] = 0;
++              } else {
++                      map->osd_state[osd] ^= xorstate;
++              }
++      }
++
++      /* new_up_client */
++      *p = new_up_client;
++      len = ceph_decode_32(p);
++      while (len--) {
++              s32 osd;
++              struct ceph_entity_addr addr;
++
++              osd = ceph_decode_32(p);
++              ceph_decode_copy(p, &addr, sizeof(addr));
++              ceph_decode_addr(&addr);
++              BUG_ON(osd >= map->max_osd);
++              pr_info("osd%d up\n", osd);
++              map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
++              map->osd_addr[osd] = addr;
++      }
++
++      *p = new_weight_end;
++      return 0;
++
++e_inval:
++      return -EINVAL;
++}
++
++/*
+  * decode and apply an incremental map update.
+  */
+ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+@@ -1299,49 +1408,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+                       __remove_pg_pool(&map->pg_pools, pi);
+       }
+ 
+-      /* new_up */
+-      ceph_decode_32_safe(p, end, len, e_inval);
+-      while (len--) {
+-              u32 osd;
+-              struct ceph_entity_addr addr;
+-              ceph_decode_32_safe(p, end, osd, e_inval);
+-              ceph_decode_copy_safe(p, end, &addr, sizeof(addr), e_inval);
+-              ceph_decode_addr(&addr);
+-              pr_info("osd%d up\n", osd);
+-              BUG_ON(osd >= map->max_osd);
+-              map->osd_state[osd] |= CEPH_OSD_UP | CEPH_OSD_EXISTS;
+-              map->osd_addr[osd] = addr;
+-      }
+-
+-      /* new_state */
+-      ceph_decode_32_safe(p, end, len, e_inval);
+-      while (len--) {
+-              u32 osd;
+-              u8 xorstate;
+-              ceph_decode_32_safe(p, end, osd, e_inval);
+-              xorstate = **(u8 **)p;
+-              (*p)++;  /* clean flag */
+-              if (xorstate == 0)
+-                      xorstate = CEPH_OSD_UP;
+-              if (xorstate & CEPH_OSD_UP)
+-                      pr_info("osd%d down\n", osd);
+-              if (osd < map->max_osd)
+-                      map->osd_state[osd] ^= xorstate;
+-      }
+-
+-      /* new_weight */
+-      ceph_decode_32_safe(p, end, len, e_inval);
+-      while (len--) {
+-              u32 osd, off;
+-              ceph_decode_need(p, end, sizeof(u32)*2, e_inval);
+-              osd = ceph_decode_32(p);
+-              off = ceph_decode_32(p);
+-              pr_info("osd%d weight 0x%x %s\n", osd, off,
+-                   off == CEPH_OSD_IN ? "(in)" :
+-                   (off == CEPH_OSD_OUT ? "(out)" : ""));
+-              if (osd < map->max_osd)
+-                      map->osd_weight[osd] = off;
+-      }
++      /* new_up_client, new_state, new_weight */
++      err = decode_new_up_state_weight(p, end, map);
++      if (err)
++              goto bad;
+ 
+       /* new_pg_temp */
+       err = decode_new_pg_temp(p, end, map);
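decode_new_up_state_weight() above handles sections whose encoded order (new_up_client, new_state, new_weight) differs from the order in which they must be applied: it saves each section's start pointer, skips past all of them once to find the resume point, then revisits the saved positions. A runnable toy version of that cursor discipline (buffer layout invented for the example):

#include <stdio.h>

int main(void)
{
	const char buf[] = "UUUSSSWWW";	/* encoded: up, state, weight */
	const char *up = buf;		/* saved before skipping */
	const char *state = up + 3;
	const char *weight = state + 3;
	const char *end = weight + 3;	/* where decoding resumes */

	/* Apply in the required order: weight, state, up. */
	printf("apply: %.3s %.3s %.3s\n", weight, state, up);
	printf("resume at offset %td\n", end - buf);
	return 0;
}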
+diff --git a/net/rds/tcp.c b/net/rds/tcp.c
+index 86187dad1440..f8d94f4042ef 100644
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -544,7 +544,7 @@ static int rds_tcp_init(void)
+ 
+       ret = rds_tcp_recv_init();
+       if (ret)
+-              goto out_slab;
++              goto out_pernet;
+ 
+       ret = rds_trans_register(&rds_tcp_transport);
+       if (ret)
+@@ -556,8 +556,9 @@ static int rds_tcp_init(void)
+ 
+ out_recv:
+       rds_tcp_recv_exit();
+-out_slab:
++out_pernet:
+       unregister_pernet_subsys(&rds_tcp_net_ops);
++out_slab:
+       kmem_cache_destroy(rds_tcp_conn_slab);
+ out:
+       return ret;
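The rds_tcp_init() hunk is a classic goto-unwind repair: a failure in rds_tcp_recv_init() used to jump to a label that skipped unregister_pernet_subsys(), leaking the pernet registration. The rule being restored, shown as a runnable model, is that error labels undo work in reverse order of setup:

#include <stdio.h>

static int setup_a(void) { return 0; }
static int setup_b(void) { return 0; }	/* register_pernet analogue */
static int setup_c(void) { return -1; }	/* recv_init analogue: fails */

static int demo_init(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		goto out;
	ret = setup_b();
	if (ret)
		goto out_a;
	ret = setup_c();
	if (ret)
		goto out_b;	/* the bug jumped to out_a, leaking b */
	return 0;

out_b:
	puts("undo b");
out_a:
	puts("undo a");
out:
	return ret;
}

int main(void)
{
	return demo_init() ? 1 : 0;
}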
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 47b917841623..29b145ed3338 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -721,6 +721,8 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
+        * alignment since sizeof(struct ethhdr) is 14.
+        */
+       frame = dev_alloc_skb(hlen + sizeof(struct ethhdr) + 2 + cur_len);
++      if (!frame)
++              return NULL;
+ 
+       skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
+       skb_copy_bits(skb, offset, skb_put(frame, cur_len), cur_len);
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 23b73f6ac040..98378539fc59 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -1225,6 +1225,7 @@ static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
+               tu->tstamp = *tstamp;
+       if ((tu->filter & (1 << event)) == 0 || !tu->tread)
+               return;
++      memset(&r1, 0, sizeof(r1));
+       r1.event = event;
+       r1.tstamp = *tstamp;
+       r1.val = resolution;
+@@ -1267,6 +1268,7 @@ static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
+       }
+       if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
+           tu->last_resolution != resolution) {
++              memset(&r1, 0, sizeof(r1));
+               r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
+               r1.tstamp = tstamp;
+               r1.val = resolution;
+@@ -1739,6 +1741,7 @@ static int snd_timer_user_params(struct file *file,
+       if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
+               if (tu->tread) {
+                       struct snd_timer_tread tread;
++                      memset(&tread, 0, sizeof(tread));
+                       tread.event = SNDRV_TIMER_EVENT_EARLY;
+                       tread.tstamp.tv_sec = 0;
+                       tread.tstamp.tv_nsec = 0;
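The sound/core/timer.c hunks all add a memset() before filling structs that are later copied out to user space; without it, struct padding and any fields a given event type leaves unset would carry stale kernel stack bytes to userland. A runnable model of the pattern, with fwrite() standing in for the copy-out:

#include <stdio.h>
#include <string.h>

struct event {
	char type;
	/* the compiler may insert padding here */
	long value;
	long spare;	/* set only by some event types */
};

int main(void)
{
	struct event ev;

	memset(&ev, 0, sizeof(ev));	/* the fix: no stale stack bytes */
	ev.type = 1;
	ev.value = 42;
	fwrite(&ev, sizeof(ev), 1, stdout);	/* stands in for copy_to_user */
	return 0;
}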
